Land the Fan (disabled)
author danno@chromium.org <danno@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Wed, 30 Jul 2014 13:54:45 +0000 (13:54 +0000)
committer danno@chromium.org <danno@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Wed, 30 Jul 2014 13:54:45 +0000 (13:54 +0000)
R=mstarzinger@chromium.org

Review URL: https://codereview.chromium.org/426233002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22709 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

323 files changed:
build/toolchain.gypi
include/v8.h
src/arm/assembler-arm.cc
src/arm/assembler-arm.h
src/arm/code-stubs-arm.cc
src/arm/deoptimizer-arm.cc
src/arm/disasm-arm.cc
src/arm/lithium-arm.cc
src/arm/lithium-arm.h
src/arm/lithium-codegen-arm.cc
src/arm/macro-assembler-arm.cc
src/arm/macro-assembler-arm.h
src/arm/simulator-arm.cc
src/arm64/code-stubs-arm64.cc
src/arm64/deoptimizer-arm64.cc
src/arm64/lithium-arm64.cc
src/arm64/lithium-arm64.h
src/arm64/lithium-codegen-arm64.cc
src/arm64/simulator-arm64.cc
src/arm64/simulator-arm64.h
src/arm64/utils-arm64.h
src/ast.h
src/base/logging.h
src/bootstrapper.cc
src/checks.cc
src/checks.h
src/code-stubs.cc
src/code-stubs.h
src/compiler-intrinsics.h
src/compiler.cc
src/compiler.h
src/compiler/arm/code-generator-arm.cc [new file with mode: 0644]
src/compiler/arm/instruction-codes-arm.h [new file with mode: 0644]
src/compiler/arm/instruction-selector-arm.cc [new file with mode: 0644]
src/compiler/arm/linkage-arm.cc [new file with mode: 0644]
src/compiler/arm64/code-generator-arm64.cc [new file with mode: 0644]
src/compiler/arm64/instruction-codes-arm64.h [new file with mode: 0644]
src/compiler/arm64/instruction-selector-arm64.cc [new file with mode: 0644]
src/compiler/arm64/linkage-arm64.cc [new file with mode: 0644]
src/compiler/ast-graph-builder.cc [new file with mode: 0644]
src/compiler/ast-graph-builder.h [new file with mode: 0644]
src/compiler/code-generator-impl.h [new file with mode: 0644]
src/compiler/code-generator.cc [new file with mode: 0644]
src/compiler/code-generator.h [new file with mode: 0644]
src/compiler/common-node-cache.h [new file with mode: 0644]
src/compiler/common-operator.h [new file with mode: 0644]
src/compiler/control-builders.cc [new file with mode: 0644]
src/compiler/control-builders.h [new file with mode: 0644]
src/compiler/frame.h [new file with mode: 0644]
src/compiler/gap-resolver.cc [new file with mode: 0644]
src/compiler/gap-resolver.h [new file with mode: 0644]
src/compiler/generic-algorithm-inl.h [new file with mode: 0644]
src/compiler/generic-algorithm.h [new file with mode: 0644]
src/compiler/generic-graph.h [new file with mode: 0644]
src/compiler/generic-node-inl.h [new file with mode: 0644]
src/compiler/generic-node.h [new file with mode: 0644]
src/compiler/graph-builder.cc [new file with mode: 0644]
src/compiler/graph-builder.h [new file with mode: 0644]
src/compiler/graph-inl.h [new file with mode: 0644]
src/compiler/graph-reducer.cc [new file with mode: 0644]
src/compiler/graph-reducer.h [new file with mode: 0644]
src/compiler/graph-replay.cc [new file with mode: 0644]
src/compiler/graph-replay.h [new file with mode: 0644]
src/compiler/graph-visualizer.cc [new file with mode: 0644]
src/compiler/graph-visualizer.h [new file with mode: 0644]
src/compiler/graph.cc [new file with mode: 0644]
src/compiler/graph.h [new file with mode: 0644]
src/compiler/ia32/code-generator-ia32.cc [new file with mode: 0644]
src/compiler/ia32/instruction-codes-ia32.h [new file with mode: 0644]
src/compiler/ia32/instruction-selector-ia32.cc [new file with mode: 0644]
src/compiler/ia32/linkage-ia32.cc [new file with mode: 0644]
src/compiler/instruction-codes.h [new file with mode: 0644]
src/compiler/instruction-selector-impl.h [new file with mode: 0644]
src/compiler/instruction-selector.cc [new file with mode: 0644]
src/compiler/instruction-selector.h [new file with mode: 0644]
src/compiler/instruction.cc [new file with mode: 0644]
src/compiler/instruction.h [new file with mode: 0644]
src/compiler/ir-operations.txt [new file with mode: 0644]
src/compiler/js-context-specialization.cc [new file with mode: 0644]
src/compiler/js-context-specialization.h [new file with mode: 0644]
src/compiler/js-generic-lowering.cc [new file with mode: 0644]
src/compiler/js-generic-lowering.h [new file with mode: 0644]
src/compiler/js-graph.cc [new file with mode: 0644]
src/compiler/js-graph.h [new file with mode: 0644]
src/compiler/js-operator.h [new file with mode: 0644]
src/compiler/js-typed-lowering.cc [new file with mode: 0644]
src/compiler/js-typed-lowering.h [new file with mode: 0644]
src/compiler/linkage-impl.h [new file with mode: 0644]
src/compiler/linkage.cc [new file with mode: 0644]
src/compiler/linkage.h [new file with mode: 0644]
src/compiler/lowering-builder.cc [new file with mode: 0644]
src/compiler/lowering-builder.h [new file with mode: 0644]
src/compiler/machine-node-factory.h [new file with mode: 0644]
src/compiler/machine-operator-reducer.cc [new file with mode: 0644]
src/compiler/machine-operator-reducer.h [new file with mode: 0644]
src/compiler/machine-operator.h [new file with mode: 0644]
src/compiler/node-aux-data-inl.h [new file with mode: 0644]
src/compiler/node-aux-data.h [new file with mode: 0644]
src/compiler/node-cache.cc [new file with mode: 0644]
src/compiler/node-cache.h [new file with mode: 0644]
src/compiler/node-matchers.h [new file with mode: 0644]
src/compiler/node-properties-inl.h [new file with mode: 0644]
src/compiler/node-properties.h [new file with mode: 0644]
src/compiler/node.cc [new file with mode: 0644]
src/compiler/node.h [new file with mode: 0644]
src/compiler/opcodes.h [new file with mode: 0644]
src/compiler/operator-properties-inl.h [new file with mode: 0644]
src/compiler/operator-properties.h [new file with mode: 0644]
src/compiler/operator.h [new file with mode: 0644]
src/compiler/phi-reducer.h [new file with mode: 0644]
src/compiler/pipeline.cc [new file with mode: 0644]
src/compiler/pipeline.h [new file with mode: 0644]
src/compiler/raw-machine-assembler.cc [new file with mode: 0644]
src/compiler/raw-machine-assembler.h [new file with mode: 0644]
src/compiler/register-allocator.cc [new file with mode: 0644]
src/compiler/register-allocator.h [new file with mode: 0644]
src/compiler/representation-change.h [new file with mode: 0644]
src/compiler/schedule.cc [new file with mode: 0644]
src/compiler/schedule.h [new file with mode: 0644]
src/compiler/scheduler.cc [new file with mode: 0644]
src/compiler/scheduler.h [new file with mode: 0644]
src/compiler/simplified-lowering.cc [new file with mode: 0644]
src/compiler/simplified-lowering.h [new file with mode: 0644]
src/compiler/simplified-node-factory.h [new file with mode: 0644]
src/compiler/simplified-operator.h [new file with mode: 0644]
src/compiler/source-position.cc [new file with mode: 0644]
src/compiler/source-position.h [new file with mode: 0644]
src/compiler/structured-machine-assembler.cc [new file with mode: 0644]
src/compiler/structured-machine-assembler.h [new file with mode: 0644]
src/compiler/typer.cc [new file with mode: 0644]
src/compiler/typer.h [new file with mode: 0644]
src/compiler/verifier.cc [new file with mode: 0644]
src/compiler/verifier.h [new file with mode: 0644]
src/compiler/x64/code-generator-x64.cc [new file with mode: 0644]
src/compiler/x64/instruction-codes-x64.h [new file with mode: 0644]
src/compiler/x64/instruction-selector-x64.cc [new file with mode: 0644]
src/compiler/x64/linkage-x64.cc [new file with mode: 0644]
src/contexts.cc
src/contexts.h
src/data-flow.h
src/deoptimizer.cc
src/deoptimizer.h
src/elements-kind.h
src/field-index.h
src/flag-definitions.h
src/frames.cc
src/gdb-jit.cc
src/globals.h
src/hydrogen-gvn.h
src/hydrogen-instructions.h
src/hydrogen-types.cc
src/hydrogen-types.h
src/hydrogen.cc
src/hydrogen.h
src/ia32/assembler-ia32-inl.h
src/ia32/assembler-ia32.cc
src/ia32/assembler-ia32.h
src/ia32/code-stubs-ia32.cc
src/ia32/deoptimizer-ia32.cc
src/ia32/disasm-ia32.cc
src/ia32/lithium-codegen-ia32.cc
src/ia32/lithium-ia32.cc
src/ia32/lithium-ia32.h
src/isolate.cc
src/isolate.h
src/lithium-allocator-inl.h
src/lithium-allocator.cc
src/lithium-allocator.h
src/lithium-inl.h [new file with mode: 0644]
src/lithium.cc
src/lithium.h
src/mips/code-stubs-mips.cc
src/mips/deoptimizer-mips.cc
src/mips/lithium-codegen-mips.cc
src/mips/lithium-mips.cc
src/mips/lithium-mips.h
src/mips64/code-stubs-mips64.cc
src/mips64/deoptimizer-mips64.cc
src/mips64/lithium-codegen-mips64.cc
src/objects-debug.cc
src/objects-inl.h
src/objects-printer.cc
src/objects-visiting-inl.h
src/objects.cc
src/objects.h
src/parser.cc
src/property.cc
src/property.h
src/runtime.cc
src/runtime.h
src/safepoint-table.cc
src/scopeinfo.cc
src/scopeinfo.h
src/scopes.cc
src/scopes.h
src/string-stream.cc
src/types.cc
src/types.h
src/typing.cc
src/unique.h
src/v8.cc
src/variables.cc
src/variables.h
src/x64/assembler-x64.cc
src/x64/assembler-x64.h
src/x64/code-stubs-x64.cc
src/x64/deoptimizer-x64.cc
src/x64/disasm-x64.cc
src/x64/lithium-codegen-x64.cc
src/x64/lithium-x64.cc
src/x64/lithium-x64.h
src/x87/code-stubs-x87.cc
src/x87/deoptimizer-x87.cc
src/x87/lithium-codegen-x87.cc
src/zone-allocator.h
src/zone-containers.h
test/cctest/cctest.gyp
test/cctest/cctest.h
test/cctest/cctest.status
test/cctest/compiler/call-tester.h [new file with mode: 0644]
test/cctest/compiler/codegen-tester.cc [new file with mode: 0644]
test/cctest/compiler/codegen-tester.h [new file with mode: 0644]
test/cctest/compiler/compiler/call-tester.h [new file with mode: 0644]
test/cctest/compiler/compiler/codegen-tester.cc [new file with mode: 0644]
test/cctest/compiler/compiler/codegen-tester.h [new file with mode: 0644]
test/cctest/compiler/compiler/function-tester.h [new file with mode: 0644]
test/cctest/compiler/compiler/graph-builder-tester.cc [new file with mode: 0644]
test/cctest/compiler/compiler/graph-builder-tester.h [new file with mode: 0644]
test/cctest/compiler/compiler/graph-tester.h [new file with mode: 0644]
test/cctest/compiler/compiler/instruction-selector-tester.h [new file with mode: 0644]
test/cctest/compiler/compiler/simplified-graph-builder.cc [new file with mode: 0644]
test/cctest/compiler/compiler/simplified-graph-builder.h [new file with mode: 0644]
test/cctest/compiler/compiler/test-branch-combine.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-codegen-deopt.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-gap-resolver.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-graph-reducer.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-instruction-selector-arm.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-instruction-selector.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-instruction.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-js-constant-cache.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-js-context-specialization.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-js-typed-lowering.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-linkage.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-machine-operator-reducer.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-node-algorithm.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-node-cache.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-node.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-operator.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-phi-reducer.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-pipeline.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-representation-change.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-run-deopt.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-run-intrinsics.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-run-jsbranches.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-run-jscalls.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-run-jsexceptions.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-run-jsops.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-run-machops.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-run-variables.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-schedule.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-scheduler.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-simplified-lowering.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-structured-ifbuilder-fuzzer.cc [new file with mode: 0644]
test/cctest/compiler/compiler/test-structured-machine-assembler.cc [new file with mode: 0644]
test/cctest/compiler/compiler/value-helper.h [new file with mode: 0644]
test/cctest/compiler/function-tester.h [new file with mode: 0644]
test/cctest/compiler/graph-builder-tester.cc [new file with mode: 0644]
test/cctest/compiler/graph-builder-tester.h [new file with mode: 0644]
test/cctest/compiler/graph-tester.h [new file with mode: 0644]
test/cctest/compiler/instruction-selector-tester.h [new file with mode: 0644]
test/cctest/compiler/simplified-graph-builder.cc [new file with mode: 0644]
test/cctest/compiler/simplified-graph-builder.h [new file with mode: 0644]
test/cctest/compiler/test-branch-combine.cc [new file with mode: 0644]
test/cctest/compiler/test-codegen-deopt.cc [new file with mode: 0644]
test/cctest/compiler/test-gap-resolver.cc [new file with mode: 0644]
test/cctest/compiler/test-graph-reducer.cc [new file with mode: 0644]
test/cctest/compiler/test-instruction-selector-arm.cc [new file with mode: 0644]
test/cctest/compiler/test-instruction-selector.cc [new file with mode: 0644]
test/cctest/compiler/test-instruction.cc [new file with mode: 0644]
test/cctest/compiler/test-js-constant-cache.cc [new file with mode: 0644]
test/cctest/compiler/test-js-context-specialization.cc [new file with mode: 0644]
test/cctest/compiler/test-js-typed-lowering.cc [new file with mode: 0644]
test/cctest/compiler/test-linkage.cc [new file with mode: 0644]
test/cctest/compiler/test-machine-operator-reducer.cc [new file with mode: 0644]
test/cctest/compiler/test-node-algorithm.cc [new file with mode: 0644]
test/cctest/compiler/test-node-cache.cc [new file with mode: 0644]
test/cctest/compiler/test-node.cc [new file with mode: 0644]
test/cctest/compiler/test-operator.cc [new file with mode: 0644]
test/cctest/compiler/test-phi-reducer.cc [new file with mode: 0644]
test/cctest/compiler/test-pipeline.cc [new file with mode: 0644]
test/cctest/compiler/test-representation-change.cc [new file with mode: 0644]
test/cctest/compiler/test-run-deopt.cc [new file with mode: 0644]
test/cctest/compiler/test-run-intrinsics.cc [new file with mode: 0644]
test/cctest/compiler/test-run-jsbranches.cc [new file with mode: 0644]
test/cctest/compiler/test-run-jscalls.cc [new file with mode: 0644]
test/cctest/compiler/test-run-jsexceptions.cc [new file with mode: 0644]
test/cctest/compiler/test-run-jsops.cc [new file with mode: 0644]
test/cctest/compiler/test-run-machops.cc [new file with mode: 0644]
test/cctest/compiler/test-run-variables.cc [new file with mode: 0644]
test/cctest/compiler/test-schedule.cc [new file with mode: 0644]
test/cctest/compiler/test-scheduler.cc [new file with mode: 0644]
test/cctest/compiler/test-simplified-lowering.cc [new file with mode: 0644]
test/cctest/compiler/test-structured-ifbuilder-fuzzer.cc [new file with mode: 0644]
test/cctest/compiler/test-structured-machine-assembler.cc [new file with mode: 0644]
test/cctest/compiler/value-helper.h [new file with mode: 0644]
test/cctest/test-alloc.cc
test/cctest/test-assembler-arm.cc
test/cctest/test-assembler-arm64.cc
test/cctest/test-assembler-ia32.cc
test/cctest/test-assembler-x64.cc
test/cctest/test-checks.cc [new file with mode: 0644]
test/cctest/test-disasm-ia32.cc
test/cctest/test-disasm-x64.cc
test/cctest/test-parsing.cc
test/cctest/test-regexp.cc
test/cctest/test-symbols.cc
test/fuzz-natives/fuzz-natives.status
test/mjsunit/assert-opt-and-deopt.js
test/mjsunit/mjsunit.status
test/mjsunit/runtime-gen/classof.js [deleted file]
tools/generate-runtime-tests.py
tools/gyp/v8.gyp
tools/run-tests.py

index 496cef0dbd0d56a55dd558710de82c66c5c5859a..9650792e12529826734122054b1bc8668fdabeeb 100644 (file)
         'defines': [
           'WIN32',
         ],
+        # 4351: VS 2005 and later are warning us that they've fixed a bug
+        #       present in VS 2003 and earlier.
+        'msvs_disabled_warnings': [4351],
         'msvs_configuration_attributes': {
           'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
           'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
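
The warning silenced here is MSVC's C4351. A minimal standalone sketch of the construct it fires on (hypothetical example, not code from this patch):

    // C4351: "new behavior: elements of array 'Buffer::data' will be default
    // initialized". VS2003 left such members uninitialized; VS2005 and later
    // zero-initialize them and warn that the behavior changed.
    struct Buffer {
      int data[4];
      Buffer() : data() {}  // triggers C4351 under VS2005+
    };

    int main() {
      Buffer b;
      return b.data[0];  // 0 under the fixed (VS2005+) semantics
    }
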
index c1e714a146a584f86a3939f1a08fa9c587bcdde0..b1eace3d1f7b60f69642b913ad1bb1dc4a3690d2 100644 (file)
@@ -5596,7 +5596,7 @@ class Internals {
   static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
   static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
   static const int kContextHeaderSize = 2 * kApiPointerSize;
-  static const int kContextEmbedderDataIndex = 76;
+  static const int kContextEmbedderDataIndex = 95;
   static const int kFullStringRepresentationMask = 0x07;
   static const int kStringEncodingMask = 0x4;
   static const int kExternalTwoByteRepresentationTag = 0x02;
index 5e8f14ab0e3eaada8bd9799c1e7f718c74b4eef2..df45159767baf629160e2ddb694f8e0fefad0dc2 100644 (file)
@@ -1544,6 +1544,15 @@ void Assembler::sdiv(Register dst, Register src1, Register src2,
 }
 
 
+void Assembler::udiv(Register dst, Register src1, Register src2,
+                     Condition cond) {
+  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+  ASSERT(IsEnabled(SUDIV));
+  emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 |
+       src2.code() * B8 | B4 | src1.code());
+}
+
+
 void Assembler::mul(Register dst, Register src1, Register src2,
                     SBit s, Condition cond) {
   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
@@ -2156,9 +2165,14 @@ void Assembler::vldr(const DwVfpRegister dst,
 void Assembler::vldr(const DwVfpRegister dst,
                      const MemOperand& operand,
                      const Condition cond) {
-  ASSERT(!operand.rm().is_valid());
   ASSERT(operand.am_ == Offset);
-  vldr(dst, operand.rn(), operand.offset(), cond);
+  if (operand.rm().is_valid()) {
+    add(ip, operand.rn(),
+        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
+    vldr(dst, ip, 0, cond);
+  } else {
+    vldr(dst, operand.rn(), operand.offset(), cond);
+  }
 }
 
 
@@ -2199,9 +2213,14 @@ void Assembler::vldr(const SwVfpRegister dst,
 void Assembler::vldr(const SwVfpRegister dst,
                      const MemOperand& operand,
                      const Condition cond) {
-  ASSERT(!operand.rm().is_valid());
   ASSERT(operand.am_ == Offset);
-  vldr(dst, operand.rn(), operand.offset(), cond);
+  if (operand.rm().is_valid()) {
+    add(ip, operand.rn(),
+        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
+    vldr(dst, ip, 0, cond);
+  } else {
+    vldr(dst, operand.rn(), operand.offset(), cond);
+  }
 }
 
 
@@ -2242,9 +2261,14 @@ void Assembler::vstr(const DwVfpRegister src,
 void Assembler::vstr(const DwVfpRegister src,
                      const MemOperand& operand,
                      const Condition cond) {
-  ASSERT(!operand.rm().is_valid());
   ASSERT(operand.am_ == Offset);
-  vstr(src, operand.rn(), operand.offset(), cond);
+  if (operand.rm().is_valid()) {
+    add(ip, operand.rn(),
+        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
+    vstr(src, ip, 0, cond);
+  } else {
+    vstr(src, operand.rn(), operand.offset(), cond);
+  }
 }
 
 
@@ -2284,9 +2308,14 @@ void Assembler::vstr(const SwVfpRegister src,
 void Assembler::vstr(const SwVfpRegister src,
                      const MemOperand& operand,
                      const Condition cond) {
-  ASSERT(!operand.rm().is_valid());
   ASSERT(operand.am_ == Offset);
-  vstr(src, operand.rn(), operand.offset(), cond);
+  if (operand.rm().is_valid()) {
+    add(ip, operand.rn(),
+        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
+    vstr(src, ip, 0, cond);
+  } else {
+    vstr(src, operand.rn(), operand.offset(), cond);
+  }
 }
 
 
@@ -3125,6 +3154,7 @@ bool Assembler::IsNop(Instr instr, int type) {
 }
 
 
+// static
 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
   uint32_t dummy1;
   uint32_t dummy2;
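
The new Assembler::udiv above sets bit 21 of the instruction word, which is exactly the bit the disassembler and simulator changes further down key on to distinguish UDIV from SDIV. A standalone sketch (plain C++, not V8 code) reproducing the encoding:

    #include <cassert>
    #include <cstdint>

    const uint32_t B4 = 1 << 4, B8 = 1 << 8, B12 = 1 << 12, B16 = 1 << 16,
                   B20 = 1 << 20, B21 = 1 << 21, B24 = 1 << 24, B25 = 1 << 25,
                   B26 = 1 << 26;

    // cond | 0111 0011 | Rd | 1111 | Rm | 0001 | Rn   =>   Rd = Rn / Rm
    uint32_t EncodeUdiv(uint32_t cond, uint32_t rd, uint32_t rn, uint32_t rm) {
      return cond | B26 | B25 | B24 | B21 | B20 | rd * B16 | 0xfu * B12 |
             rm * B8 | B4 | rn;
    }

    int main() {
      const uint32_t al = 0xe0000000u;           // "always" condition
      uint32_t instr = EncodeUdiv(al, 0, 1, 2);  // udiv r0, r1, r2
      assert((instr >> 21) & 1);  // bit 21 set: UDIV rather than SDIV
      return 0;
    }
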
index e0b89a5d86cc2999b81609946b8f48eb386a9f29..839bb0a22979dac09e5056458bf272e68b1a3c29 100644 (file)
@@ -922,6 +922,35 @@ class Assembler : public AssemblerBase {
   void mvn(Register dst, const Operand& src,
            SBit s = LeaveCC, Condition cond = al);
 
+  // Shift instructions
+
+  void asr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+           Condition cond = al) {
+    if (src2.is_reg()) {
+      mov(dst, Operand(src1, ASR, src2.rm()), s, cond);
+    } else {
+      mov(dst, Operand(src1, ASR, src2.immediate()), s, cond);
+    }
+  }
+
+  void lsl(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+           Condition cond = al) {
+    if (src2.is_reg()) {
+      mov(dst, Operand(src1, LSL, src2.rm()), s, cond);
+    } else {
+      mov(dst, Operand(src1, LSL, src2.immediate()), s, cond);
+    }
+  }
+
+  void lsr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+           Condition cond = al) {
+    if (src2.is_reg()) {
+      mov(dst, Operand(src1, LSR, src2.rm()), s, cond);
+    } else {
+      mov(dst, Operand(src1, LSR, src2.immediate()), s, cond);
+    }
+  }
+
   // Multiply instructions
 
   void mla(Register dst, Register src1, Register src2, Register srcA,
@@ -933,6 +962,8 @@ class Assembler : public AssemblerBase {
   void sdiv(Register dst, Register src1, Register src2,
             Condition cond = al);
 
+  void udiv(Register dst, Register src1, Register src2, Condition cond = al);
+
   void mul(Register dst, Register src1, Register src2,
            SBit s = LeaveCC, Condition cond = al);
 
@@ -1290,7 +1321,7 @@ class Assembler : public AssemblerBase {
   }
 
   // Check whether an immediate fits an addressing mode 1 instruction.
-  bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
+  static bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
 
   // Check whether an immediate fits an addressing mode 2 instruction.
   bool ImmediateFitsAddrMode2Instruction(int32_t imm32);
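
The asr/lsl/lsr helpers added above are thin sugar over mov with a shifted operand, so the following two lines assemble to the same instruction (usage sketch only, assuming the usual __ masm-> macro and an ARM Assembler in scope):

    __ lsl(r0, r1, Operand(2));       // r0 = r1 << 2
    __ mov(r0, Operand(r1, LSL, 2));  // equivalent long form
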
index b997fd88fe35473538a0a09e473cd93fcf7459be..919a4ec755f5f199eebeef91ec661bf46de8942c 100644 (file)
@@ -19,7 +19,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r2 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
 }
 
@@ -27,14 +27,14 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
 void FastNewContextStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r1 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
 void ToNumberStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
@@ -42,7 +42,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r0 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
 }
 
@@ -56,9 +56,8 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
     Representation::Smi(),
     Representation::Tagged() };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
-      Runtime::FunctionForId(
-          Runtime::kCreateArrayLiteralStubBailout)->entry,
+      MajorKey(), ARRAY_SIZE(registers), registers,
+      Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
       representations);
 }
 
@@ -67,7 +66,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r3, r2, r1, r0 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
 }
 
@@ -75,7 +74,36 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r2, r3 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void InstanceofStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  Register registers[] = {cp, left(), right()};
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void CallFunctionStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  // r1  function    the function to call
+  Register registers[] = {cp, r1};
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void CallConstructStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  // r0 : number of arguments
+  // r1 : the function to call
+  // r2 : feedback vector
+  // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
+  //      vector (Smi)
+  // TODO(turbofan): So far we don't gather type feedback and hence skip the
+  // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+  Register registers[] = {cp, r0, r1, r2};
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
@@ -83,7 +111,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r2, r1, r0 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
 }
 
@@ -93,7 +121,7 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
   Register registers[] = { cp, r0, r1 };
   Address entry =
       Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(entry));
 }
 
@@ -101,7 +129,7 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
 void CompareNilICStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(CompareNilIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
@@ -112,7 +140,7 @@ const Register InterfaceDescriptor::ContextRegister() { return cp; }
 
 
 static void InitializeArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
   // cp -- context
@@ -124,10 +152,8 @@ static void InitializeArrayConstructorDescriptor(
 
   if (constant_stack_parameter_count == 0) {
     Register registers[] = { cp, r1, r2 };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           deopt_handler,
-                           NULL,
-                           constant_stack_parameter_count,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+                           deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)
@@ -137,19 +163,16 @@ static void InitializeArrayConstructorDescriptor(
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           r0,
-                           deopt_handler,
-                           representations,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, r0,
+                           deopt_handler, representations,
                            constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE,
-                           PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
 }
 
 
 static void InitializeInternalArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
   // cp -- context
@@ -160,10 +183,8 @@ static void InitializeInternalArrayConstructorDescriptor(
 
   if (constant_stack_parameter_count == 0) {
     Register registers[] = { cp, r1 };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           deopt_handler,
-                           NULL,
-                           constant_stack_parameter_count,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+                           deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)
@@ -172,39 +193,36 @@ static void InitializeInternalArrayConstructorDescriptor(
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           r0,
-                           deopt_handler,
-                           representations,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, r0,
+                           deopt_handler, representations,
                            constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE,
-                           PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
 }
 
 
 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 0);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0);
 }
 
 
 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 1);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1);
 }
 
 
 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, -1);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1);
 }
 
 
 void ToBooleanStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(ToBooleanIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
@@ -213,26 +231,26 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
 
 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
 }
 
 
 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 1);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
 }
 
 
 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, -1);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
 }
 
 
 void BinaryOpICStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r1, r0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
@@ -242,7 +260,7 @@ void BinaryOpICStub::InitializeInterfaceDescriptor(
 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r2, r1, r0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
 }
 
@@ -250,9 +268,8 @@ void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
 void StringAddStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, r1, r0 };
-  descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
-      Runtime::FunctionForId(Runtime::kStringAdd)->entry);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
+                         Runtime::FunctionForId(Runtime::kStringAdd)->entry);
 }
 
 
@@ -1672,8 +1689,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
 void InstanceofStub::Generate(MacroAssembler* masm) {
   // Call site inlining and patching implies arguments in registers.
   ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
-  // ReturnTrueFalse is only implemented for inlined call sites.
-  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
 
   // Fixed register usage throughout the stub:
   const Register object = r0;  // Object (lhs).
@@ -1695,7 +1710,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
 
   // If there is a call site cache don't look in the global cache, but do the
   // real lookup and update the call site cache.
-  if (!HasCallSiteInlineCheck()) {
+  if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
     Label miss;
     __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
     __ b(ne, &miss);
@@ -1751,11 +1766,15 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
   __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
   __ jmp(&loop);
+  Factory* factory = isolate()->factory();
 
   __ bind(&is_instance);
   if (!HasCallSiteInlineCheck()) {
     __ mov(r0, Operand(Smi::FromInt(0)));
     __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ Move(r0, factory->true_value());
+    }
   } else {
     // Patch the call site to return true.
     __ LoadRoot(r0, Heap::kTrueValueRootIndex);
@@ -1777,6 +1796,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   if (!HasCallSiteInlineCheck()) {
     __ mov(r0, Operand(Smi::FromInt(1)));
     __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ Move(r0, factory->false_value());
+    }
   } else {
     // Patch the call site to return false.
     __ LoadRoot(r0, Heap::kFalseValueRootIndex);
@@ -1806,19 +1828,31 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   // Null is not instance of anything.
   __ cmp(scratch, Operand(isolate()->factory()->null_value()));
   __ b(ne, &object_not_null);
-  __ mov(r0, Operand(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ Move(r0, factory->false_value());
+  } else {
+    __ mov(r0, Operand(Smi::FromInt(1)));
+  }
   __ Ret(HasArgsInRegisters() ? 0 : 2);
 
   __ bind(&object_not_null);
   // Smi values are not instances of anything.
   __ JumpIfNotSmi(object, &object_not_null_or_smi);
-  __ mov(r0, Operand(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ Move(r0, factory->false_value());
+  } else {
+    __ mov(r0, Operand(Smi::FromInt(1)));
+  }
   __ Ret(HasArgsInRegisters() ? 0 : 2);
 
   __ bind(&object_not_null_or_smi);
   // String values are not instances of anything.
   __ IsObjectJSStringType(object, scratch, &slow);
-  __ mov(r0, Operand(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ Move(r0, factory->false_value());
+  } else {
+    __ mov(r0, Operand(Smi::FromInt(1)));
+  }
   __ Ret(HasArgsInRegisters() ? 0 : 2);
 
   // Slow-case.  Tail call builtin.
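
A note on the convention visible in this stub: in the non-inlined path, r0 == Smi(0) signals "is an instance" and Smi(1) "is not", while the new ReturnTrueFalseObject() handling materializes the JS true/false objects directly. A tiny model of that inverted raw convention (plain C++, hypothetical names):

    #include <cassert>

    enum RawInstanceofResult { kIsInstance = 0, kIsNotInstance = 1 };  // Smi 0/1

    bool AsBoolean(RawInstanceofResult r) {
      return r == kIsInstance;  // note the inversion: 0 means true
    }

    int main() {
      assert(AsBoolean(kIsInstance));
      assert(!AsBoolean(kIsNotInstance));
      return 0;
    }
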
index 4dfc284064da81ac5f830e0c44e5207c83c0c64b..e3a795bd1d528188d414438c3c1a99aa6e774fca 100644 (file)
@@ -49,9 +49,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
 
   DeoptimizationInputData* deopt_data =
       DeoptimizationInputData::cast(code->deoptimization_data());
-  SharedFunctionInfo* shared =
-      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
-  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
 #ifdef DEBUG
   Address prev_call_address = NULL;
 #endif
index 70b1eb83b75fb7142c6553e0e4a8c654d6592259..604a2b88120d3d4591944dca153cbeea74f49e8b 100644 (file)
@@ -1097,13 +1097,16 @@ void Decoder::DecodeType3(Instruction* instr) {
     }
     case db_x: {
       if (FLAG_enable_sudiv) {
-        if (!instr->HasW()) {
-          if (instr->Bits(5, 4) == 0x1) {
-            if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+        if (instr->Bits(5, 4) == 0x1) {
+          if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+            if (instr->Bit(21) == 0x1) {
+              // UDIV (in V8 notation matching ARM ISA format) rn = rm/rs
+              Format(instr, "udiv'cond'b 'rn, 'rm, 'rs");
+            } else {
               // SDIV (in V8 notation matching ARM ISA format) rn = rm/rs
               Format(instr, "sdiv'cond'b 'rn, 'rm, 'rs");
-              break;
             }
+            break;
           }
         }
       }
index f4fef13ccecd1a9786a4c9ee2716d0f4a72b9fe6..9098f9d032e46ce4dfad8c775357b566bbf20567 100644 (file)
@@ -4,10 +4,9 @@
 
 #include "src/v8.h"
 
-#include "src/arm/lithium-arm.h"
 #include "src/arm/lithium-codegen-arm.h"
 #include "src/hydrogen-osr.h"
-#include "src/lithium-allocator-inl.h"
+#include "src/lithium-inl.h"
 
 namespace v8 {
 namespace internal {
index 26e8cc5ebccde9644772ad044eed32613a61cb58..37303933ec3382e2e27342f26fe1a5859fa75d47 100644 (file)
@@ -223,6 +223,9 @@ class LInstruction : public ZoneObject {
 
   virtual bool IsControl() const { return false; }
 
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
   void set_environment(LEnvironment* env) { environment_ = env; }
   LEnvironment* environment() const { return environment_; }
   bool HasEnvironment() const { return environment_ != NULL; }
@@ -261,11 +264,12 @@ class LInstruction : public ZoneObject {
   void VerifyCall();
 #endif
 
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
  private:
   // Iterator support.
   friend class InputIterator;
-  virtual int InputCount() = 0;
-  virtual LOperand* InputAt(int i) = 0;
 
   friend class TempIterator;
   virtual int TempCount() = 0;
index 92ef3aa316ee729d453d41cffb113561a92fd2f6..e4cec8cfd2f87a4be33d4fe79c1df6554ee17825 100644 (file)
@@ -932,7 +932,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   int length = deoptimizations_.length();
   if (length == 0) return;
   Handle<DeoptimizationInputData> data =
-      DeoptimizationInputData::New(isolate(), length, TENURED);
+      DeoptimizationInputData::New(isolate(), length, 0, TENURED);
 
   Handle<ByteArray> translations =
       translations_.CreateByteArray(isolate()->factory());
index 7ce3112f8a361e5d0f4fbaa0e3f74065d026a92d..c1b34053758172ad731953ddbbeb25794adbc6da 100644 (file)
@@ -254,7 +254,7 @@ void MacroAssembler::Mls(Register dst, Register src1, Register src2,
     CpuFeatureScope scope(this, MLS);
     mls(dst, src1, src2, srcA, cond);
   } else {
-    ASSERT(!dst.is(srcA));
+    ASSERT(!srcA.is(ip));
     mul(ip, src1, src2, LeaveCC, cond);
     sub(dst, srcA, ip, LeaveCC, cond);
   }
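
The relaxed ASSERT reflects the real hazard in the MLS fallback: the product is written to the scratch register ip first, then dst = srcA - ip, so dst may freely alias srcA, but srcA must not alias ip. A plain C++ sketch of the aliasing (pointers as stand-ins for registers, not V8 code):

    #include <cassert>

    // Models: mul(ip, src1, src2); sub(dst, srcA, ip);
    void MlsFallback(int* dst, int src1, int src2, const int* srcA, int* ip) {
      assert(ip != srcA);  // the hazard the new ASSERT guards
      *ip = src1 * src2;   // clobbers ip
      *dst = *srcA - *ip;  // reads srcA after the clobber
    }

    int main() {
      int ip = 0, out = 7;
      MlsFallback(&out, 2, 3, &out, &ip);  // dst aliasing srcA is fine
      return out == 1 ? 0 : 1;             // 7 - 2*3
    }
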
index d29ca79e935e85eab95b8579977577af691905d8..aa19806662fc2e474e22db63619da405c070867c 100644 (file)
@@ -152,6 +152,9 @@ class MacroAssembler: public Assembler {
   // Register move. May do nothing if the registers are identical.
   void Move(Register dst, Handle<Object> value);
   void Move(Register dst, Register src, Condition cond = al);
+  void Move(Register dst, const Operand& src, Condition cond = al) {
+    if (!src.is_reg() || !src.rm().is(dst)) mov(dst, src, LeaveCC, cond);
+  }
   void Move(DwVfpRegister dst, DwVfpRegister src);
 
   void Load(Register dst, const MemOperand& src, Representation r);
index 60a5e806bee2fca1c0bd5d6b9192f2acc6299761..9d1aafacc57ec2d9e9a4c116f34cf96899b82691 100644 (file)
@@ -2711,28 +2711,30 @@ void Simulator::DecodeType3(Instruction* instr) {
     }
     case db_x: {
       if (FLAG_enable_sudiv) {
-        if (!instr->HasW()) {
-          if (instr->Bits(5, 4) == 0x1) {
-             if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
-               // sdiv (in V8 notation matching ARM ISA format) rn = rm/rs
-               // Format(instr, "'sdiv'cond'b 'rn, 'rm, 'rs);
-               int rm = instr->RmValue();
-               int32_t rm_val = get_register(rm);
-               int rs = instr->RsValue();
-               int32_t rs_val = get_register(rs);
-               int32_t ret_val = 0;
-               ASSERT(rs_val != 0);
-               if ((rm_val == kMinInt) && (rs_val == -1)) {
-                 ret_val = kMinInt;
-               } else {
-                 ret_val = rm_val / rs_val;
-               }
-               set_register(rn, ret_val);
-               return;
-             }
-           }
-         }
-       }
+        if (instr->Bits(5, 4) == 0x1) {
+          if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+            // (s/u)div (in V8 notation matching ARM ISA format) rn = rm/rs
+            // Format(instr, "'(s/u)div'cond'b 'rn, 'rm, 'rs);
+            int rm = instr->RmValue();
+            int32_t rm_val = get_register(rm);
+            int rs = instr->RsValue();
+            int32_t rs_val = get_register(rs);
+            int32_t ret_val = 0;
+            ASSERT(rs_val != 0);
+            // udiv
+            if (instr->Bit(21) == 0x1) {
+              ret_val = static_cast<int32_t>(static_cast<uint32_t>(rm_val) /
+                                             static_cast<uint32_t>(rs_val));
+            } else if ((rm_val == kMinInt) && (rs_val == -1)) {
+              ret_val = kMinInt;
+            } else {
+              ret_val = rm_val / rs_val;
+            }
+            set_register(rn, ret_val);
+            return;
+          }
+        }
+      }
       // Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
       addr = rn_val - shifter_operand;
       if (instr->HasW()) {
@@ -2772,7 +2774,7 @@ void Simulator::DecodeType3(Instruction* instr) {
           uint32_t rd_val =
               static_cast<uint32_t>(get_register(instr->RdValue()));
           uint32_t bitcount = msbit - lsbit + 1;
-          uint32_t mask = (1 << bitcount) - 1;
+          uint32_t mask = 0xffffffffu >> (32 - bitcount);
           rd_val &= ~(mask << lsbit);
           if (instr->RmValue() != 15) {
             // bfi - bitfield insert.
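
The last hunk fixes the bfi/bfc mask for fields that span all 32 bits: shifting 1 left by 32 is undefined behavior, so the mask is now derived by right-shifting the all-ones constant instead. Standalone sketch of the difference:

    #include <cassert>
    #include <cstdint>

    uint32_t MaskOld(uint32_t bitcount) {
      return (1u << bitcount) - 1;            // UB when bitcount == 32
    }

    uint32_t MaskNew(uint32_t bitcount) {
      return 0xffffffffu >> (32 - bitcount);  // well defined for 1..32
    }

    int main() {
      for (uint32_t n = 1; n < 32; n++) assert(MaskOld(n) == MaskNew(n));
      assert(MaskNew(32) == 0xffffffffu);     // full-width field now works
      return 0;
    }
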
index 2989c2b6409757f234ee7f3cf8f358f10e9647f4..f745975bca14ff14892c03d92387242de9a69fdb 100644 (file)
@@ -20,7 +20,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
   // x2: function info
   Register registers[] = { cp, x2 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
 }
 
@@ -30,7 +30,7 @@ void FastNewContextStub::InitializeInterfaceDescriptor(
   // cp: context
   // x1: function
   Register registers[] = { cp, x1 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
@@ -39,7 +39,7 @@ void ToNumberStub::InitializeInterfaceDescriptor(
   // cp: context
   // x0: value
   Register registers[] = { cp, x0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
@@ -49,7 +49,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
   // x0: value
   Register registers[] = { cp, x0 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
 }
 
@@ -67,9 +67,8 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
     Representation::Smi(),
     Representation::Tagged() };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
-      Runtime::FunctionForId(
-          Runtime::kCreateArrayLiteralStubBailout)->entry,
+      MajorKey(), ARRAY_SIZE(registers), registers,
+      Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
       representations);
 }
 
@@ -83,7 +82,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
   // x0: object literal flags
   Register registers[] = { cp, x3, x2, x1, x0 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
 }
 
@@ -94,7 +93,35 @@ void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
   // x2: feedback vector
   // x3: call feedback slot
   Register registers[] = { cp, x2, x3 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void InstanceofStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  Register registers[] = {cp, left(), right()};
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void CallFunctionStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  // x1  function    the function to call
+  Register registers[] = {cp, x1};
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void CallConstructStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  // x0 : number of arguments
+  // x1 : the function to call
+  // x2 : feedback vector
+  // x3 : slot in feedback vector (smi) (if r2 is not the megamorphic symbol)
+  // TODO(turbofan): So far we don't gather type feedback and hence skip the
+  // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+  Register registers[] = {cp, x0, x1, x2};
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
@@ -106,7 +133,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
   // x0: string
   Register registers[] = { cp, x2, x1, x0 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
 }
 
@@ -119,7 +146,7 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
   Register registers[] = { cp, x0, x1 };
   Address entry =
       Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(entry));
 }
 
@@ -129,7 +156,7 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
   // cp: context
   // x0: value to compare
   Register registers[] = { cp, x0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(CompareNilIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
@@ -140,7 +167,7 @@ const Register InterfaceDescriptor::ContextRegister() { return cp; }
 
 
 static void InitializeArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // cp: context
   // x1: function
@@ -151,10 +178,8 @@ static void InitializeArrayConstructorDescriptor(
 
   if (constant_stack_parameter_count == 0) {
     Register registers[] = { cp, x1, x2 };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           deopt_handler,
-                           NULL,
-                           constant_stack_parameter_count,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+                           deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)
@@ -164,37 +189,34 @@ static void InitializeArrayConstructorDescriptor(
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           x0,
-                           deopt_handler,
-                           representations,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, x0,
+                           deopt_handler, representations,
                            constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE,
-                           PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
 }
 
 
 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 0);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0);
 }
 
 
 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 1);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1);
 }
 
 
 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, -1);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1);
 }
 
 
 static void InitializeInternalArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // cp: context
   // x1: constructor function
@@ -204,10 +226,8 @@ static void InitializeInternalArrayConstructorDescriptor(
 
   if (constant_stack_parameter_count == 0) {
     Register registers[] = { cp, x1 };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           deopt_handler,
-                           NULL,
-                           constant_stack_parameter_count,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+                           deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)
@@ -216,32 +236,29 @@ static void InitializeInternalArrayConstructorDescriptor(
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           x0,
-                           deopt_handler,
-                           representations,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, x0,
+                           deopt_handler, representations,
                            constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE,
-                           PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
 }
 
 
 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
 }
 
 
 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 1);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
 }
 
 
 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, -1);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
 }
 
 
@@ -250,7 +267,7 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
   // cp: context
   // x0: value
   Register registers[] = { cp, x0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(ToBooleanIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
@@ -263,7 +280,7 @@ void BinaryOpICStub::InitializeInterfaceDescriptor(
   // x1: left operand
   // x0: right operand
   Register registers[] = { cp, x1, x0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
@@ -277,7 +294,7 @@ void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
   // x1: left operand
   // x0: right operand
   Register registers[] = { cp, x2, x1, x0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
 }
 
@@ -288,9 +305,8 @@ void StringAddStub::InitializeInterfaceDescriptor(
   // x1: left operand
   // x0: right operand
   Register registers[] = { cp, x1, x0 };
-  descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
-      Runtime::FunctionForId(Runtime::kStringAdd)->entry);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
+                         Runtime::FunctionForId(Runtime::kStringAdd)->entry);
 }
 
 
index 712881d9db08f381145a6a9ef9912571eba6bb57..9ed0607a6253776da1e9ed8c091c6f1b496cf5cd 100644 (file)
@@ -32,9 +32,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
 
   DeoptimizationInputData* deopt_data =
       DeoptimizationInputData::cast(code->deoptimization_data());
-  SharedFunctionInfo* shared =
-      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
-  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
   Address code_start_address = code->instruction_start();
 #ifdef DEBUG
   Address prev_call_address = NULL;
index ff224182601d97c4d883f286411a27e26aafe770..1fbf72ff6174c015b6cd13179d4cd37a76ffdfe1 100644 (file)
@@ -4,15 +4,13 @@
 
 #include "src/v8.h"
 
-#include "src/arm64/lithium-arm64.h"
 #include "src/arm64/lithium-codegen-arm64.h"
 #include "src/hydrogen-osr.h"
-#include "src/lithium-allocator-inl.h"
+#include "src/lithium-inl.h"
 
 namespace v8 {
 namespace internal {
 
-
 #define DEFINE_COMPILE(type)                            \
   void L##type::CompileToNative(LCodeGen* generator) {  \
     generator->Do##type(this);                          \
index 865a09501b0c5733e810f1c8ba773e5e9ae88a67..decfd34f86dd6123bbe622da29def1094a88bce3 100644 (file)
@@ -234,6 +234,9 @@ class LInstruction : public ZoneObject {
 
   virtual bool IsControl() const { return false; }
 
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
   void set_environment(LEnvironment* env) { environment_ = env; }
   LEnvironment* environment() const { return environment_; }
   bool HasEnvironment() const { return environment_ != NULL; }
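
Note: the base class makes deletion strictly opt-in — TryDelete() answers false unless a concrete instruction overrides it. A small stand-alone sketch of the pattern (class names illustrative):

    #include <cassert>

    struct LInstruction {
      virtual ~LInstruction() {}
      // Try deleting this instruction if possible; false means "kept".
      virtual bool TryDelete() { return false; }
    };

    struct LDeletableInstruction : LInstruction {
      virtual bool TryDelete() { return true; }  // this one can remove itself
    };

    int main() {
      LInstruction kept;
      LDeletableInstruction removable;
      assert(!kept.TryDelete());
      assert(removable.TryDelete());
      return 0;
    }
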
index aa6030e3026e574613fb341d93132f4e46e35cb4..21d1754e435ed3fa7c612c4c351b248e8ebb6c00 100644 (file)
@@ -938,7 +938,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   if (length == 0) return;
 
   Handle<DeoptimizationInputData> data =
-      DeoptimizationInputData::New(isolate(), length, TENURED);
+      DeoptimizationInputData::New(isolate(), length, 0, TENURED);
 
   Handle<ByteArray> translations =
       translations_.CreateByteArray(isolate()->factory());
index d63575be5e698fdf8eb99f7d635208e14529e8f6..0d2ba3a2978f0790f3cdcc1ac98b627cda1d8c6c 100644 (file)
@@ -14,6 +14,7 @@
 #include "src/assembler.h"
 #include "src/disasm.h"
 #include "src/macro-assembler.h"
+#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
@@ -2912,7 +2913,7 @@ T Simulator::FPMaxNM(T a, T b) {
 template <typename T>
 T Simulator::FPMin(T a, T b) {
   // NaNs should be handled elsewhere.
-  ASSERT(!isnan(a) && !isnan(b));
+  ASSERT(!std::isnan(a) && !std::isnan(b));
 
   if ((a == 0.0) && (b == 0.0) &&
       (copysign(1.0, a) != copysign(1.0, b))) {
index 4c4d515d1e35a5a670ede965ee9e1c502a0d6b32..471dac627f65309b85ad6bbacfd3d298e809919b 100644 (file)
@@ -211,6 +211,7 @@ class Simulator : public DecoderVisitor {
    public:
     template<typename T>
     explicit CallArgument(T argument) {
+      bits_ = 0;
       ASSERT(sizeof(argument) <= sizeof(bits_));
       memcpy(&bits_, &argument, sizeof(argument));
       type_ = X_ARG;
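
Note: zeroing bits_ before the memcpy matters because the argument can be narrower than the storage; without it the upper bytes of bits_ would be left uninitialized. A stand-alone sketch of the effect, using plain scalars in place of CallArgument:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      int32_t argument = -1;  // 4 bytes, copied into 8 bytes of storage
      uint64_t bits = 0;      // the added "bits_ = 0;"
      std::memcpy(&bits, &argument, sizeof(argument));
      assert((bits >> 32) == 0);  // upper half is now well defined
      return 0;
    }
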
index 8494983772ee3d1aeaa0844a2b1985e6cb04fb02..b7f870e0ae2b0618e12a053ff51dda2d3736ffa0 100644 (file)
@@ -88,13 +88,13 @@ inline bool IsQuietNaN(T num) {
 
 // Convert the NaN in 'num' to a quiet NaN.
 inline double ToQuietNaN(double num) {
-  ASSERT(isnan(num));
+  ASSERT(std::isnan(num));
   return rawbits_to_double(double_to_rawbits(num) | kDQuietNanMask);
 }
 
 
 inline float ToQuietNaN(float num) {
-  ASSERT(isnan(num));
+  ASSERT(std::isnan(num));
   return rawbits_to_float(float_to_rawbits(num) | kSQuietNanMask);
 }
 
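
Note: qualifying isnan with std:: selects the type-safe <cmath> overloads for float and double rather than relying on whichever C99 macro the platform headers expose. A minimal illustration:

    #include <cassert>
    #include <cmath>
    #include <limits>

    int main() {
      float f = std::numeric_limits<float>::quiet_NaN();
      double d = std::numeric_limits<double>::quiet_NaN();
      // Overload resolution picks std::isnan(float) and std::isnan(double);
      // no implicit conversion of the argument is required.
      assert(std::isnan(f));
      assert(std::isnan(d));
      return 0;
    }
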
index b48a1f164aee44f460a5620a141b38880168e01f..d0051ecf825ecad1648ce644d27946f560742a37 100644 (file)
--- a/src/ast.h
+++ b/src/ast.h
@@ -15,7 +15,6 @@
 #include "src/isolate.h"
 #include "src/jsregexp.h"
 #include "src/list-inl.h"
-#include "src/ostreams.h"
 #include "src/runtime.h"
 #include "src/small-pointer-list.h"
 #include "src/smart-pointers.h"
@@ -113,6 +112,7 @@ class BreakableStatement;
 class Expression;
 class IterationStatement;
 class MaterializedLiteral;
+class OStream;
 class Statement;
 class TargetCollector;
 class TypeFeedbackOracle;
@@ -1516,6 +1516,13 @@ class ObjectLiteral V8_FINAL : public MaterializedLiteral {
   // marked expressions, no store code is emitted.
   void CalculateEmitStore(Zone* zone);
 
+  // Assemble bitfield of flags for the CreateObjectLiteral helper.
+  int ComputeFlags() const {
+    int flags = fast_elements() ? kFastElements : kNoFlags;
+    flags |= has_function() ? kHasFunction : kNoFlags;
+    return flags;
+  }
+
   enum Flags {
     kNoFlags = 0,
     kFastElements = 1,
@@ -1595,6 +1602,13 @@ class ArrayLiteral V8_FINAL : public MaterializedLiteral {
   // Populate the constant elements fixed array.
   void BuildConstantElements(Isolate* isolate);
 
+  // Assemble bitfield of flags for the CreateArrayLiteral helper.
+  int ComputeFlags() const {
+    int flags = depth() == 1 ? kShallowElements : kNoFlags;
+    flags |= ArrayLiteral::kDisableMementos;
+    return flags;
+  }
+
   enum Flags {
     kNoFlags = 0,
     kShallowElements = 1,
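
Note: both ComputeFlags() helpers just OR one bit per boolean property into the flags word handed to the runtime. A stand-alone sketch mirroring the ObjectLiteral variant (the free function here is illustrative):

    #include <cassert>

    enum Flags { kNoFlags = 0, kFastElements = 1, kHasFunction = 1 << 1 };

    int ComputeFlags(bool fast_elements, bool has_function) {
      int flags = fast_elements ? kFastElements : kNoFlags;
      flags |= has_function ? kHasFunction : kNoFlags;
      return flags;
    }

    int main() {
      assert(ComputeFlags(true, false) == kFastElements);
      assert(ComputeFlags(true, true) == (kFastElements | kHasFunction));
      return 0;
    }
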
index de91077142bdfc08f1dce4ffa4c53b0dc6f757c3..23b6b9e54152a77753f0fa975efbd65bdad61231 100644 (file)
@@ -152,29 +152,6 @@ inline void CheckNonEqualsHelper(const char* file,
 }
 
 
-// Helper function used by the CHECK function when given floating
-// point arguments.  Should not be called directly.
-inline void CheckEqualsHelper(const char* file,
-                              int line,
-                              const char* expected_source,
-                              double expected,
-                              const char* value_source,
-                              double value) {
-  // Force values to 64 bit memory to truncate 80 bit precision on IA32.
-  volatile double* exp = new double[1];
-  *exp = expected;
-  volatile double* val = new double[1];
-  *val = value;
-  if (*exp != *val) {
-    V8_Fatal(file, line,
-             "CHECK_EQ(%s, %s) failed\n#   Expected: %f\n#   Found: %f",
-             expected_source, value_source, *exp, *val);
-  }
-  delete[] exp;
-  delete[] val;
-}
-
-
 inline void CheckNonEqualsHelper(const char* file,
                               int line,
                               const char* expected_source,
@@ -189,27 +166,6 @@ inline void CheckNonEqualsHelper(const char* file,
 }
 
 
-inline void CheckNonEqualsHelper(const char* file,
-                                 int line,
-                                 const char* expected_source,
-                                 double expected,
-                                 const char* value_source,
-                                 double value) {
-  // Force values to 64 bit memory to truncate 80 bit precision on IA32.
-  volatile double* exp = new double[1];
-  *exp = expected;
-  volatile double* val = new double[1];
-  *val = value;
-  if (*exp == *val) {
-    V8_Fatal(file, line,
-             "CHECK_NE(%s, %s) failed\n#   Value: %f",
-             expected_source, value_source, *val);
-  }
-  delete[] exp;
-  delete[] val;
-}
-
-
 #define CHECK_EQ(expected, value) CheckEqualsHelper(__FILE__, __LINE__, \
   #expected, expected, #value, value)
 
index e58bf5abdb43b79699b32fea96f7fc6f340dfd08..4d5aec9602863828306f9f4c0442dee22c32c946 100644 (file)
@@ -1534,6 +1534,38 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
 }
 
 
+static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
+                                               const char* holder_expr) {
+  Isolate* isolate = native_context->GetIsolate();
+  Factory* factory = isolate->factory();
+  Handle<GlobalObject> global(native_context->global_object());
+  const char* period_pos = strchr(holder_expr, '.');
+  if (period_pos == NULL) {
+    return Handle<JSObject>::cast(
+        Object::GetPropertyOrElement(
+            global, factory->InternalizeUtf8String(holder_expr))
+            .ToHandleChecked());
+  }
+  const char* inner = period_pos + 1;
+  ASSERT_EQ(NULL, strchr(inner, '.'));
+  Vector<const char> property(holder_expr,
+                              static_cast<int>(period_pos - holder_expr));
+  Handle<String> property_string = factory->InternalizeUtf8String(property);
+  ASSERT(!property_string.is_null());
+  Handle<JSObject> object = Handle<JSObject>::cast(
+      Object::GetProperty(global, property_string).ToHandleChecked());
+  if (strcmp("prototype", inner) == 0) {
+    Handle<JSFunction> function = Handle<JSFunction>::cast(object);
+    return Handle<JSObject>(JSObject::cast(function->prototype()));
+  }
+  Handle<String> inner_string = factory->InternalizeUtf8String(inner);
+  ASSERT(!inner_string.is_null());
+  Handle<Object> value =
+      Object::GetProperty(object, inner_string).ToHandleChecked();
+  return Handle<JSObject>::cast(value);
+}
+
+
 #define INSTALL_NATIVE(Type, name, var)                                        \
   Handle<String> var##_name =                                                  \
       factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR(name));          \
@@ -1541,6 +1573,12 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
       handle(native_context()->builtins()), var##_name).ToHandleChecked();     \
   native_context()->set_##var(Type::cast(*var##_native));
 
+#define INSTALL_NATIVE_MATH(name)                                    \
+  {                                                                  \
+    Handle<Object> fun =                                             \
+        ResolveBuiltinIdHolder(native_context(), "Math." #name);     \
+    native_context()->set_math_##name##_fun(JSFunction::cast(*fun)); \
+  }
 
 void Genesis::InstallNativeFunctions() {
   HandleScope scope(isolate());
@@ -1583,6 +1621,26 @@ void Genesis::InstallNativeFunctions() {
                  native_object_get_notifier);
   INSTALL_NATIVE(JSFunction, "NativeObjectNotifierPerformChange",
                  native_object_notifier_perform_change);
+
+  INSTALL_NATIVE_MATH(abs)
+  INSTALL_NATIVE_MATH(acos)
+  INSTALL_NATIVE_MATH(asin)
+  INSTALL_NATIVE_MATH(atan)
+  INSTALL_NATIVE_MATH(atan2)
+  INSTALL_NATIVE_MATH(ceil)
+  INSTALL_NATIVE_MATH(cos)
+  INSTALL_NATIVE_MATH(exp)
+  INSTALL_NATIVE_MATH(floor)
+  INSTALL_NATIVE_MATH(imul)
+  INSTALL_NATIVE_MATH(log)
+  INSTALL_NATIVE_MATH(max)
+  INSTALL_NATIVE_MATH(min)
+  INSTALL_NATIVE_MATH(pow)
+  INSTALL_NATIVE_MATH(random)
+  INSTALL_NATIVE_MATH(round)
+  INSTALL_NATIVE_MATH(sin)
+  INSTALL_NATIVE_MATH(sqrt)
+  INSTALL_NATIVE_MATH(tan)
 }
 
 
@@ -2029,28 +2087,6 @@ bool Genesis::InstallExperimentalNatives() {
 }
 
 
-static Handle<JSObject> ResolveBuiltinIdHolder(
-    Handle<Context> native_context,
-    const char* holder_expr) {
-  Isolate* isolate = native_context->GetIsolate();
-  Factory* factory = isolate->factory();
-  Handle<GlobalObject> global(native_context->global_object());
-  const char* period_pos = strchr(holder_expr, '.');
-  if (period_pos == NULL) {
-    return Handle<JSObject>::cast(Object::GetPropertyOrElement(
-        global, factory->InternalizeUtf8String(holder_expr)).ToHandleChecked());
-  }
-  ASSERT_EQ(".prototype", period_pos);
-  Vector<const char> property(holder_expr,
-                              static_cast<int>(period_pos - holder_expr));
-  Handle<String> property_string = factory->InternalizeUtf8String(property);
-  ASSERT(!property_string.is_null());
-  Handle<JSFunction> function = Handle<JSFunction>::cast(
-      Object::GetProperty(global, property_string).ToHandleChecked());
-  return Handle<JSObject>(JSObject::cast(function->prototype()));
-}
-
-
 static void InstallBuiltinFunctionId(Handle<JSObject> holder,
                                      const char* function_name,
                                      BuiltinFunctionId id) {
@@ -2336,6 +2372,10 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
         isolate(), builtins, Builtins::GetName(id)).ToHandleChecked();
     Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
     builtins->set_javascript_builtin(id, *function);
+    // TODO(mstarzinger): This is just a temporary hack to make TurboFan work,
+    // the correct solution is to restore the context register after invoking
+    // builtins from full-codegen.
+    function->shared()->set_optimization_disabled(true);
     if (!Compiler::EnsureCompiled(function, CLEAR_EXCEPTION)) {
       return false;
     }
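
Note: ResolveBuiltinIdHolder() splits a "Holder.inner" expression at the first period and resolves each piece in turn, now allowing an arbitrary inner property rather than only "prototype". A stand-alone sketch using a nested map in place of property lookup on the global object:

    #include <cassert>
    #include <cstring>
    #include <map>
    #include <string>

    typedef std::map<std::string, int> Object;

    int Resolve(const std::map<std::string, Object>& global,
                const char* holder_expr) {
      const char* period_pos = std::strchr(holder_expr, '.');
      assert(period_pos != NULL);  // the no-period case is elided here
      const char* inner = period_pos + 1;
      assert(std::strchr(inner, '.') == NULL);  // one level of nesting only
      std::string holder(holder_expr, period_pos - holder_expr);
      return global.at(holder).at(inner);  // e.g. "Math" then "pow"
    }

    int main() {
      std::map<std::string, Object> global;
      global["Math"]["pow"] = 1;
      global["Math"]["max"] = 2;
      assert(Resolve(global, "Math.pow") == 1);
      assert(Resolve(global, "Math.max") == 2);
      return 0;
    }
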
index e2c2c079f04b72b8b69b4738380cfaf17844f244..e5a4caa6c8a3926601b8ac7af65c2ee9d798f1fc 100644 (file)
@@ -14,6 +14,50 @@ intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
 } }  // namespace v8::internal
 
 
+static bool CheckEqualsStrict(volatile double* exp, volatile double* val) {
+  v8::internal::DoubleRepresentation exp_rep(*exp);
+  v8::internal::DoubleRepresentation val_rep(*val);
+  if (std::isnan(exp_rep.value) && std::isnan(val_rep.value)) return true;
+  return exp_rep.bits == val_rep.bits;
+}
+
+
+void CheckEqualsHelper(const char* file, int line, const char* expected_source,
+                       double expected, const char* value_source,
+                       double value) {
+  // Force values to 64 bit memory to truncate 80 bit precision on IA32.
+  volatile double* exp = new double[1];
+  *exp = expected;
+  volatile double* val = new double[1];
+  *val = value;
+  if (!CheckEqualsStrict(exp, val)) {
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#   Expected: %f\n#   Found: %f",
+             expected_source, value_source, *exp, *val);
+  }
+  delete[] exp;
+  delete[] val;
+}
+
+
+void CheckNonEqualsHelper(const char* file, int line,
+                          const char* expected_source, double expected,
+                          const char* value_source, double value) {
+  // Force values to 64 bit memory to truncate 80 bit precision on IA32.
+  volatile double* exp = new double[1];
+  *exp = expected;
+  volatile double* val = new double[1];
+  *val = value;
+  if (CheckEqualsStrict(exp, val)) {
+    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %f",
+             expected_source, value_source, *val);
+  }
+  delete[] exp;
+  delete[] val;
+}
+
+
 void CheckEqualsHelper(const char* file,
                        int line,
                        const char* expected_source,
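
Note: CheckEqualsStrict() compares the two doubles by bit pattern, except that any two NaNs count as equal. Unlike a plain ==, this distinguishes 0.0 from -0.0 and accepts NaN against NaN. A stand-alone sketch:

    #include <cassert>
    #include <cmath>
    #include <cstring>
    #include <stdint.h>

    static bool CheckEqualsStrict(double exp, double val) {
      if (std::isnan(exp) && std::isnan(val)) return true;
      uint64_t exp_bits, val_bits;
      std::memcpy(&exp_bits, &exp, sizeof(exp));
      std::memcpy(&val_bits, &val, sizeof(val));
      return exp_bits == val_bits;  // bitwise, not numeric, equality
    }

    int main() {
      assert(CheckEqualsStrict(1.5, 1.5));
      assert(CheckEqualsStrict(NAN, NAN));    // NaNs compare equal here...
      assert(!CheckEqualsStrict(0.0, -0.0));  // ...but 0.0 and -0.0 do not.
      return 0;
    }
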
index ae50a96a6dbadba7666fb3271b0b42ff3e6059d2..e39c9bf36ad0fc43ce8a2e63192eaa89a4aed825 100644 (file)
@@ -53,8 +53,14 @@ const bool FLAG_enable_slow_asserts = false;
 } }  // namespace v8::internal
 
 
-void CheckNonEqualsHelper(const char* file,
-                          int line,
+void CheckNonEqualsHelper(const char* file, int line,
+                          const char* expected_source, double expected,
+                          const char* value_source, double value);
+
+void CheckEqualsHelper(const char* file, int line, const char* expected_source,
+                       double expected, const char* value_source, double value);
+
+void CheckNonEqualsHelper(const char* file, int line,
                           const char* unexpected_source,
                           v8::Handle<v8::Value> unexpected,
                           const char* value_source,
index 5df029480c728b8abe1f169cbeb48b3d14f0dac0..f23c80b056e4c0944bd47040fbd90fa87a8b611c 100644 (file)
@@ -63,12 +63,10 @@ void InterfaceDescriptor::Initialize(
 
 
 void CodeStubInterfaceDescriptor::Initialize(
-    int register_parameter_count,
-    Register* registers,
+    CodeStub::Major major, int register_parameter_count, Register* registers,
     Address deoptimization_handler,
     Representation* register_param_representations,
-    int hint_stack_parameter_count,
-    StubFunctionMode function_mode) {
+    int hint_stack_parameter_count, StubFunctionMode function_mode) {
   InterfaceDescriptor::Initialize(register_parameter_count, registers,
                                   register_param_representations);
 
@@ -76,22 +74,18 @@ void CodeStubInterfaceDescriptor::Initialize(
 
   hint_stack_parameter_count_ = hint_stack_parameter_count;
   function_mode_ = function_mode;
+  major_ = major;
 }
 
 
 void CodeStubInterfaceDescriptor::Initialize(
-    int register_parameter_count,
-    Register* registers,
-    Register stack_parameter_count,
-    Address deoptimization_handler,
+    CodeStub::Major major, int register_parameter_count, Register* registers,
+    Register stack_parameter_count, Address deoptimization_handler,
     Representation* register_param_representations,
-    int hint_stack_parameter_count,
-    StubFunctionMode function_mode,
+    int hint_stack_parameter_count, StubFunctionMode function_mode,
     HandlerArgumentsMode handler_mode) {
-  Initialize(register_parameter_count, registers,
-             deoptimization_handler,
-             register_param_representations,
-             hint_stack_parameter_count,
+  Initialize(major, register_parameter_count, registers, deoptimization_handler,
+             register_param_representations, hint_stack_parameter_count,
              function_mode);
   stack_parameter_count_ = stack_parameter_count;
   handler_arguments_mode_ = handler_mode;
@@ -591,7 +585,7 @@ void LoadFastElementStub::InitializeInterfaceDescriptor(
                            LoadIC::ReceiverRegister(),
                            LoadIC::NameRegister() };
   STATIC_ASSERT(LoadIC::kParameterCount == 2);
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
 }
 
@@ -602,7 +596,7 @@ void LoadDictionaryElementStub::InitializeInterfaceDescriptor(
                            LoadIC::ReceiverRegister(),
                            LoadIC::NameRegister() };
   STATIC_ASSERT(LoadIC::kParameterCount == 2);
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
 }
 
@@ -614,7 +608,7 @@ void KeyedLoadGenericStub::InitializeInterfaceDescriptor(
                            LoadIC::NameRegister() };
   STATIC_ASSERT(LoadIC::kParameterCount == 2);
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry);
 }
 
@@ -623,7 +617,7 @@ void LoadFieldStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { InterfaceDescriptor::ContextRegister(),
                            LoadIC::ReceiverRegister() };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
@@ -632,7 +626,7 @@ void StringLengthStub::InitializeInterfaceDescriptor(
   Register registers[] = { InterfaceDescriptor::ContextRegister(),
                            LoadIC::ReceiverRegister(),
                            LoadIC::NameRegister() };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
@@ -642,9 +636,8 @@ void StoreFastElementStub::InitializeInterfaceDescriptor(
                            KeyedStoreIC::ReceiverRegister(),
                            KeyedStoreIC::NameRegister(),
                            KeyedStoreIC::ValueRegister() };
-  descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
-      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure));
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
+                         FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure));
 }
 
 
@@ -655,7 +648,7 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
                            MapRegister(),
                            KeyRegister(),
                            ObjectRegister() };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss));
 }
 
@@ -666,7 +659,7 @@ void StoreGlobalStub::InitializeInterfaceDescriptor(
                            StoreIC::ReceiverRegister(),
                            StoreIC::NameRegister(),
                            StoreIC::ValueRegister() };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(StoreIC_MissFromStubFailure));
 }
 
index fd6b0a9e27d4293eec0b6a2d44044dc0487ef258..03f7a69ffa45d7527ca15d9c0da15ff865837cdb 100644 (file)
@@ -10,6 +10,7 @@
 #include "src/codegen.h"
 #include "src/globals.h"
 #include "src/macro-assembler.h"
+#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
@@ -348,13 +349,13 @@ class CodeStubInterfaceDescriptor: public InterfaceDescriptor {
  public:
   CodeStubInterfaceDescriptor();
 
-  void Initialize(int register_parameter_count, Register* registers,
-                  Address deoptimization_handler = NULL,
+  void Initialize(CodeStub::Major major, int register_parameter_count,
+                  Register* registers, Address deoptimization_handler = NULL,
                   Representation* register_param_representations = NULL,
                   int hint_stack_parameter_count = -1,
                   StubFunctionMode function_mode = NOT_JS_FUNCTION_STUB_MODE);
-  void Initialize(int register_parameter_count, Register* registers,
-                  Register stack_parameter_count,
+  void Initialize(CodeStub::Major major, int register_parameter_count,
+                  Register* registers, Register stack_parameter_count,
                   Address deoptimization_handler = NULL,
                   Representation* register_param_representations = NULL,
                   int hint_stack_parameter_count = -1,
@@ -394,6 +395,7 @@ class CodeStubInterfaceDescriptor: public InterfaceDescriptor {
   Register stack_parameter_count() const { return stack_parameter_count_; }
   StubFunctionMode function_mode() const { return function_mode_; }
   Address deoptimization_handler() const { return deoptimization_handler_; }
+  CodeStub::Major MajorKey() const { return major_; }
 
  private:
   Register stack_parameter_count_;
@@ -407,6 +409,7 @@ class CodeStubInterfaceDescriptor: public InterfaceDescriptor {
 
   ExternalReference miss_handler_;
   bool has_miss_handler_;
+  CodeStub::Major major_;
 };
 
 
@@ -743,6 +746,9 @@ class InstanceofStub: public PlatformCodeStub {
 
   void Generate(MacroAssembler* masm);
 
+  virtual void InitializeInterfaceDescriptor(
+      Isolate* isolate, CodeStubInterfaceDescriptor* descriptor);
+
  private:
   Major MajorKey() const { return Instanceof; }
   int MinorKey() const { return static_cast<int>(flags_); }
@@ -1132,10 +1138,11 @@ class CallApiGetterStub : public PlatformCodeStub {
 
 class BinaryOpICStub : public HydrogenCodeStub {
  public:
-  BinaryOpICStub(Isolate* isolate, Token::Value op, OverwriteMode mode)
+  BinaryOpICStub(Isolate* isolate, Token::Value op,
+                 OverwriteMode mode = NO_OVERWRITE)
       : HydrogenCodeStub(isolate, UNINITIALIZED), state_(isolate, op, mode) {}
 
-  BinaryOpICStub(Isolate* isolate, const BinaryOpIC::State& state)
+  explicit BinaryOpICStub(Isolate* isolate, const BinaryOpIC::State& state)
       : HydrogenCodeStub(isolate), state_(state) {}
 
   static void GenerateAheadOfTime(Isolate* isolate);
@@ -1618,6 +1625,9 @@ class CallFunctionStub: public PlatformCodeStub {
     return ArgcBits::decode(minor_key);
   }
 
+  virtual void InitializeInterfaceDescriptor(
+      Isolate* isolate, CodeStubInterfaceDescriptor* descriptor);
+
  private:
   int argc_;
   CallFunctionFlags flags_;
@@ -1655,6 +1665,9 @@ class CallConstructStub: public PlatformCodeStub {
     code->set_has_function_cache(RecordCallTarget());
   }
 
+  virtual void InitializeInterfaceDescriptor(
+      Isolate* isolate, CodeStubInterfaceDescriptor* descriptor);
+
  private:
   CallConstructorFlags flags_;
 
index f31895e2d373451a4076213839edb9038832f8e4..669dd28b6a92dd0d89c5d85c1aa43c92be7daad7 100644 (file)
@@ -5,6 +5,8 @@
 #ifndef V8_COMPILER_INTRINSICS_H_
 #define V8_COMPILER_INTRINSICS_H_
 
+#include "src/base/macros.h"
+
 namespace v8 {
 namespace internal {
 
index 71e1c5a049f2309c5495f81e27dddac76f60f728..962e7923e688b7fc4744358a1883c90bd3999a40 100644 (file)
@@ -9,6 +9,7 @@
 #include "src/bootstrapper.h"
 #include "src/codegen.h"
 #include "src/compilation-cache.h"
+#include "src/compiler/pipeline.h"
 #include "src/cpu-profiler.h"
 #include "src/debug.h"
 #include "src/deoptimizer.h"
@@ -57,6 +58,19 @@ CompilationInfo::CompilationInfo(Handle<Script> script,
 }
 
 
+CompilationInfo::CompilationInfo(Isolate* isolate, Zone* zone)
+    : flags_(StrictModeField::encode(SLOPPY)),
+      script_(Handle<Script>::null()),
+      osr_ast_id_(BailoutId::None()),
+      parameter_count_(0),
+      this_has_uses_(true),
+      optimization_id_(-1),
+      ast_value_factory_(NULL),
+      ast_value_factory_owned_(false) {
+  Initialize(isolate, STUB, zone);
+}
+
+
 CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
                                  Zone* zone)
     : flags_(StrictModeField::encode(SLOPPY) | IsLazy::encode(true)),
@@ -354,15 +368,16 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
     return AbortAndDisableOptimization(kFunctionWithIllegalRedeclaration);
   }
 
-  // Take --hydrogen-filter into account.
+  // Check the whitelist for Crankshaft.
   if (!info()->closure()->PassesFilter(FLAG_hydrogen_filter)) {
     return AbortOptimization(kHydrogenFilter);
   }
 
+  // Crankshaft requires a version of fullcode with deoptimization support.
   // Recompile the unoptimized version of the code if the current version
-  // doesn't have deoptimization support. Alternatively, we may decide to
-  // run the full code generator to get a baseline for the compile-time
-  // performance of the hydrogen-based compiler.
+  // doesn't have deoptimization support already.
+  // Otherwise, if we are gathering compilation time and space statistics
+  // for hydrogen, gather baseline statistics for a fullcode compilation.
   bool should_recompile = !info()->shared_info()->has_deoptimization_support();
   if (should_recompile || FLAG_hydrogen_stats) {
     base::ElapsedTimer timer;
@@ -390,14 +405,20 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
     }
   }
 
-  // Check that the unoptimized, shared code is ready for
-  // optimizations.  When using the always_opt flag we disregard the
-  // optimizable marker in the code object and optimize anyway. This
-  // is safe as long as the unoptimized code has deoptimization
-  // support.
-  ASSERT(FLAG_always_opt || info()->shared_info()->code()->optimizable());
   ASSERT(info()->shared_info()->has_deoptimization_support());
 
+  // Check the whitelist for TurboFan.
+  if (info()->closure()->PassesFilter(FLAG_turbo_filter) &&
+      // TODO(turbofan): Make try-catch work and remove this bailout.
+      info()->function()->dont_optimize_reason() != kTryCatchStatement &&
+      info()->function()->dont_optimize_reason() != kTryFinallyStatement &&
+      // TODO(turbofan): Make OSR work and remove this bailout.
+      !info()->is_osr()) {
+    compiler::Pipeline pipeline(info());
+    pipeline.GenerateCode();
+    return SetLastStatus(SUCCEEDED);
+  }
+
   if (FLAG_trace_hydrogen) {
     Handle<String> name = info()->function()->debug_name();
     PrintF("-----------------------------------------------------------\n");
@@ -447,6 +468,11 @@ OptimizedCompileJob::Status OptimizedCompileJob::OptimizeGraph() {
   DisallowCodeDependencyChange no_dependency_change;
 
   ASSERT(last_status() == SUCCEEDED);
+  // TODO(turbofan): Currently everything is done in the first phase.
+  if (!info()->code().is_null()) {
+    return last_status();
+  }
+
   Timer t(this, &time_taken_to_optimize_);
   ASSERT(graph_ != NULL);
   BailoutReason bailout_reason = kNoReason;
@@ -464,6 +490,12 @@ OptimizedCompileJob::Status OptimizedCompileJob::OptimizeGraph() {
 
 OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
   ASSERT(last_status() == SUCCEEDED);
+  // TODO(turbofan): Currently everything is done in the first phase.
+  if (!info()->code().is_null()) {
+    RecordOptimizationStats();
+    return last_status();
+  }
+
   ASSERT(!info()->HasAbortedDueToDependencyChange());
   DisallowCodeDependencyChange no_dependency_change;
   DisallowJavascriptExecution no_js(isolate());
@@ -1115,6 +1147,9 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
   Handle<Code> code = info->code();
   if (code->kind() != Code::OPTIMIZED_FUNCTION) return;  // Nothing to do.
 
+  // Context specialization folds in the context, so no sharing can occur.
+  if (code->is_turbofanned() && FLAG_context_specialization) return;
+
   // Cache optimized code.
   if (FLAG_cache_optimized_code) {
     Handle<JSFunction> function = info->closure();
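
Note: because the TurboFan pipeline already emits code during the graph-creation phase, the two later phases detect a non-null code object and return early. A stand-alone sketch of that control flow (mock types, not the actual OptimizedCompileJob):

    #include <cassert>

    struct Job {
      bool code_generated;
      Job() : code_generated(false) {}

      void CreateGraph(bool use_turbofan) {
        // With TurboFan, pipeline.GenerateCode() runs here already.
        if (use_turbofan) code_generated = true;
      }
      void OptimizeGraph() {
        if (code_generated) return;  // everything was done in the first phase
      }
      void GenerateCode() {
        if (code_generated) return;  // ditto
      }
    };

    int main() {
      Job job;
      job.CreateGraph(true);
      job.OptimizeGraph();  // both later phases degenerate to no-ops
      job.GenerateCode();
      assert(job.code_generated);
      return 0;
    }
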
index 6b0bb1b658d9d4d3921c88e45791945ee7705284..2ea864a77f179cfd8ba3d619863448bf93e55379 100644 (file)
@@ -63,6 +63,7 @@ class ScriptData {
 class CompilationInfo {
  public:
   CompilationInfo(Handle<JSFunction> closure, Zone* zone);
+  CompilationInfo(Isolate* isolate, Zone* zone);
   virtual ~CompilationInfo();
 
   Isolate* isolate() const {
@@ -391,7 +392,6 @@ class CompilationInfo {
   void Initialize(Isolate* isolate, Mode mode, Zone* zone);
 
   void SetMode(Mode mode) {
-    ASSERT(isolate()->use_crankshaft());
     mode_ = mode;
   }
 
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
new file mode 100644 (file)
index 0000000..781252b
--- /dev/null
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -0,0 +1,828 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/arm/macro-assembler-arm.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+#define kScratchReg r9
+
+
+// Adds Arm-specific methods to convert InstructionOperands.
+class ArmOperandConverter : public InstructionOperandConverter {
+ public:
+  ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  SBit OutputSBit() const {
+    switch (instr_->flags_mode()) {
+      case kFlags_branch:
+      case kFlags_set:
+        return SetCC;
+      case kFlags_none:
+        return LeaveCC;
+    }
+    UNREACHABLE();
+    return LeaveCC;
+  }
+
+  Operand InputImmediate(int index) {
+    Constant constant = ToConstant(instr_->InputAt(index));
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Operand(constant.ToInt32());
+      case Constant::kFloat64:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+      case Constant::kInt64:
+      case Constant::kExternalReference:
+      case Constant::kHeapObject:
+        break;
+    }
+    UNREACHABLE();
+    return Operand::Zero();
+  }
+
+  Operand InputOperand2(int first_index) {
+    const int index = first_index;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+      case kMode_Offset_RI:
+      case kMode_Offset_RR:
+        break;
+      case kMode_Operand2_I:
+        return InputImmediate(index + 0);
+      case kMode_Operand2_R:
+        return Operand(InputRegister(index + 0));
+      case kMode_Operand2_R_ASR_I:
+        return Operand(InputRegister(index + 0), ASR, InputInt5(index + 1));
+      case kMode_Operand2_R_ASR_R:
+        return Operand(InputRegister(index + 0), ASR, InputRegister(index + 1));
+      case kMode_Operand2_R_LSL_I:
+        return Operand(InputRegister(index + 0), LSL, InputInt5(index + 1));
+      case kMode_Operand2_R_LSL_R:
+        return Operand(InputRegister(index + 0), LSL, InputRegister(index + 1));
+      case kMode_Operand2_R_LSR_I:
+        return Operand(InputRegister(index + 0), LSR, InputInt5(index + 1));
+      case kMode_Operand2_R_LSR_R:
+        return Operand(InputRegister(index + 0), LSR, InputRegister(index + 1));
+    }
+    UNREACHABLE();
+    return Operand::Zero();
+  }
+
+  MemOperand InputOffset(int* first_index) {
+    const int index = *first_index;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+      case kMode_Operand2_I:
+      case kMode_Operand2_R:
+      case kMode_Operand2_R_ASR_I:
+      case kMode_Operand2_R_ASR_R:
+      case kMode_Operand2_R_LSL_I:
+      case kMode_Operand2_R_LSL_R:
+      case kMode_Operand2_R_LSR_I:
+      case kMode_Operand2_R_LSR_R:
+        break;
+      case kMode_Offset_RI:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+      case kMode_Offset_RR:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+    }
+    UNREACHABLE();
+    return MemOperand(r0);
+  }
+
+  MemOperand InputOffset() {
+    int index = 0;
+    return InputOffset(&index);
+  }
+
+  MemOperand ToMemOperand(InstructionOperand* op) const {
+    ASSERT(op != NULL);
+    ASSERT(!op->IsRegister());
+    ASSERT(!op->IsDoubleRegister());
+    ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+    // The linkage computes where all spill slots are located.
+    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+  }
+};
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  ArmOperandConverter i(this, instr);
+
+  switch (ArchOpcodeField::decode(instr->opcode())) {
+    case kArchJmp:
+      __ b(code_->GetLabel(i.InputBlock(0)));
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArchNop:
+      // don't emit code for nops.
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArchRet:
+      AssembleReturn();
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArchDeoptimize: {
+      int deoptimization_id = MiscField::decode(instr->opcode());
+      BuildTranslation(instr, deoptimization_id);
+
+      Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+          isolate(), deoptimization_id, Deoptimizer::LAZY);
+      __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmAdd:
+      __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmAnd:
+      __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+              i.OutputSBit());
+      break;
+    case kArmBic:
+      __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmMul:
+      __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+             i.OutputSBit());
+      break;
+    case kArmMla:
+      __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+             i.InputRegister(2), i.OutputSBit());
+      break;
+    case kArmMls: {
+      CpuFeatureScope scope(masm(), MLS);
+      __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+             i.InputRegister(2));
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmSdiv: {
+      CpuFeatureScope scope(masm(), SUDIV);
+      __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmUdiv: {
+      CpuFeatureScope scope(masm(), SUDIV);
+      __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmMov:
+      __ Move(i.OutputRegister(), i.InputOperand2(0));
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmMvn:
+      __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
+      break;
+    case kArmOrr:
+      __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmEor:
+      __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmSub:
+      __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmRsb:
+      __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmBfc: {
+      CpuFeatureScope scope(masm(), ARMv7);
+      __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmUbfx: {
+      CpuFeatureScope scope(masm(), ARMv7);
+      __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+              i.InputInt8(2));
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmCallCodeObject: {
+      if (instr->InputAt(0)->IsImmediate()) {
+        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+        __ Call(code, RelocInfo::CODE_TARGET);
+        RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+                        Safepoint::kNoLazyDeopt);
+      } else {
+        Register reg = i.InputRegister(0);
+        int entry = Code::kHeaderSize - kHeapObjectTag;
+        __ ldr(reg, MemOperand(reg, entry));
+        __ Call(reg);
+        RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+                        Safepoint::kNoLazyDeopt);
+      }
+      bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
+      if (lazy_deopt) {
+        RecordLazyDeoptimizationEntry(instr);
+      }
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmCallJSFunction: {
+      Register func = i.InputRegister(0);
+
+      // TODO(jarin) The load of the context should be separated from the call.
+      __ ldr(cp, FieldMemOperand(func, JSFunction::kContextOffset));
+      __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Call(ip);
+
+      RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+                      Safepoint::kNoLazyDeopt);
+      RecordLazyDeoptimizationEntry(instr);
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmCallAddress: {
+      DirectCEntryStub stub(isolate());
+      stub.GenerateCall(masm(), i.InputRegister(0));
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmPush:
+      __ Push(i.InputRegister(0));
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmDrop: {
+      int words = MiscField::decode(instr->opcode());
+      __ Drop(words);
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmCmp:
+      __ cmp(i.InputRegister(0), i.InputOperand2(1));
+      ASSERT_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmCmn:
+      __ cmn(i.InputRegister(0), i.InputOperand2(1));
+      ASSERT_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmTst:
+      __ tst(i.InputRegister(0), i.InputOperand2(1));
+      ASSERT_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmTeq:
+      __ teq(i.InputRegister(0), i.InputOperand2(1));
+      ASSERT_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmVcmpF64:
+      __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
+                               i.InputDoubleRegister(1));
+      ASSERT_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmVaddF64:
+      __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVsubF64:
+      __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVmulF64:
+      __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVmlaF64:
+      __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+              i.InputDoubleRegister(2));
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVmlsF64:
+      __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+              i.InputDoubleRegister(2));
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVdivF64:
+      __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVmodF64: {
+      // TODO(bmeurer): We should really get rid of this special instruction,
+      // and generate a CallAddress instruction instead.
+      FrameScope scope(masm(), StackFrame::MANUAL);
+      __ PrepareCallCFunction(0, 2, kScratchReg);
+      __ MovToFloatParameters(i.InputDoubleRegister(0),
+                              i.InputDoubleRegister(1));
+      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+                       0, 2);
+      // Move the result into the double result register.
+      __ MovFromFloatResult(i.OutputDoubleRegister());
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVnegF64:
+      __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArmVcvtF64S32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vmov(scratch, i.InputRegister(0));
+      __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVcvtF64U32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vmov(scratch, i.InputRegister(0));
+      __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVcvtS32F64: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
+      __ vmov(i.OutputRegister(), scratch);
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVcvtU32F64: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
+      __ vmov(i.OutputRegister(), scratch);
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmLoadWord8:
+      __ ldrb(i.OutputRegister(), i.InputOffset());
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmStoreWord8: {
+      int index = 0;
+      MemOperand operand = i.InputOffset(&index);
+      __ strb(i.InputRegister(index), operand);
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmLoadWord16:
+      __ ldrh(i.OutputRegister(), i.InputOffset());
+      break;
+    case kArmStoreWord16: {
+      int index = 0;
+      MemOperand operand = i.InputOffset(&index);
+      __ strh(i.InputRegister(index), operand);
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmLoadWord32:
+      __ ldr(i.OutputRegister(), i.InputOffset());
+      break;
+    case kArmStoreWord32: {
+      int index = 0;
+      MemOperand operand = i.InputOffset(&index);
+      __ str(i.InputRegister(index), operand);
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmFloat64Load:
+      __ vldr(i.OutputDoubleRegister(), i.InputOffset());
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmFloat64Store: {
+      int index = 0;
+      MemOperand operand = i.InputOffset(&index);
+      __ vstr(i.InputDoubleRegister(index), operand);
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmStoreWriteBarrier: {
+      Register object = i.InputRegister(0);
+      Register index = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      __ add(index, object, index);
+      __ str(value, MemOperand(index));
+      SaveFPRegsMode mode =
+          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
+      __ RecordWrite(object, index, value, lr_status, mode);
+      ASSERT_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+  }
+}
+
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+                                       FlagsCondition condition) {
+  ArmOperandConverter i(this, instr);
+  Label done;
+
+  // Emit a branch. The true and false targets are always the last two inputs
+  // to the instruction.
+  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+  bool fallthru = IsNextInAssemblyOrder(fblock);
+  Label* tlabel = code()->GetLabel(tblock);
+  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+  switch (condition) {
+    case kUnorderedEqual:
+      __ b(vs, flabel);
+    // Fall through.
+    case kEqual:
+      __ b(eq, tlabel);
+      break;
+    case kUnorderedNotEqual:
+      __ b(vs, tlabel);
+    // Fall through.
+    case kNotEqual:
+      __ b(ne, tlabel);
+      break;
+    case kSignedLessThan:
+      __ b(lt, tlabel);
+      break;
+    case kSignedGreaterThanOrEqual:
+      __ b(ge, tlabel);
+      break;
+    case kSignedLessThanOrEqual:
+      __ b(le, tlabel);
+      break;
+    case kSignedGreaterThan:
+      __ b(gt, tlabel);
+      break;
+    case kUnorderedLessThan:
+      __ b(vs, flabel);
+    // Fall through.
+    case kUnsignedLessThan:
+      __ b(lo, tlabel);
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ b(vs, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      __ b(hs, tlabel);
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ b(vs, flabel);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      __ b(ls, tlabel);
+      break;
+    case kUnorderedGreaterThan:
+      __ b(vs, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      __ b(hi, tlabel);
+      break;
+  }
+  if (!fallthru) __ b(flabel);  // no fallthru to flabel.
+  __ bind(&done);
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  ArmOperandConverter i(this, instr);
+  Label done;
+
+  // Materialize a full 32-bit 1 or 0 value.
+  Label check;
+  Register reg = i.OutputRegister();
+  Condition cc = kNoCondition;
+  switch (condition) {
+    case kUnorderedEqual:
+      __ b(vc, &check);
+      __ mov(reg, Operand(0));
+      __ b(&done);
+    // Fall through.
+    case kEqual:
+      cc = eq;
+      break;
+    case kUnorderedNotEqual:
+      __ b(vc, &check);
+      __ mov(reg, Operand(1));
+      __ b(&done);
+    // Fall through.
+    case kNotEqual:
+      cc = ne;
+      break;
+    case kSignedLessThan:
+      cc = lt;
+      break;
+    case kSignedGreaterThanOrEqual:
+      cc = ge;
+      break;
+    case kSignedLessThanOrEqual:
+      cc = le;
+      break;
+    case kSignedGreaterThan:
+      cc = gt;
+      break;
+    case kUnorderedLessThan:
+      __ b(vc, &check);
+      __ mov(reg, Operand(0));
+      __ b(&done);
+    // Fall through.
+    case kUnsignedLessThan:
+      cc = lo;
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ b(vc, &check);
+      __ mov(reg, Operand(1));
+      __ b(&done);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      cc = hs;
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ b(vc, &check);
+      __ mov(reg, Operand(0));
+      __ b(&done);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      cc = ls;
+      break;
+    case kUnorderedGreaterThan:
+      __ b(vc, &check);
+      __ mov(reg, Operand(1));
+      __ b(&done);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      cc = hi;
+      break;
+  }
+  __ bind(&check);
+  __ mov(reg, Operand(0));
+  __ mov(reg, Operand(1), LeaveCC, cc);
+  __ bind(&done);
+}
+
+
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    __ Push(lr, fp);
+    __ mov(fp, sp);
+    const RegList saves = descriptor->CalleeSavedRegisters();
+    if (saves != 0) {  // Save callee-saved registers.
+      int register_save_area_size = 0;
+      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+        if (!((1 << i) & saves)) continue;
+        register_save_area_size += kPointerSize;
+      }
+      frame()->SetRegisterSaveAreaSize(register_save_area_size);
+      __ stm(db_w, sp, saves);
+    }
+  } else if (descriptor->IsJSFunctionCall()) {
+    CompilationInfo* info = linkage()->info();
+    __ Prologue(info->IsCodePreAgingActive());
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+
+    // Sloppy mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
+    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+    if (info->strict_mode() == SLOPPY && !info->is_native()) {
+      Label ok;
+      // +2 for return address and saved frame pointer.
+      int receiver_slot = info->scope()->num_parameters() + 2;
+      __ ldr(r2, MemOperand(fp, receiver_slot * kPointerSize));
+      __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+      __ b(ne, &ok);
+      __ ldr(r2, GlobalObjectOperand());
+      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
+      __ str(r2, MemOperand(fp, receiver_slot * kPointerSize));
+      __ bind(&ok);
+    }
+
+  } else {
+    __ StubPrologue();
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+  }
+  int stack_slots = frame()->GetSpillSlotCount();
+  if (stack_slots > 0) {
+    __ sub(sp, sp, Operand(stack_slots * kPointerSize));
+  }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    if (frame()->GetRegisterSaveAreaSize() > 0) {
+      // Remove this frame's spill slots first.
+      int stack_slots = frame()->GetSpillSlotCount();
+      if (stack_slots > 0) {
+        __ add(sp, sp, Operand(stack_slots * kPointerSize));
+      }
+      // Restore registers.
+      const RegList saves = descriptor->CalleeSavedRegisters();
+      if (saves != 0) {
+        __ ldm(ia_w, sp, saves);
+      }
+    }
+    __ mov(sp, fp);
+    __ ldm(ia_w, sp, fp.bit() | lr.bit());
+    __ Ret();
+  } else {
+    __ mov(sp, fp);
+    __ ldm(ia_w, sp, fp.bit() | lr.bit());
+    int pop_count =
+        descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
+    __ Drop(pop_count);
+    __ Ret();
+  }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  ArmOperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    ASSERT(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      __ mov(g.ToRegister(destination), src);
+    } else {
+      __ str(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsStackSlot()) {
+    ASSERT(destination->IsRegister() || destination->IsStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ ldr(g.ToRegister(destination), src);
+    } else {
+      Register temp = kScratchReg;
+      __ ldr(temp, src);
+      __ str(temp, g.ToMemOperand(destination));
+    }
+  } else if (source->IsConstant()) {
+    if (destination->IsRegister() || destination->IsStackSlot()) {
+      Register dst =
+          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+      Constant src = g.ToConstant(source);
+      switch (src.type()) {
+        case Constant::kInt32:
+          __ mov(dst, Operand(src.ToInt32()));
+          break;
+        case Constant::kInt64:
+          UNREACHABLE();
+          break;
+        case Constant::kFloat64:
+          __ Move(dst,
+                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+          break;
+        case Constant::kExternalReference:
+          __ mov(dst, Operand(src.ToExternalReference()));
+          break;
+        case Constant::kHeapObject:
+          __ Move(dst, src.ToHeapObject());
+          break;
+      }
+      if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
+    } else if (destination->IsDoubleRegister()) {
+      DwVfpRegister result = g.ToDoubleRegister(destination);
+      __ vmov(result, g.ToDouble(source));
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      DwVfpRegister temp = kScratchDoubleReg;
+      __ vmov(temp, g.ToDouble(source));
+      __ vstr(temp, g.ToMemOperand(destination));
+    }
+  } else if (source->IsDoubleRegister()) {
+    DwVfpRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      DwVfpRegister dst = g.ToDoubleRegister(destination);
+      __ Move(dst, src);
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      __ vstr(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    ASSERT(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ vldr(g.ToDoubleRegister(destination), src);
+    } else {
+      DwVfpRegister temp = kScratchDoubleReg;
+      __ vldr(temp, src);
+      __ vstr(temp, g.ToMemOperand(destination));
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  ArmOperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    // Register-register.
+    Register temp = kScratchReg;
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ Move(temp, src);
+      __ Move(src, dst);
+      __ Move(dst, temp);
+    } else {
+      ASSERT(destination->IsStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ mov(temp, src);
+      __ ldr(src, dst);
+      __ str(temp, dst);
+    }
+  } else if (source->IsStackSlot()) {
+    ASSERT(destination->IsStackSlot());
+    Register temp_0 = kScratchReg;
+    SwVfpRegister temp_1 = kScratchDoubleReg.low();
+    MemOperand src = g.ToMemOperand(source);
+    MemOperand dst = g.ToMemOperand(destination);
+    __ ldr(temp_0, src);
+    __ vldr(temp_1, dst);
+    __ str(temp_0, dst);
+    __ vstr(temp_1, src);
+  } else if (source->IsDoubleRegister()) {
+    DwVfpRegister temp = kScratchDoubleReg;
+    DwVfpRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      DwVfpRegister dst = g.ToDoubleRegister(destination);
+      __ Move(temp, src);
+      __ Move(src, dst);
+      __ Move(dst, temp);
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ Move(temp, src);
+      __ vldr(src, dst);
+      __ vstr(temp, dst);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    ASSERT(destination->IsDoubleStackSlot());
+    Register temp_0 = kScratchReg;
+    DwVfpRegister temp_1 = kScratchDoubleReg;
+    MemOperand src0 = g.ToMemOperand(source);
+    MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
+    MemOperand dst0 = g.ToMemOperand(destination);
+    MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
+    __ vldr(temp_1, dst0);  // Save destination in temp_1.
+    __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
+    __ str(temp_0, dst0);
+    __ ldr(temp_0, src1);
+    __ str(temp_0, dst1);
+    __ vstr(temp_1, src0);
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() {
+  // On 32-bit ARM we do not insert nops for inlined Smi code.
+  UNREACHABLE();
+}
+
+#ifdef DEBUG
+
+// Checks whether the code between start_pc and end_pc is a no-op.
+bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
+                                            int end_pc) {
+  return false;
+}
+
+#endif  // DEBUG
+
+#undef __
+}
+}
+}  // namespace v8::internal::compiler
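
Note: the double-stack-slot case at the end of AssembleSwap() needs two temporaries — a double register wide enough to hold the whole destination slot, plus a core register that copies the source one word at a time. A stand-alone sketch with plain variables standing in for the registers and slots:

    #include <cassert>
    #include <cstring>
    #include <stdint.h>

    int main() {
      int32_t src[2] = {1, 2};  // source double stack slot (two words)
      int32_t dst[2] = {3, 4};  // destination double stack slot
      uint64_t temp_1;          // stands in for kScratchDoubleReg
      int32_t temp_0;           // stands in for kScratchReg

      std::memcpy(&temp_1, dst, sizeof(dst));  // vldr temp_1, dst0
      temp_0 = src[0]; dst[0] = temp_0;        // ldr/str word 0
      temp_0 = src[1]; dst[1] = temp_0;        // ldr/str word 1
      std::memcpy(src, &temp_1, sizeof(src));  // vstr temp_1, src0

      assert(src[0] == 3 && src[1] == 4);
      assert(dst[0] == 1 && dst[1] == 2);
      return 0;
    }
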
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
new file mode 100644 (file)
index 0000000..b222bb3
--- /dev/null
@@ -0,0 +1,84 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
+#define V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// ARM-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(ArmAdd)                        \
+  V(ArmAnd)                        \
+  V(ArmBic)                        \
+  V(ArmCmp)                        \
+  V(ArmCmn)                        \
+  V(ArmTst)                        \
+  V(ArmTeq)                        \
+  V(ArmOrr)                        \
+  V(ArmEor)                        \
+  V(ArmSub)                        \
+  V(ArmRsb)                        \
+  V(ArmMul)                        \
+  V(ArmMla)                        \
+  V(ArmMls)                        \
+  V(ArmSdiv)                       \
+  V(ArmUdiv)                       \
+  V(ArmMov)                        \
+  V(ArmMvn)                        \
+  V(ArmBfc)                        \
+  V(ArmUbfx)                       \
+  V(ArmCallCodeObject)             \
+  V(ArmCallJSFunction)             \
+  V(ArmCallAddress)                \
+  V(ArmPush)                       \
+  V(ArmDrop)                       \
+  V(ArmVcmpF64)                    \
+  V(ArmVaddF64)                    \
+  V(ArmVsubF64)                    \
+  V(ArmVmulF64)                    \
+  V(ArmVmlaF64)                    \
+  V(ArmVmlsF64)                    \
+  V(ArmVdivF64)                    \
+  V(ArmVmodF64)                    \
+  V(ArmVnegF64)                    \
+  V(ArmVcvtF64S32)                 \
+  V(ArmVcvtF64U32)                 \
+  V(ArmVcvtS32F64)                 \
+  V(ArmVcvtU32F64)                 \
+  V(ArmFloat64Load)                \
+  V(ArmFloat64Store)               \
+  V(ArmLoadWord8)                  \
+  V(ArmStoreWord8)                 \
+  V(ArmLoadWord16)                 \
+  V(ArmStoreWord16)                \
+  V(ArmLoadWord32)                 \
+  V(ArmStoreWord32)                \
+  V(ArmStoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+#define TARGET_ADDRESSING_MODE_LIST(V)  \
+  V(Offset_RI)        /* [%r0 + K] */   \
+  V(Offset_RR)        /* [%r0 + %r1] */ \
+  V(Operand2_I)       /* K */           \
+  V(Operand2_R)       /* %r0 */         \
+  V(Operand2_R_ASR_I) /* %r0 ASR K */   \
+  V(Operand2_R_LSL_I) /* %r0 LSL K */   \
+  V(Operand2_R_LSR_I) /* %r0 LSR K */   \
+  V(Operand2_R_ASR_R) /* %r0 ASR %r1 */ \
+  V(Operand2_R_LSL_R) /* %r0 LSL %r1 */ \
+  V(Operand2_R_LSR_R) /* %r0 LSR %r1 */
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
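
The two V-lists above are X-macros; the shared instruction-codes header presumably expands each entry into an enum constant and a printable name. A self-contained sketch of that pattern (the DEMO_* names and exact expansion scheme are assumptions, not quotes from V8):

    #include <cstdio>

    #define DEMO_OPCODE_LIST(V) \
      V(ArmAdd)                 \
      V(ArmSub)                 \
      V(ArmMul)

    enum DemoArchOpcode {
    #define DECLARE_OPCODE(Name) k##Name,
      DEMO_OPCODE_LIST(DECLARE_OPCODE)
    #undef DECLARE_OPCODE
    };

    static const char* const kDemoOpcodeNames[] = {
    #define DECLARE_NAME(Name) #Name,
      DEMO_OPCODE_LIST(DECLARE_NAME)
    #undef DECLARE_NAME
    };

    int main() {
      // Prints "1 -> ArmSub": the enum value doubles as the name-table index.
      std::printf("%d -> %s\n", kArmSub, kDemoOpcodeNames[kArmSub]);
      return 0;
    }
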
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
new file mode 100644 (file)
index 0000000..95e3333
--- /dev/null
@@ -0,0 +1,796 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler-intrinsics.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Adds Arm-specific methods for generating InstructionOperands.
+class ArmOperandGenerator V8_FINAL : public OperandGenerator {
+ public:
+  explicit ArmOperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
+  InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
+    if (CanBeImmediate(node, opcode)) {
+      return UseImmediate(node);
+    }
+    return UseRegister(node);
+  }
+
+  bool CanBeImmediate(Node* node, InstructionCode opcode) {
+    int32_t value;
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant:
+      case IrOpcode::kNumberConstant:
+        value = ValueOf<int32_t>(node->op());
+        break;
+      default:
+        return false;
+    }
+    switch (ArchOpcodeField::decode(opcode)) {
+      case kArmAnd:
+      case kArmMov:
+      case kArmMvn:
+      case kArmBic:
+        return ImmediateFitsAddrMode1Instruction(value) ||
+               ImmediateFitsAddrMode1Instruction(~value);
+
+      case kArmAdd:
+      case kArmSub:
+      case kArmCmp:
+      case kArmCmn:
+        return ImmediateFitsAddrMode1Instruction(value) ||
+               ImmediateFitsAddrMode1Instruction(-value);
+
+      case kArmTst:
+      case kArmTeq:
+      case kArmOrr:
+      case kArmEor:
+      case kArmRsb:
+        return ImmediateFitsAddrMode1Instruction(value);
+
+      case kArmFloat64Load:
+      case kArmFloat64Store:
+        return value >= -1020 && value <= 1020 && (value % 4) == 0;
+
+      case kArmLoadWord8:
+      case kArmStoreWord8:
+      case kArmLoadWord32:
+      case kArmStoreWord32:
+      case kArmStoreWriteBarrier:
+        return value >= -4095 && value <= 4095;
+
+      case kArmLoadWord16:
+      case kArmStoreWord16:
+        return value >= -255 && value <= 255;
+
+      case kArchJmp:
+      case kArchNop:
+      case kArchRet:
+      case kArchDeoptimize:
+      case kArmMul:
+      case kArmMla:
+      case kArmMls:
+      case kArmSdiv:
+      case kArmUdiv:
+      case kArmBfc:
+      case kArmUbfx:
+      case kArmCallCodeObject:
+      case kArmCallJSFunction:
+      case kArmCallAddress:
+      case kArmPush:
+      case kArmDrop:
+      case kArmVcmpF64:
+      case kArmVaddF64:
+      case kArmVsubF64:
+      case kArmVmulF64:
+      case kArmVmlaF64:
+      case kArmVmlsF64:
+      case kArmVdivF64:
+      case kArmVmodF64:
+      case kArmVnegF64:
+      case kArmVcvtF64S32:
+      case kArmVcvtF64U32:
+      case kArmVcvtS32F64:
+      case kArmVcvtU32F64:
+        return false;
+    }
+    UNREACHABLE();
+    return false;
+  }
+
+ private:
+  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
+    return Assembler::ImmediateFitsAddrMode1Instruction(imm);
+  }
+};
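
CanBeImmediate above defers to Assembler::ImmediateFitsAddrMode1Instruction, which tests the classic ARM data-processing encoding: an immediate must be an 8-bit value rotated right by an even amount. The extra ~value and -value retries work because the assembler can flip instruction pairs such as and/bic or add/sub. A standalone reconstruction of the predicate, offered as an assumption about its behavior rather than the V8 source:

    #include <cstdint>

    // Sketch: true iff imm equals some 8-bit constant rotated right by an
    // even amount (0, 2, ..., 30) -- the ARM "operand2" immediate form.
    bool FitsAddrMode1(uint32_t imm) {
      if (imm <= 0xFF) return true;  // rotation of zero
      for (int rot = 2; rot < 32; rot += 2) {
        uint32_t unrotated = (imm << rot) | (imm >> (32 - rot));  // undo ror
        if (unrotated <= 0xFF) return true;
      }
      return false;
    }
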
+
+
+static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                            Node* node) {
+  ArmOperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsDoubleRegister(node),
+                 g.UseDoubleRegister(node->InputAt(0)),
+                 g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+static Instruction* EmitBinop(InstructionSelector* selector,
+                              InstructionCode opcode, size_t output_count,
+                              InstructionOperand** outputs, Node* left,
+                              Node* right, size_t label_count,
+                              InstructionOperand** labels) {
+  ArmOperandGenerator g(selector);
+  InstructionOperand* inputs[5];
+  size_t input_count = 0;
+
+  inputs[input_count++] = g.UseRegister(left);
+  if (g.CanBeImmediate(right, opcode)) {
+    opcode |= AddressingModeField::encode(kMode_Operand2_I);
+    inputs[input_count++] = g.UseImmediate(right);
+  } else if (right->opcode() == IrOpcode::kWord32Sar) {
+    Int32BinopMatcher mright(right);
+    inputs[input_count++] = g.UseRegister(mright.left().node());
+    if (mright.right().IsInRange(1, 32)) {
+      opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
+      inputs[input_count++] = g.UseImmediate(mright.right().node());
+    } else {
+      opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_R);
+      inputs[input_count++] = g.UseRegister(mright.right().node());
+    }
+  } else if (right->opcode() == IrOpcode::kWord32Shl) {
+    Int32BinopMatcher mright(right);
+    inputs[input_count++] = g.UseRegister(mright.left().node());
+    if (mright.right().IsInRange(0, 31)) {
+      opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
+      inputs[input_count++] = g.UseImmediate(mright.right().node());
+    } else {
+      opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_R);
+      inputs[input_count++] = g.UseRegister(mright.right().node());
+    }
+  } else if (right->opcode() == IrOpcode::kWord32Shr) {
+    Int32BinopMatcher mright(right);
+    inputs[input_count++] = g.UseRegister(mright.left().node());
+    if (mright.right().IsInRange(1, 32)) {
+      opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
+      inputs[input_count++] = g.UseImmediate(mright.right().node());
+    } else {
+      opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_R);
+      inputs[input_count++] = g.UseRegister(mright.right().node());
+    }
+  } else {
+    opcode |= AddressingModeField::encode(kMode_Operand2_R);
+    inputs[input_count++] = g.UseRegister(right);
+  }
+
+  // Append the optional labels.
+  while (label_count-- != 0) {
+    inputs[input_count++] = *labels++;
+  }
+
+  ASSERT_NE(0, input_count);
+  ASSERT_GE(ARRAY_SIZE(inputs), input_count);
+  ASSERT_NE(kMode_None, AddressingModeField::decode(opcode));
+
+  return selector->Emit(opcode, output_count, outputs, input_count, inputs);
+}
+
+
+static Instruction* EmitBinop(InstructionSelector* selector,
+                              InstructionCode opcode, Node* node, Node* left,
+                              Node* right) {
+  ArmOperandGenerator g(selector);
+  InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
+  const size_t output_count = ARRAY_SIZE(outputs);
+  return EmitBinop(selector, opcode, output_count, outputs, left, right, 0,
+                   NULL);
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, InstructionCode reverse_opcode) {
+  ArmOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+
+  Node* left = m.left().node();
+  Node* right = m.right().node();
+  if (g.CanBeImmediate(m.left().node(), reverse_opcode) ||
+      m.left().IsWord32Sar() || m.left().IsWord32Shl() ||
+      m.left().IsWord32Shr()) {
+    opcode = reverse_opcode;
+    std::swap(left, right);
+  }
+
+  EmitBinop(selector, opcode, node, left, right);
+}
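
VisitBinop takes a reverse opcode because ARM places the immediate (or shifted operand) on the right: when the interesting operand sits on the left, the operands are swapped and a mirrored instruction is used, e.g. kArmSub versus kArmRsb, while commutative callers simply pass the same opcode twice. A toy model of that decision (the enum names are stand-ins, not the kArm* constants):

    #include <cstdio>
    #include <utility>

    enum DemoOp { kSub, kRsb };

    // Puts the immediate on the right, compensating with reverse-subtract.
    DemoOp SelectSub(int* left, int* right, bool left_is_immediate) {
      if (left_is_immediate) {
        std::swap(*left, *right);
        return kRsb;  // rsb d, x, #k computes k - x
      }
      return kSub;
    }

    int main() {
      int l = 1, r = 7;  // "1 - x" with x in a register
      DemoOp op = SelectSub(&l, &r, true);
      std::printf("%s %d, #%d\n", op == kRsb ? "rsb" : "sub", l, r);
      return 0;
    }
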
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+  MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
+  ArmOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  InstructionOperand* result = rep == kMachineFloat64
+                                   ? g.DefineAsDoubleRegister(node)
+                                   : g.DefineAsRegister(node);
+
+  ArchOpcode opcode;
+  switch (rep) {
+    case kMachineFloat64:
+      opcode = kArmFloat64Load;
+      break;
+    case kMachineWord8:
+      opcode = kArmLoadWord8;
+      break;
+    case kMachineWord16:
+      opcode = kArmLoadWord16;
+      break;
+    case kMachineTagged:  // Fall through.
+    case kMachineWord32:
+      opcode = kArmLoadWord32;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), result,
+         g.UseRegister(base), g.UseImmediate(index));
+  } else if (g.CanBeImmediate(base, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), result,
+         g.UseRegister(index), g.UseImmediate(base));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), result,
+         g.UseRegister(base), g.UseRegister(index));
+  }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+  ArmOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+  MachineRepresentation rep = store_rep.rep;
+  if (store_rep.write_barrier_kind == kFullWriteBarrier) {
+    ASSERT(rep == kMachineTagged);
+    // TODO(dcarney): refactor RecordWrite function to take temp registers
+    //                and pass them here instead of using fixed regs
+    // TODO(dcarney): handle immediate indices.
+    InstructionOperand* temps[] = {g.TempRegister(r5), g.TempRegister(r6)};
+    Emit(kArmStoreWriteBarrier, NULL, g.UseFixed(base, r4),
+         g.UseFixed(index, r5), g.UseFixed(value, r6), ARRAY_SIZE(temps),
+         temps);
+    return;
+  }
+  ASSERT_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
+  InstructionOperand* val = rep == kMachineFloat64 ? g.UseDoubleRegister(value)
+                                                   : g.UseRegister(value);
+
+  ArchOpcode opcode;
+  switch (rep) {
+    case kMachineFloat64:
+      opcode = kArmFloat64Store;
+      break;
+    case kMachineWord8:
+      opcode = kArmStoreWord8;
+      break;
+    case kMachineWord16:
+      opcode = kArmStoreWord16;
+      break;
+    case kMachineTagged:  // Fall through.
+    case kMachineWord32:
+      opcode = kArmStoreWord32;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
+         g.UseRegister(base), g.UseImmediate(index), val);
+  } else if (g.CanBeImmediate(base, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
+         g.UseRegister(index), g.UseImmediate(base), val);
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), NULL,
+         g.UseRegister(base), g.UseRegister(index), val);
+  }
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().IsWord32Xor() && CanCover(node, m.left().node())) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().Is(-1)) {
+      EmitBinop(this, kArmBic, node, m.right().node(), mleft.left().node());
+      return;
+    }
+  }
+  if (m.right().IsWord32Xor() && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    if (mright.right().Is(-1)) {
+      EmitBinop(this, kArmBic, node, m.left().node(), mright.left().node());
+      return;
+    }
+  }
+  if (CpuFeatures::IsSupported(ARMv7) && m.right().HasValue()) {
+    uint32_t value = m.right().Value();
+    uint32_t width = CompilerIntrinsics::CountSetBits(value);
+    uint32_t msb = CompilerIntrinsics::CountLeadingZeros(value);
+    if (msb + width == 32) {
+      ASSERT_EQ(0, CompilerIntrinsics::CountTrailingZeros(value));
+      if (m.left().IsWord32Shr()) {
+        Int32BinopMatcher mleft(m.left().node());
+        if (mleft.right().IsInRange(0, 31)) {
+          Emit(kArmUbfx, g.DefineAsRegister(node),
+               g.UseRegister(mleft.left().node()),
+               g.UseImmediate(mleft.right().node()), g.TempImmediate(width));
+          return;
+        }
+      }
+      Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(0), g.TempImmediate(width));
+      return;
+    }
+    // Try to interpret this AND as BFC.
+    width = 32 - width;
+    msb = CompilerIntrinsics::CountLeadingZeros(~value);
+    uint32_t lsb = CompilerIntrinsics::CountTrailingZeros(~value);
+    if (msb + width + lsb == 32) {
+      Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(lsb), g.TempImmediate(width));
+      return;
+    }
+  }
+  VisitBinop(this, node, kArmAnd, kArmAnd);
+}
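
The UBFX case above fires when the mask's set bits form one contiguous run anchored at bit 0 (msb + width == 32); the BFC case fires when the clear bits form one contiguous run. A compilable check of the UBFX condition, using GCC/Clang builtins in place of the CompilerIntrinsics helpers:

    #include <cassert>
    #include <cstdint>

    // Sketch: masks eligible for the "and -> ubfx" rewrite in VisitWord32And.
    bool IsUbfxMask(uint32_t value) {
      if (value == 0) return false;  // and with 0 is folded before selection
      int width = __builtin_popcount(value);  // CountSetBits
      int msb = __builtin_clz(value);         // CountLeadingZeros
      return msb + width == 32;  // contiguous ones ending at bit 0
    }

    int main() {
      assert(IsUbfxMask(0x000000FF));   // and x, #0xFF -> ubfx x, x, #0, #8
      assert(!IsUbfxMask(0x0000FF00));  // run not anchored at bit 0
      return 0;
    }
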
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  VisitBinop(this, node, kArmOrr, kArmOrr);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    Emit(kArmMvn | AddressingModeField::encode(kMode_Operand2_R),
+         g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
+  } else {
+    VisitBinop(this, node, kArmEor, kArmEor);
+  }
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().IsInRange(0, 31)) {
+    Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+         g.UseImmediate(m.right().node()));
+  } else {
+    Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_LSL_R),
+         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+         g.UseRegister(m.right().node()));
+  }
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (CpuFeatures::IsSupported(ARMv7) && m.left().IsWord32And() &&
+      m.right().IsInRange(0, 31)) {
+    int32_t lsb = m.right().Value();
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().HasValue()) {
+      uint32_t value = (mleft.right().Value() >> lsb) << lsb;
+      uint32_t width = CompilerIntrinsics::CountSetBits(value);
+      uint32_t msb = CompilerIntrinsics::CountLeadingZeros(value);
+      if (msb + width + lsb == 32) {
+        ASSERT_EQ(lsb, CompilerIntrinsics::CountTrailingZeros(value));
+        Emit(kArmUbfx, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+             g.TempImmediate(width));
+        return;
+      }
+    }
+  }
+  if (m.right().IsInRange(1, 32)) {
+    Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_LSR_I),
+         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+         g.UseImmediate(m.right().node()));
+    return;
+  }
+  Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_LSR_R),
+       g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().IsInRange(1, 32)) {
+    Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_ASR_I),
+         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+         g.UseImmediate(m.right().node()));
+  } else {
+    Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_ASR_R),
+         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+         g.UseRegister(m.right().node()));
+  }
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
+    Int32BinopMatcher mleft(m.left().node());
+    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mleft.left().node()),
+         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
+    return;
+  }
+  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+    return;
+  }
+  VisitBinop(this, node, kArmAdd, kArmAdd);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (CpuFeatures::IsSupported(MLS) && m.right().IsInt32Mul() &&
+      CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+    return;
+  }
+  VisitBinop(this, node, kArmSub, kArmRsb);
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().HasValue() && m.right().Value() > 0) {
+    int32_t value = m.right().Value();
+    if (IsPowerOf2(value - 1)) {
+      Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value - 1)));
+      return;
+    }
+    if (value < kMaxInt && IsPowerOf2(value + 1)) {
+      Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value + 1)));
+      return;
+    }
+  }
+  Emit(kArmMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
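
The two IsPowerOf2 branches above strength-reduce the multiplication: for value = 2^k + 1 the product is x + (x << k), an add with an LSL operand, and for value = 2^k - 1 it is (x << k) - x, an rsb with an LSL operand. The identities, checked in plain C++:

    #include <cassert>
    #include <cstdint>

    int32_t MulPow2Plus1(int32_t x, int k) { return x + (x << k); }   // add, LSL #k
    int32_t MulPow2Minus1(int32_t x, int k) { return (x << k) - x; }  // rsb, LSL #k

    int main() {
      assert(MulPow2Plus1(7, 3) == 7 * 9);   // value 9:  value - 1 == 2^3
      assert(MulPow2Minus1(7, 3) == 7 * 7);  // value 7:  value + 1 == 2^3
      return 0;
    }
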
+
+
+static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
+                    ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
+                    InstructionOperand* result_operand,
+                    InstructionOperand* left_operand,
+                    InstructionOperand* right_operand) {
+  ArmOperandGenerator g(selector);
+  if (CpuFeatures::IsSupported(SUDIV)) {
+    selector->Emit(div_opcode, result_operand, left_operand, right_operand);
+    return;
+  }
+  InstructionOperand* left_double_operand = g.TempDoubleRegister();
+  InstructionOperand* right_double_operand = g.TempDoubleRegister();
+  InstructionOperand* result_double_operand = g.TempDoubleRegister();
+  selector->Emit(f64i32_opcode, left_double_operand, left_operand);
+  selector->Emit(f64i32_opcode, right_double_operand, right_operand);
+  selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
+                 right_double_operand);
+  selector->Emit(i32f64_opcode, result_operand, result_double_operand);
+}
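
Without SUDIV, EmitDiv above round-trips through double precision: every int32 is exactly representable as a float64, and converting the vdiv quotient back truncates toward zero just as sdiv would. A self-contained model of the fallback:

    #include <cassert>
    #include <cstdint>

    int32_t DivViaDouble(int32_t a, int32_t b) {
      double da = static_cast<double>(a);    // kArmVcvtF64S32
      double db = static_cast<double>(b);    // kArmVcvtF64S32
      return static_cast<int32_t>(da / db);  // kArmVdivF64, then kArmVcvtS32F64
    }

    int main() {
      assert(DivViaDouble(-7, 2) == -3);  // truncation toward zero, as sdiv
      assert(DivViaDouble(1 << 30, 3) == (1 << 30) / 3);
      return 0;
    }
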
+
+
+static void VisitDiv(InstructionSelector* selector, Node* node,
+                     ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
+                     ArchOpcode i32f64_opcode) {
+  ArmOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
+          g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+          g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+  VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
+}
+
+
+static void VisitMod(InstructionSelector* selector, Node* node,
+                     ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
+                     ArchOpcode i32f64_opcode) {
+  ArmOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand* div_operand = g.TempRegister();
+  InstructionOperand* result_operand = g.DefineAsRegister(node);
+  InstructionOperand* left_operand = g.UseRegister(m.left().node());
+  InstructionOperand* right_operand = g.UseRegister(m.right().node());
+  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
+          left_operand, right_operand);
+  if (CpuFeatures::IsSupported(MLS)) {
+    selector->Emit(kArmMls, result_operand, div_operand, right_operand,
+                   left_operand);
+    return;
+  }
+  InstructionOperand* mul_operand = g.TempRegister();
+  selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
+  selector->Emit(kArmSub, result_operand, left_operand, mul_operand);
+}
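
VisitMod recovers the remainder from the quotient as left - (left / right) * right: a single mls when the MLS extension is available, otherwise an explicit mul followed by sub. The same computation in plain C++:

    #include <cassert>
    #include <cstdint>

    int32_t ModFromDiv(int32_t left, int32_t right) {
      int32_t div = left / right;  // EmitDiv (sdiv, or the vdiv fallback)
      return left - div * right;   // mls, or mul + sub without MLS
    }

    int main() {
      assert(ModFromDiv(17, 5) == 2);
      assert(ModFromDiv(-17, 5) == -2);  // sign follows the dividend
      return 0;
    }
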
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+  VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
+}
+
+
+void InstructionSelector::VisitConvertInt32ToFloat64(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVcvtF64S32, g.DefineAsDoubleRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitConvertFloat64ToInt32(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVcvtS32F64, g.DefineAsRegister(node),
+       g.UseDoubleRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  ArmOperandGenerator g(this);
+  Float64BinopMatcher m(node);
+  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+    Float64BinopMatcher mleft(m.left().node());
+    Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
+         g.UseDoubleRegister(m.right().node()),
+         g.UseDoubleRegister(mleft.left().node()),
+         g.UseDoubleRegister(mleft.right().node()));
+    return;
+  }
+  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+    Float64BinopMatcher mright(m.right().node());
+    Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
+         g.UseDoubleRegister(m.left().node()),
+         g.UseDoubleRegister(mright.left().node()),
+         g.UseDoubleRegister(mright.right().node()));
+    return;
+  }
+  VisitRRRFloat64(this, kArmVaddF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  ArmOperandGenerator g(this);
+  Float64BinopMatcher m(node);
+  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+    Float64BinopMatcher mright(m.right().node());
+    Emit(kArmVmlsF64, g.DefineSameAsFirst(node),
+         g.UseDoubleRegister(m.left().node()),
+         g.UseDoubleRegister(mright.left().node()),
+         g.UseDoubleRegister(mright.right().node()));
+    return;
+  }
+  VisitRRRFloat64(this, kArmVsubF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  ArmOperandGenerator g(this);
+  Float64BinopMatcher m(node);
+  if (m.right().Is(-1.0)) {
+    Emit(kArmVnegF64, g.DefineAsRegister(node),
+         g.UseDoubleRegister(m.left().node()));
+  } else {
+    VisitRRRFloat64(this, kArmVmulF64, node);
+  }
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  VisitRRRFloat64(this, kArmVdivF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVmodF64, g.DefineAsFixedDouble(node, d0),
+       g.UseFixedDouble(node->InputAt(0), d0),
+       g.UseFixedDouble(node->InputAt(1), d1))->MarkAsCall();
+}
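
ArmVmodF64 has no single-instruction expansion; pinning the operands to d0/d1 and marking the instruction as a call lines the inputs up with the C calling convention for the runtime's double-modulo helper (the ARM64 generator later in this patch shows the matching CallCFunction). Its semantics are those of the C library's fmod:

    #include <cassert>
    #include <cmath>

    int main() {
      // What the d0/d1 runtime call computes, per the C definition of fmod.
      assert(std::fmod(7.5, 2.0) == 1.5);
      assert(std::fmod(-7.5, 2.0) == -1.5);  // sign follows the dividend
      return 0;
    }
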
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+                                    BasicBlock* deoptimization) {
+  ArmOperandGenerator g(this);
+  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+  CallBuffer buffer(zone(), descriptor);  // TODO(turbofan): temp zone here?
+
+  // Compute InstructionOperands for inputs and outputs.
+  // TODO(turbofan): on ARM64 it's probably better to use the code object in a
+  // register if there are multiple uses of it. Improve constant pool and the
+  // heuristics in the register allocator for where to emit constants.
+  InitializeCallBuffer(call, &buffer, true, false, continuation,
+                       deoptimization);
+
+  // TODO(dcarney): might be possible to use claim/poke instead
+  // Push any stack arguments.
+  for (int i = buffer.pushed_count - 1; i >= 0; --i) {
+    Node* input = buffer.pushed_nodes[i];
+    Emit(kArmPush, NULL, g.UseRegister(input));
+  }
+
+  // Select the appropriate opcode based on the call type.
+  InstructionCode opcode;
+  switch (descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject: {
+      bool lazy_deopt = descriptor->CanLazilyDeoptimize();
+      opcode = kArmCallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+      break;
+    }
+    case CallDescriptor::kCallAddress:
+      opcode = kArmCallAddress;
+      break;
+    case CallDescriptor::kCallJSFunction:
+      opcode = kArmCallJSFunction;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  // Emit the call instruction.
+  Instruction* call_instr =
+      Emit(opcode, buffer.output_count, buffer.outputs,
+           buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+
+  call_instr->MarkAsCall();
+  if (deoptimization != NULL) {
+    ASSERT(continuation != NULL);
+    call_instr->MarkAsControl();
+  }
+
+  // Caller clean up of stack for C-style calls.
+  if (descriptor->kind() == CallDescriptor::kCallAddress &&
+      buffer.pushed_count > 0) {
+    ASSERT(deoptimization == NULL && continuation == NULL);
+    Emit(kArmDrop | MiscField::encode(buffer.pushed_count), NULL);
+  }
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+                             InstructionCode opcode, FlagsContinuation* cont,
+                             bool commutative, bool requires_output) {
+  ArmOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+
+  Node* left = m.left().node();
+  Node* right = m.right().node();
+  if (g.CanBeImmediate(m.left().node(), opcode) || m.left().IsWord32Sar() ||
+      m.left().IsWord32Shl() || m.left().IsWord32Shr()) {
+    if (!commutative) cont->Commute();
+    std::swap(left, right);
+  }
+
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    InstructionOperand* outputs[1];
+    size_t output_count = 0;
+    if (requires_output) {
+      outputs[output_count++] = g.DefineAsRegister(node);
+    }
+    InstructionOperand* labels[] = {g.Label(cont->true_block()),
+                                    g.Label(cont->false_block())};
+    const size_t label_count = ARRAY_SIZE(labels);
+    EmitBinop(selector, opcode, output_count, outputs, left, right, label_count,
+              labels)->MarkAsControl();
+  } else {
+    ASSERT(cont->IsSet());
+    EmitBinop(selector, opcode, cont->result(), left, right);
+  }
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt32Add:
+      return VisitWordCompare(this, node, kArmCmn, cont, true, false);
+    case IrOpcode::kInt32Sub:
+      return VisitWordCompare(this, node, kArmCmp, cont, false, false);
+    case IrOpcode::kWord32And:
+      return VisitWordCompare(this, node, kArmTst, cont, true, false);
+    case IrOpcode::kWord32Or:
+      return VisitWordCompare(this, node, kArmOrr, cont, true, true);
+    case IrOpcode::kWord32Xor:
+      return VisitWordCompare(this, node, kArmTeq, cont, true, false);
+    default:
+      break;
+  }
+
+  ArmOperandGenerator g(this);
+  InstructionCode opcode =
+      cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
+  if (cont->IsBranch()) {
+    Emit(opcode, NULL, g.UseRegister(node), g.UseRegister(node),
+         g.Label(cont->true_block()),
+         g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    Emit(opcode, g.DefineAsRegister(cont->result()), g.UseRegister(node),
+         g.UseRegister(node));
+  }
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kArmCmp, cont, false, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+                                              FlagsContinuation* cont) {
+  ArmOperandGenerator g(this);
+  Float64BinopMatcher m(node);
+  if (cont->IsBranch()) {
+    Emit(cont->Encode(kArmVcmpF64), NULL, g.UseDoubleRegister(m.left().node()),
+         g.UseDoubleRegister(m.right().node()), g.Label(cont->true_block()),
+         g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    ASSERT(cont->IsSet());
+    Emit(cont->Encode(kArmVcmpF64), g.DefineAsRegister(cont->result()),
+         g.UseDoubleRegister(m.left().node()),
+         g.UseDoubleRegister(m.right().node()));
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm/linkage-arm.cc b/src/compiler/arm/linkage-arm.cc
new file mode 100644 (file)
index 0000000..489a0c6
--- /dev/null
@@ -0,0 +1,66 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct LinkageHelperTraits {
+  static Register ReturnValueReg() { return r0; }
+  static Register ReturnValue2Reg() { return r1; }
+  static Register JSCallFunctionReg() { return r1; }
+  static Register ContextReg() { return cp; }
+  static Register RuntimeCallFunctionReg() { return r1; }
+  static Register RuntimeCallArgCountReg() { return r0; }
+  static RegList CCalleeSaveRegisters() {
+    return r4.bit() | r5.bit() | r6.bit() | r7.bit() | r8.bit() | r9.bit() |
+           r10.bit();
+  }
+  static Register CRegisterParameter(int i) {
+    static Register register_parameters[] = {r0, r1, r2, r3};
+    return register_parameters[i];
+  }
+  static int CRegisterParametersLength() { return 4; }
+};
+
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+  return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>(
+      zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Property properties,
+    CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+  return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>(
+      zone, function, parameter_count, properties, can_deoptimize);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count) {
+  return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
+      this->info_->zone(), descriptor, stack_parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(
+    Zone* zone, int num_params, MachineRepresentation return_type,
+    const MachineRepresentation* param_types) {
+  return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
+      zone, num_params, return_type, param_types);
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
new file mode 100644 (file)
index 0000000..28c7c97
--- /dev/null
@@ -0,0 +1,825 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/arm64/macro-assembler-arm64.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// Adds Arm64-specific methods to convert InstructionOperands.
+class Arm64OperandConverter V8_FINAL : public InstructionOperandConverter {
+ public:
+  Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  Register InputRegister32(int index) {
+    return ToRegister(instr_->InputAt(index)).W();
+  }
+
+  Register InputRegister64(int index) { return InputRegister(index); }
+
+  Operand InputImmediate(int index) {
+    return ToImmediate(instr_->InputAt(index));
+  }
+
+  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
+
+  Operand InputOperand64(int index) { return InputOperand(index); }
+
+  Operand InputOperand32(int index) {
+    return ToOperand32(instr_->InputAt(index));
+  }
+
+  Register OutputRegister64() { return OutputRegister(); }
+
+  Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
+
+  MemOperand MemoryOperand(int* first_index) {
+    const int index = *first_index;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+        break;
+      case kMode_MRI:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+      case kMode_MRR:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
+                          SXTW);
+    }
+    UNREACHABLE();
+    return MemOperand(no_reg);
+  }
+
+  MemOperand MemoryOperand() {
+    int index = 0;
+    return MemoryOperand(&index);
+  }
+
+  Operand ToOperand(InstructionOperand* op) {
+    if (op->IsRegister()) {
+      return Operand(ToRegister(op));
+    }
+    return ToImmediate(op);
+  }
+
+  Operand ToOperand32(InstructionOperand* op) {
+    if (op->IsRegister()) {
+      return Operand(ToRegister(op).W());
+    }
+    return ToImmediate(op);
+  }
+
+  Operand ToImmediate(InstructionOperand* operand) {
+    Constant constant = ToConstant(operand);
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Operand(constant.ToInt32());
+      case Constant::kInt64:
+        return Operand(constant.ToInt64());
+      case Constant::kFloat64:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+      case Constant::kExternalReference:
+        return Operand(constant.ToExternalReference());
+      case Constant::kHeapObject:
+        return Operand(constant.ToHeapObject());
+    }
+    UNREACHABLE();
+    return Operand(-1);
+  }
+
+  MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
+    ASSERT(op != NULL);
+    ASSERT(!op->IsRegister());
+    ASSERT(!op->IsDoubleRegister());
+    ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+    // The linkage computes where all spill slots are located.
+    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+    return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
+                      offset.offset());
+  }
+};
+
+
+#define ASSEMBLE_SHIFT(asm_instr, width)                                       \
+  do {                                                                         \
+    if (instr->InputAt(1)->IsRegister()) {                                     \
+      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0),       \
+                   i.InputRegister##width(1));                                 \
+    } else {                                                                   \
+      int64_t imm = i.InputOperand##width(1).immediate().value();              \
+      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
+    }                                                                          \
+  } while (0)
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  Arm64OperandConverter i(this, instr);
+
+  switch (ArchOpcodeField::decode(instr->opcode())) {
+    case kArchJmp:
+      __ B(code_->GetLabel(i.InputBlock(0)));
+      break;
+    case kArchNop:
+      // Don't emit code for nops.
+      break;
+    case kArchRet:
+      AssembleReturn();
+      break;
+    case kArchDeoptimize: {
+      int deoptimization_id = MiscField::decode(instr->opcode());
+      BuildTranslation(instr, deoptimization_id);
+
+      Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+          isolate(), deoptimization_id, Deoptimizer::LAZY);
+      __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+      break;
+    }
+    case kArm64Add:
+      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Add32:
+      __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64And:
+      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64And32:
+      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64Mul:
+      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      break;
+    case kArm64Mul32:
+      __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
+      break;
+    case kArm64Idiv:
+      __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      break;
+    case kArm64Idiv32:
+      __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
+      break;
+    case kArm64Udiv:
+      __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      break;
+    case kArm64Udiv32:
+      __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
+      break;
+    case kArm64Imod: {
+      UseScratchRegisterScope scope(masm());
+      Register temp = scope.AcquireX();
+      __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
+      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
+      break;
+    }
+    case kArm64Imod32: {
+      UseScratchRegisterScope scope(masm());
+      Register temp = scope.AcquireW();
+      __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
+      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
+              i.InputRegister32(0));
+      break;
+    }
+    case kArm64Umod: {
+      UseScratchRegisterScope scope(masm());
+      Register temp = scope.AcquireX();
+      __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
+      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
+      break;
+    }
+    case kArm64Umod32: {
+      UseScratchRegisterScope scope(masm());
+      Register temp = scope.AcquireW();
+      __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
+      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
+              i.InputRegister32(0));
+      break;
+    }
+    // TODO(dcarney): use mvn instr??
+    case kArm64Not:
+      __ Orn(i.OutputRegister(), xzr, i.InputOperand(0));
+      break;
+    case kArm64Not32:
+      __ Orn(i.OutputRegister32(), wzr, i.InputOperand32(0));
+      break;
+    case kArm64Neg:
+      __ Neg(i.OutputRegister(), i.InputOperand(0));
+      break;
+    case kArm64Neg32:
+      __ Neg(i.OutputRegister32(), i.InputOperand32(0));
+      break;
+    case kArm64Or:
+      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Or32:
+      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64Xor:
+      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Xor32:
+      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64Sub:
+      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Sub32:
+      __ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64Shl:
+      ASSEMBLE_SHIFT(Lsl, 64);
+      break;
+    case kArm64Shl32:
+      ASSEMBLE_SHIFT(Lsl, 32);
+      break;
+    case kArm64Shr:
+      ASSEMBLE_SHIFT(Lsr, 64);
+      break;
+    case kArm64Shr32:
+      ASSEMBLE_SHIFT(Lsr, 32);
+      break;
+    case kArm64Sar:
+      ASSEMBLE_SHIFT(Asr, 64);
+      break;
+    case kArm64Sar32:
+      ASSEMBLE_SHIFT(Asr, 32);
+      break;
+    case kArm64CallCodeObject: {
+      if (instr->InputAt(0)->IsImmediate()) {
+        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+        __ Call(code, RelocInfo::CODE_TARGET);
+        RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+                        Safepoint::kNoLazyDeopt);
+      } else {
+        Register reg = i.InputRegister(0);
+        int entry = Code::kHeaderSize - kHeapObjectTag;
+        __ Ldr(reg, MemOperand(reg, entry));
+        __ Call(reg);
+        RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+                        Safepoint::kNoLazyDeopt);
+      }
+      bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
+      if (lazy_deopt) {
+        RecordLazyDeoptimizationEntry(instr);
+      }
+      // Meaningless instruction for ICs to overwrite.
+      AddNopForSmiCodeInlining();
+      break;
+    }
+    case kArm64CallJSFunction: {
+      Register func = i.InputRegister(0);
+
+      // TODO(jarin) The load of the context should be separated from the call.
+      __ Ldr(cp, FieldMemOperand(func, JSFunction::kContextOffset));
+      __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Call(x10);
+
+      RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+                      Safepoint::kNoLazyDeopt);
+      RecordLazyDeoptimizationEntry(instr);
+      break;
+    }
+    case kArm64CallAddress: {
+      DirectCEntryStub stub(isolate());
+      stub.GenerateCall(masm(), i.InputRegister(0));
+      break;
+    }
+    case kArm64Claim: {
+      int words = MiscField::decode(instr->opcode());
+      __ Claim(words);
+      break;
+    }
+    case kArm64Poke: {
+      int slot = MiscField::decode(instr->opcode());
+      Operand operand(slot * kPointerSize);
+      __ Poke(i.InputRegister(0), operand);
+      break;
+    }
+    case kArm64PokePairZero: {
+      // TODO(dcarney): test slot offset and register order.
+      int slot = MiscField::decode(instr->opcode()) - 1;
+      __ PokePair(i.InputRegister(0), xzr, slot * kPointerSize);
+      break;
+    }
+    case kArm64PokePair: {
+      int slot = MiscField::decode(instr->opcode()) - 1;
+      __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
+      break;
+    }
+    case kArm64Drop: {
+      int words = MiscField::decode(instr->opcode());
+      __ Drop(words);
+      break;
+    }
+    case kArm64Cmp:
+      __ Cmp(i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Cmp32:
+      __ Cmp(i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64Tst:
+      __ Tst(i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Tst32:
+      __ Tst(i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64Float64Cmp:
+      __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kArm64Float64Add:
+      __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      break;
+    case kArm64Float64Sub:
+      __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      break;
+    case kArm64Float64Mul:
+      __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      break;
+    case kArm64Float64Div:
+      __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      break;
+    case kArm64Float64Mod: {
+      // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
+      FrameScope scope(masm(), StackFrame::MANUAL);
+      ASSERT(d0.is(i.InputDoubleRegister(0)));
+      ASSERT(d1.is(i.InputDoubleRegister(1)));
+      ASSERT(d0.is(i.OutputDoubleRegister()));
+      // TODO(dcarney): make sure this saves all relevant registers.
+      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+                       0, 2);
+      break;
+    }
+    case kArm64Int32ToInt64:
+      __ Sxtw(i.OutputRegister(), i.InputRegister(0));
+      break;
+    case kArm64Int64ToInt32:
+      if (!i.OutputRegister().is(i.InputRegister(0))) {
+        __ Mov(i.OutputRegister(), i.InputRegister(0));
+      }
+      break;
+    case kArm64Float64ToInt32:
+      __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
+      break;
+    case kArm64Int32ToFloat64:
+      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
+      break;
+    case kArm64LoadWord8:
+      __ Ldrb(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kArm64StoreWord8:
+      __ Strb(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kArm64LoadWord16:
+      __ Ldrh(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kArm64StoreWord16:
+      __ Strh(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kArm64LoadWord32:
+      __ Ldr(i.OutputRegister32(), i.MemoryOperand());
+      break;
+    case kArm64StoreWord32:
+      __ Str(i.InputRegister32(2), i.MemoryOperand());
+      break;
+    case kArm64LoadWord64:
+      __ Ldr(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kArm64StoreWord64:
+      __ Str(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kArm64Float64Load:
+      __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
+      break;
+    case kArm64Float64Store:
+      __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
+      break;
+    case kArm64StoreWriteBarrier: {
+      Register object = i.InputRegister(0);
+      Register index = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      __ Add(index, object, Operand(index, SXTW));
+      __ Str(value, MemOperand(index));
+      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
+                                ? kSaveFPRegs
+                                : kDontSaveFPRegs;
+      // TODO(dcarney): we shouldn't test write barriers from c calls.
+      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
+      UseScratchRegisterScope scope(masm());
+      Register temp = no_reg;
+      if (csp.is(masm()->StackPointer())) {
+        temp = scope.AcquireX();
+        lr_status = kLRHasBeenSaved;
+        __ Push(lr, temp);  // Push a pair to keep csp 16-byte aligned.
+      }
+      __ RecordWrite(object, index, value, lr_status, mode);
+      if (csp.is(masm()->StackPointer())) {
+        __ Pop(temp, lr);
+      }
+      break;
+    }
+  }
+}
+
+
+// Assemble branches after this instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+                                       FlagsCondition condition) {
+  Arm64OperandConverter i(this, instr);
+  Label done;
+
+  // Emit a branch. The true and false targets are always the last two inputs
+  // to the instruction.
+  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+  bool fallthru = IsNextInAssemblyOrder(fblock);
+  Label* tlabel = code()->GetLabel(tblock);
+  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+  switch (condition) {
+    case kUnorderedEqual:
+      __ B(vs, flabel);
+    // Fall through.
+    case kEqual:
+      __ B(eq, tlabel);
+      break;
+    case kUnorderedNotEqual:
+      __ B(vs, tlabel);
+    // Fall through.
+    case kNotEqual:
+      __ B(ne, tlabel);
+      break;
+    case kSignedLessThan:
+      __ B(lt, tlabel);
+      break;
+    case kSignedGreaterThanOrEqual:
+      __ B(ge, tlabel);
+      break;
+    case kSignedLessThanOrEqual:
+      __ B(le, tlabel);
+      break;
+    case kSignedGreaterThan:
+      __ B(gt, tlabel);
+      break;
+    case kUnorderedLessThan:
+      __ B(vs, flabel);
+    // Fall through.
+    case kUnsignedLessThan:
+      __ B(lo, tlabel);
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ B(vs, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      __ B(hs, tlabel);
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ B(vs, flabel);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      __ B(ls, tlabel);
+      break;
+    case kUnorderedGreaterThan:
+      __ B(vs, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      __ B(hi, tlabel);
+      break;
+  }
+  if (!fallthru) __ B(flabel);  // no fallthru to flabel.
+  __ Bind(&done);
+}
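
Each kUnordered* case above peels off the NaN outcome first: Fcmp sets the V flag for unordered operands, so a b.vs to the appropriate target runs before the ordered condition is tested. Modeled in C++ for the less-than case, where an unordered comparison must take the false edge:

    #include <cassert>
    #include <cmath>

    bool UnorderedLessThan(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) return false;  // __ B(vs, flabel)
      return a < b;                                      // __ B(lo, tlabel)
    }

    int main() {
      assert(UnorderedLessThan(0.5, 1.0));
      assert(!UnorderedLessThan(NAN, 1.0));  // unordered routes to flabel
      return 0;
    }
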
+
+
+// Assemble boolean materializations after this instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  Arm64OperandConverter i(this, instr);
+  Label done;
+
+  // Materialize a full 64-bit 1 or 0 value.
+  Label check;
+  Register reg = i.OutputRegister();
+  Condition cc = nv;
+  switch (condition) {
+    case kUnorderedEqual:
+      __ B(vc, &check);
+      __ Mov(reg, 0);
+      __ B(&done);
+    // Fall through.
+    case kEqual:
+      cc = eq;
+      break;
+    case kUnorderedNotEqual:
+      __ B(vc, &check);
+      __ Mov(reg, 1);
+      __ B(&done);
+    // Fall through.
+    case kNotEqual:
+      cc = ne;
+      break;
+    case kSignedLessThan:
+      cc = lt;
+      break;
+    case kSignedGreaterThanOrEqual:
+      cc = ge;
+      break;
+    case kSignedLessThanOrEqual:
+      cc = le;
+      break;
+    case kSignedGreaterThan:
+      cc = gt;
+      break;
+    case kUnorderedLessThan:
+      __ B(vc, &check);
+      __ Mov(reg, 0);
+      __ B(&done);
+    // Fall through.
+    case kUnsignedLessThan:
+      cc = lo;
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ B(vc, &check);
+      __ Mov(reg, 1);
+      __ B(&done);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      cc = hs;
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ B(vc, &check);
+      __ Mov(reg, 0);
+      __ B(&done);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      cc = ls;
+      break;
+    case kUnorderedGreaterThan:
+      __ B(vc, &check);
+      __ Mov(reg, 1);
+      __ B(&done);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      cc = hi;
+      break;
+  }
+  __ Bind(&check);
+  __ Cset(reg, cc);
+  __ Bind(&done);
+}
+
+
+// TODO(dcarney): increase stack slots in frame once before first use.
+static int AlignedStackSlots(int stack_slots) {
+  if (stack_slots & 1) stack_slots++;
+  return stack_slots;
+}
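
AArch64 requires csp to stay 16-byte aligned wherever it is used for memory access, so an odd count of 8-byte spill slots is rounded up before csp is adjusted (jssp carries no such constraint). The same rounding, branch-free:

    #include <cassert>

    int AlignedStackSlotsDemo(int stack_slots) {
      return (stack_slots + 1) & ~1;  // round up to an even number of slots
    }

    int main() {
      assert(AlignedStackSlotsDemo(3) == 4);
      assert(AlignedStackSlotsDemo(4) == 4);
      return 0;
    }
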
+
+
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    __ SetStackPointer(csp);
+    __ Push(lr, fp);
+    __ Mov(fp, csp);
+    // TODO(dcarney): correct callee saved registers.
+    __ PushCalleeSavedRegisters();
+    frame()->SetRegisterSaveAreaSize(20 * kPointerSize);
+  } else if (descriptor->IsJSFunctionCall()) {
+    CompilationInfo* info = linkage()->info();
+    __ SetStackPointer(jssp);
+    __ Prologue(info->IsCodePreAgingActive());
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+
+    // Sloppy mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
+    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+    if (info->strict_mode() == SLOPPY && !info->is_native()) {
+      Label ok;
+      // +2 for return address and saved frame pointer.
+      int receiver_slot = info->scope()->num_parameters() + 2;
+      __ Ldr(x10, MemOperand(fp, receiver_slot * kXRegSize));
+      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
+      __ Ldr(x10, GlobalObjectMemOperand());
+      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
+      __ Str(x10, MemOperand(fp, receiver_slot * kXRegSize));
+      __ Bind(&ok);
+    }
+
+  } else {
+    __ SetStackPointer(jssp);
+    __ StubPrologue();
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+  }
+  int stack_slots = frame()->GetSpillSlotCount();
+  if (stack_slots > 0) {
+    Register sp = __ StackPointer();
+    if (!sp.Is(csp)) {
+      __ Sub(sp, sp, stack_slots * kPointerSize);
+    }
+    __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
+  }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    if (frame()->GetRegisterSaveAreaSize() > 0) {
+      // Remove this frame's spill slots first.
+      int stack_slots = frame()->GetSpillSlotCount();
+      if (stack_slots > 0) {
+        __ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
+      }
+      // Restore registers.
+      // TODO(dcarney): correct callee saved registers.
+      __ PopCalleeSavedRegisters();
+    }
+    __ Mov(csp, fp);
+    __ Pop(fp, lr);
+    __ Ret();
+  } else {
+    __ Mov(jssp, fp);
+    __ Pop(fp, lr);
+    int pop_count =
+        descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
+    __ Drop(pop_count);
+    __ Ret();
+  }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  Arm64OperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    ASSERT(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      __ Mov(g.ToRegister(destination), src);
+    } else {
+      __ Str(src, g.ToMemOperand(destination, masm()));
+    }
+  } else if (source->IsStackSlot()) {
+    MemOperand src = g.ToMemOperand(source, masm());
+    ASSERT(destination->IsRegister() || destination->IsStackSlot());
+    if (destination->IsRegister()) {
+      __ Ldr(g.ToRegister(destination), src);
+    } else {
+      UseScratchRegisterScope scope(masm());
+      Register temp = scope.AcquireX();
+      __ Ldr(temp, src);
+      __ Str(temp, g.ToMemOperand(destination, masm()));
+    }
+  } else if (source->IsConstant()) {
+    ConstantOperand* constant_source = ConstantOperand::cast(source);
+    if (destination->IsRegister() || destination->IsStackSlot()) {
+      UseScratchRegisterScope scope(masm());
+      Register dst = destination->IsRegister() ? g.ToRegister(destination)
+                                               : scope.AcquireX();
+      Constant src = g.ToConstant(source);
+      if (src.type() == Constant::kHeapObject) {
+        __ LoadObject(dst, src.ToHeapObject());
+      } else {
+        __ Mov(dst, g.ToImmediate(source));
+      }
+      if (destination->IsStackSlot()) {
+        __ Str(dst, g.ToMemOperand(destination, masm()));
+      }
+    } else if (destination->IsDoubleRegister()) {
+      FPRegister result = g.ToDoubleRegister(destination);
+      __ Fmov(result, g.ToDouble(constant_source));
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      UseScratchRegisterScope scope(masm());
+      FPRegister temp = scope.AcquireD();
+      __ Fmov(temp, g.ToDouble(constant_source));
+      __ Str(temp, g.ToMemOperand(destination, masm()));
+    }
+  } else if (source->IsDoubleRegister()) {
+    FPRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      FPRegister dst = g.ToDoubleRegister(destination);
+      __ Fmov(dst, src);
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      __ Str(src, g.ToMemOperand(destination, masm()));
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    ASSERT(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    MemOperand src = g.ToMemOperand(source, masm());
+    if (destination->IsDoubleRegister()) {
+      __ Ldr(g.ToDoubleRegister(destination), src);
+    } else {
+      UseScratchRegisterScope scope(masm());
+      FPRegister temp = scope.AcquireD();
+      __ Ldr(temp, src);
+      __ Str(temp, g.ToMemOperand(destination, masm()));
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  Arm64OperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    // Register-register.
+    UseScratchRegisterScope scope(masm());
+    Register temp = scope.AcquireX();
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ Mov(temp, src);
+      __ Mov(src, dst);
+      __ Mov(dst, temp);
+    } else {
+      ASSERT(destination->IsStackSlot());
+      MemOperand dst = g.ToMemOperand(destination, masm());
+      __ Mov(temp, src);
+      __ Ldr(src, dst);
+      __ Str(temp, dst);
+    }
+  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+    UseScratchRegisterScope scope(masm());
+    CPURegister temp_0 = scope.AcquireX();
+    CPURegister temp_1 = scope.AcquireX();
+    MemOperand src = g.ToMemOperand(source, masm());
+    MemOperand dst = g.ToMemOperand(destination, masm());
+    __ Ldr(temp_0, src);
+    __ Ldr(temp_1, dst);
+    __ Str(temp_0, dst);
+    __ Str(temp_1, src);
+  } else if (source->IsDoubleRegister()) {
+    UseScratchRegisterScope scope(masm());
+    FPRegister temp = scope.AcquireD();
+    FPRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      FPRegister dst = g.ToDoubleRegister(destination);
+      __ Fmov(temp, src);
+      __ Fmov(src, dst);
+      __ Fmov(dst, temp);
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      MemOperand dst = g.ToMemOperand(destination, masm());
+      __ Fmov(temp, src);
+      __ Ldr(src, dst);
+      __ Str(temp, dst);
+    }
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
+
+#undef __
+
+#if DEBUG
+
+// Checks whether the code between start_pc and end_pc is a no-op.
+bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
+                                            int end_pc) {
+  if (start_pc + 4 != end_pc) {
+    return false;
+  }
+  Address instr_address = code->instruction_start() + start_pc;
+
+  v8::internal::Instruction* instr =
+      reinterpret_cast<v8::internal::Instruction*>(instr_address);
+  return instr->IsMovz() && instr->Rd() == xzr.code() && instr->SixtyFourBits();
+}
+
+#endif  // DEBUG
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm64/instruction-codes-arm64.h b/src/compiler/arm64/instruction-codes-arm64.h
new file mode 100644 (file)
index 0000000..7241e9a
--- /dev/null
@@ -0,0 +1,101 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
+#define V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// ARM64-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(Arm64Add)                      \
+  V(Arm64Add32)                    \
+  V(Arm64And)                      \
+  V(Arm64And32)                    \
+  V(Arm64Cmp)                      \
+  V(Arm64Cmp32)                    \
+  V(Arm64Tst)                      \
+  V(Arm64Tst32)                    \
+  V(Arm64Or)                       \
+  V(Arm64Or32)                     \
+  V(Arm64Xor)                      \
+  V(Arm64Xor32)                    \
+  V(Arm64Sub)                      \
+  V(Arm64Sub32)                    \
+  V(Arm64Mul)                      \
+  V(Arm64Mul32)                    \
+  V(Arm64Idiv)                     \
+  V(Arm64Idiv32)                   \
+  V(Arm64Udiv)                     \
+  V(Arm64Udiv32)                   \
+  V(Arm64Imod)                     \
+  V(Arm64Imod32)                   \
+  V(Arm64Umod)                     \
+  V(Arm64Umod32)                   \
+  V(Arm64Not)                      \
+  V(Arm64Not32)                    \
+  V(Arm64Neg)                      \
+  V(Arm64Neg32)                    \
+  V(Arm64Shl)                      \
+  V(Arm64Shl32)                    \
+  V(Arm64Shr)                      \
+  V(Arm64Shr32)                    \
+  V(Arm64Sar)                      \
+  V(Arm64Sar32)                    \
+  V(Arm64CallCodeObject)           \
+  V(Arm64CallJSFunction)           \
+  V(Arm64CallAddress)              \
+  V(Arm64Claim)                    \
+  V(Arm64Poke)                     \
+  V(Arm64PokePairZero)             \
+  V(Arm64PokePair)                 \
+  V(Arm64Drop)                     \
+  V(Arm64Float64Cmp)               \
+  V(Arm64Float64Add)               \
+  V(Arm64Float64Sub)               \
+  V(Arm64Float64Mul)               \
+  V(Arm64Float64Div)               \
+  V(Arm64Float64Mod)               \
+  V(Arm64Int32ToInt64)             \
+  V(Arm64Int64ToInt32)             \
+  V(Arm64Float64ToInt32)           \
+  V(Arm64Int32ToFloat64)           \
+  V(Arm64Float64Load)              \
+  V(Arm64Float64Store)             \
+  V(Arm64LoadWord8)                \
+  V(Arm64StoreWord8)               \
+  V(Arm64LoadWord16)               \
+  V(Arm64StoreWord16)              \
+  V(Arm64LoadWord32)               \
+  V(Arm64StoreWord32)              \
+  V(Arm64LoadWord64)               \
+  V(Arm64StoreWord64)              \
+  V(Arm64StoreWriteBarrier)
+
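+// The list is consumed via the usual V8 list-macro pattern: the V argument
+// is applied once per opcode. For example (a sketch of typical use, not code
+// from this change):
+//
+//   #define DECLARE_CASE(Name) case k##Name:
+//   TARGET_ARCH_OPCODE_LIST(DECLARE_CASE)
+//
+// expands to "case kArm64Add: case kArm64Add32: ..." and so on.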
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+  V(MRI) /* [%r0 + K] */               \
+  V(MRR) /* [%r0 + %r1] */
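+
+// For example, the ARM64 instruction selector emits an MRI-shaped load
+// roughly as (see VisitLoad in instruction-selector-arm64.cc):
+//
+//   Emit(kArm64LoadWord64 | AddressingModeField::encode(kMode_MRI),
+//        result, g.UseRegister(base), g.UseImmediate(index));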
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
new file mode 100644 (file)
index 0000000..7352c46
--- /dev/null
@@ -0,0 +1,606 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+enum ImmediateMode {
+  kArithmeticImm,  // 12 bit unsigned immediate shifted left by 0 or 12 bits
+  kShift32Imm,     // 0 - 31
+  kShift64Imm,     // 0 - 63
+  kLogical32Imm,
+  kLogical64Imm,
+  kLoadStoreImm,  // unsigned 9 bit or signed 7 bit
+  kNoImmediate
+};
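+
+// For instance, under kArithmeticImm both 0xFFF (shift 0) and 0xFFF000
+// (shift 12) are encodable, while 0x1001 sets bits in both halves and has to
+// be materialized into a register instead.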
+
+
+// Adds Arm64-specific methods for generating operands.
+class Arm64OperandGenerator V8_FINAL : public OperandGenerator {
+ public:
+  explicit Arm64OperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
+  InstructionOperand* UseOperand(Node* node, ImmediateMode mode) {
+    if (CanBeImmediate(node, mode)) {
+      return UseImmediate(node);
+    }
+    return UseRegister(node);
+  }
+
+  bool CanBeImmediate(Node* node, ImmediateMode mode) {
+    int64_t value;
+    switch (node->opcode()) {
+      // TODO(turbofan): SMI number constants as immediates.
+      case IrOpcode::kInt32Constant:
+        value = ValueOf<int32_t>(node->op());
+        break;
+      default:
+        return false;
+    }
+    unsigned ignored;
+    switch (mode) {
+      case kLogical32Imm:
+        // TODO(dcarney): some unencodable values can be handled by
+        // switching instructions.
+        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
+                                       &ignored, &ignored, &ignored);
+      case kLogical64Imm:
+        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
+                                       &ignored, &ignored, &ignored);
+      case kArithmeticImm:
+        // TODO(dcarney): -values can be handled by instruction swapping
+        return Assembler::IsImmAddSub(value);
+      case kShift32Imm:
+        return 0 <= value && value <= 31;
+      case kShift64Imm:
+        return 0 <= value && value <= 63;
+      case kLoadStoreImm:
+        return (0 <= value && value < (1 << 9)) ||
+               (-(1 << 6) <= value && value < (1 << 6));
+      case kNoImmediate:
+        return false;
+    }
+    return false;
+  }
+};
+
+
+static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
+                    Node* node) {
+  Arm64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}
+
+
+static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
+                     Node* node) {
+  Arm64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)));
+}
+
+
+static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                            Node* node) {
+  Arm64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsDoubleRegister(node),
+                 g.UseDoubleRegister(node->InputAt(0)),
+                 g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
+                     Node* node, ImmediateMode operand_mode) {
+  Arm64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseOperand(node->InputAt(1), operand_mode));
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       ArchOpcode opcode, ImmediateMode operand_mode,
+                       bool commutative) {
+  VisitRRO(selector, opcode, node, operand_mode);
+}
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+  MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
+  Arm64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  InstructionOperand* result = rep == kMachineFloat64
+                                   ? g.DefineAsDoubleRegister(node)
+                                   : g.DefineAsRegister(node);
+
+  ArchOpcode opcode;
+  switch (rep) {
+    case kMachineFloat64:
+      opcode = kArm64Float64Load;
+      break;
+    case kMachineWord8:
+      opcode = kArm64LoadWord8;
+      break;
+    case kMachineWord16:
+      opcode = kArm64LoadWord16;
+      break;
+    case kMachineWord32:
+      opcode = kArm64LoadWord32;
+      break;
+    case kMachineTagged:  // Fall through.
+    case kMachineWord64:
+      opcode = kArm64LoadWord64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(index, kLoadStoreImm)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), result,
+         g.UseRegister(base), g.UseImmediate(index));
+  } else if (g.CanBeImmediate(base, kLoadStoreImm)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), result,
+         g.UseRegister(index), g.UseImmediate(base));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_MRR), result,
+         g.UseRegister(base), g.UseRegister(index));
+  }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+  Arm64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+  MachineRepresentation rep = store_rep.rep;
+  if (store_rep.write_barrier_kind == kFullWriteBarrier) {
+    ASSERT(rep == kMachineTagged);
+    // TODO(dcarney): refactor RecordWrite function to take temp registers
+    //                and pass them here instead of using fixed regs
+    // TODO(dcarney): handle immediate indices.
+    InstructionOperand* temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
+    Emit(kArm64StoreWriteBarrier, NULL, g.UseFixed(base, x10),
+         g.UseFixed(index, x11), g.UseFixed(value, x12), ARRAY_SIZE(temps),
+         temps);
+    return;
+  }
+  ASSERT_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
+  InstructionOperand* val;
+  if (rep == kMachineFloat64) {
+    val = g.UseDoubleRegister(value);
+  } else {
+    val = g.UseRegister(value);
+  }
+  ArchOpcode opcode;
+  switch (rep) {
+    case kMachineFloat64:
+      opcode = kArm64Float64Store;
+      break;
+    case kMachineWord8:
+      opcode = kArm64StoreWord8;
+      break;
+    case kMachineWord16:
+      opcode = kArm64StoreWord16;
+      break;
+    case kMachineWord32:
+      opcode = kArm64StoreWord32;
+      break;
+    case kMachineTagged:  // Fall through.
+    case kMachineWord64:
+      opcode = kArm64StoreWord64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(index, kLoadStoreImm)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+         g.UseRegister(base), g.UseImmediate(index), val);
+  } else if (g.CanBeImmediate(base, kLoadStoreImm)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+         g.UseRegister(index), g.UseImmediate(base), val);
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_MRR), NULL,
+         g.UseRegister(base), g.UseRegister(index), val);
+  }
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  VisitBinop(this, node, kArm64And32, kLogical32Imm, true);
+}
+
+
+void InstructionSelector::VisitWord64And(Node* node) {
+  VisitBinop(this, node, kArm64And, kLogical64Imm, true);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  VisitBinop(this, node, kArm64Or32, kLogical32Imm, true);
+}
+
+
+void InstructionSelector::VisitWord64Or(Node* node) {
+  VisitBinop(this, node, kArm64Or, kLogical64Imm, true);
+}
+
+
+template <typename T>
+static void VisitXor(InstructionSelector* selector, Node* node,
+                     ArchOpcode xor_opcode, ArchOpcode not_opcode,
+                     ImmediateMode imm_mode) {
+  Arm64OperandGenerator g(selector);
+  BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
+  if (m.right().Is(-1)) {
+    selector->Emit(not_opcode, g.DefineAsRegister(node),
+                   g.UseRegister(m.left().node()));
+  } else {
+    VisitBinop(selector, node, xor_opcode, imm_mode, true);
+  }
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  VisitXor<int32_t>(this, node, kArm64Xor32, kArm64Not32, kLogical32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Xor(Node* node) {
+  VisitXor<int64_t>(this, node, kArm64Xor, kArm64Not, kLogical64Imm);
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  VisitRRO(this, kArm64Shl32, node, kShift32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Shl(Node* node) {
+  VisitRRO(this, kArm64Shl, node, kShift64Imm);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  VisitRRO(this, kArm64Shr32, node, kShift32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+  VisitRRO(this, kArm64Shr, node, kShift64Imm);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  VisitRRO(this, kArm64Sar32, node, kShift32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+  VisitRRO(this, kArm64Sar, node, kShift64Imm);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  VisitBinop(this, node, kArm64Add32, kArithmeticImm, true);
+}
+
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+  VisitBinop(this, node, kArm64Add, kArithmeticImm, true);
+}
+
+
+template <typename T>
+static void VisitSub(InstructionSelector* selector, Node* node,
+                     ArchOpcode sub_opcode, ArchOpcode neg_opcode) {
+  Arm64OperandGenerator g(selector);
+  BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
+  if (m.left().Is(0)) {
+    selector->Emit(neg_opcode, g.DefineAsRegister(node),
+                   g.UseRegister(m.right().node()));
+  } else {
+    VisitBinop(selector, node, sub_opcode, kArithmeticImm, false);
+  }
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  VisitSub<int32_t>(this, node, kArm64Sub32, kArm64Neg32);
+}
+
+
+void InstructionSelector::VisitInt64Sub(Node* node) {
+  VisitSub<int64_t>(this, node, kArm64Sub, kArm64Neg);
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  VisitRRR(this, kArm64Mul32, node);
+}
+
+
+void InstructionSelector::VisitInt64Mul(Node* node) {
+  VisitRRR(this, kArm64Mul, node);
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  VisitRRR(this, kArm64Idiv32, node);
+}
+
+
+void InstructionSelector::VisitInt64Div(Node* node) {
+  VisitRRR(this, kArm64Idiv, node);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+  VisitRRR(this, kArm64Udiv32, node);
+}
+
+
+void InstructionSelector::VisitInt64UDiv(Node* node) {
+  VisitRRR(this, kArm64Udiv, node);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  VisitRRR(this, kArm64Imod32, node);
+}
+
+
+void InstructionSelector::VisitInt64Mod(Node* node) {
+  VisitRRR(this, kArm64Imod, node);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+  VisitRRR(this, kArm64Umod32, node);
+}
+
+
+void InstructionSelector::VisitInt64UMod(Node* node) {
+  VisitRRR(this, kArm64Umod, node);
+}
+
+
+void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
+  VisitRR(this, kArm64Int32ToInt64, node);
+}
+
+
+void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
+  VisitRR(this, kArm64Int64ToInt32, node);
+}
+
+
+void InstructionSelector::VisitConvertInt32ToFloat64(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Int32ToFloat64, g.DefineAsDoubleRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitConvertFloat64ToInt32(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Float64ToInt32, g.DefineAsRegister(node),
+       g.UseDoubleRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  VisitRRRFloat64(this, kArm64Float64Add, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  VisitRRRFloat64(this, kArm64Float64Sub, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  VisitRRRFloat64(this, kArm64Float64Mul, node);
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  VisitRRRFloat64(this, kArm64Float64Div, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Float64Mod, g.DefineAsFixedDouble(node, d0),
+       g.UseFixedDouble(node->InputAt(0), d0),
+       g.UseFixedDouble(node->InputAt(1), d1))->MarkAsCall();
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                         InstructionOperand* left, InstructionOperand* right,
+                         FlagsContinuation* cont) {
+  Arm64OperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    ASSERT(cont->IsSet());
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  }
+}
+
+
+// Shared routine for multiple word compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+                             InstructionCode opcode, FlagsContinuation* cont,
+                             bool commutative) {
+  Arm64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right, kArithmeticImm)) {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+                 cont);
+  } else if (g.CanBeImmediate(left, kArithmeticImm)) {
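+    // Commuting swaps the operand order, so the continuation's condition is
+    // mirrored too, e.g. "1 < x" is tested as "x > 1".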
+    if (!commutative) cont->Commute();
+    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+                 cont);
+  } else {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+                 cont);
+  }
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kWord32And:
+      return VisitWordCompare(this, node, kArm64Tst32, cont, true);
+    default:
+      break;
+  }
+
+  Arm64OperandGenerator g(this);
+  VisitCompare(this, kArm64Tst32, g.UseRegister(node), g.UseRegister(node),
+               cont);
+}
+
+
+void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kWord64And:
+      return VisitWordCompare(this, node, kArm64Tst, cont, true);
+    default:
+      break;
+  }
+
+  Arm64OperandGenerator g(this);
+  VisitCompare(this, kArm64Tst, g.UseRegister(node), g.UseRegister(node), cont);
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kArm64Cmp32, cont, false);
+}
+
+
+void InstructionSelector::VisitWord64Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kArm64Cmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+                                              FlagsContinuation* cont) {
+  Arm64OperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(this, kArm64Float64Cmp, g.UseDoubleRegister(left),
+               g.UseDoubleRegister(right), cont);
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+                                    BasicBlock* deoptimization) {
+  Arm64OperandGenerator g(this);
+  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+  CallBuffer buffer(zone(), descriptor);  // TODO(turbofan): temp zone here?
+
+  // Compute InstructionOperands for inputs and outputs.
+  // TODO(turbofan): on ARM64 it's probably better to use the code object in a
+  // register if there are multiple uses of it. Improve constant pool and the
+  // heuristics in the register allocator for where to emit constants.
+  InitializeCallBuffer(call, &buffer, true, false, continuation,
+                       deoptimization);
+
+  // Push the arguments to the stack.
+  bool is_c_frame = descriptor->kind() == CallDescriptor::kCallAddress;
+  bool pushed_count_uneven = (buffer.pushed_count & 1) != 0;
+  int aligned_push_count = buffer.pushed_count;
+  if (is_c_frame && pushed_count_uneven) {
+    aligned_push_count++;
+  }
+  // TODO(dcarney): claim and poke probably take small immediates,
+  //                loop here or whatever.
+  // Bump the stack pointer(s).
+  if (aligned_push_count > 0) {
+    // TODO(dcarney): it would be better to bump the csp here only
+    //                and emit paired stores with increment for non c frames.
+    Emit(kArm64Claim | MiscField::encode(aligned_push_count), NULL);
+  }
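+  // For example, three pushed arguments on a C frame claim four slots; the
+  // loop below then emits one PokePairZero (the odd argument plus a zero
+  // pad) and one PokePair for the remaining two.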
+  // Move arguments to the stack.
+  {
+    int slot = buffer.pushed_count - 1;
+    // Emit the uneven pushes.
+    if (pushed_count_uneven) {
+      Node* input = buffer.pushed_nodes[slot];
+      ArchOpcode opcode = is_c_frame ? kArm64PokePairZero : kArm64Poke;
+      Emit(opcode | MiscField::encode(slot), NULL, g.UseRegister(input));
+      slot--;
+    }
+    // Now all pushes can be done in pairs.
+    for (; slot >= 0; slot -= 2) {
+      Emit(kArm64PokePair | MiscField::encode(slot), NULL,
+           g.UseRegister(buffer.pushed_nodes[slot]),
+           g.UseRegister(buffer.pushed_nodes[slot - 1]));
+    }
+  }
+
+  // Select the appropriate opcode based on the call type.
+  InstructionCode opcode;
+  switch (descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject: {
+      bool lazy_deopt = descriptor->CanLazilyDeoptimize();
+      opcode = kArm64CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+      break;
+    }
+    case CallDescriptor::kCallAddress:
+      opcode = kArm64CallAddress;
+      break;
+    case CallDescriptor::kCallJSFunction:
+      opcode = kArm64CallJSFunction;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  // Emit the call instruction.
+  Instruction* call_instr =
+      Emit(opcode, buffer.output_count, buffer.outputs,
+           buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+
+  call_instr->MarkAsCall();
+  if (deoptimization != NULL) {
+    ASSERT(continuation != NULL);
+    call_instr->MarkAsControl();
+  }
+
+  // Caller clean up of stack for C-style calls.
+  if (is_c_frame && aligned_push_count > 0) {
+    ASSERT(deoptimization == NULL && continuation == NULL);
+    Emit(kArm64Drop | MiscField::encode(aligned_push_count), NULL);
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm64/linkage-arm64.cc b/src/compiler/arm64/linkage-arm64.cc
new file mode 100644 (file)
index 0000000..05f80a0
--- /dev/null
@@ -0,0 +1,66 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct LinkageHelperTraits {
+  static Register ReturnValueReg() { return x0; }
+  static Register ReturnValue2Reg() { return x1; }
+  static Register JSCallFunctionReg() { return x1; }
+  static Register ContextReg() { return cp; }
+  static Register RuntimeCallFunctionReg() { return x1; }
+  static Register RuntimeCallArgCountReg() { return x0; }
+  static RegList CCalleeSaveRegisters() {
+    // TODO(dcarney): correct callee saved registers.
+    return 0;
+  }
+  static Register CRegisterParameter(int i) {
+    static Register register_parameters[] = {x0, x1, x2, x3, x4, x5, x6, x7};
+    return register_parameters[i];
+  }
+  static int CRegisterParametersLength() { return 8; }
+};
+
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+  return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>(
+      zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Property properties,
+    CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+  return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>(
+      zone, function, parameter_count, properties, can_deoptimize);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count) {
+  return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
+      this->info_->zone(), descriptor, stack_parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(
+    Zone* zone, int num_params, MachineRepresentation return_type,
+    const MachineRepresentation* param_types) {
+  return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
+      zone, num_params, return_type, param_types);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
new file mode 100644 (file)
index 0000000..47adab3
--- /dev/null
@@ -0,0 +1,1990 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/ast-graph-builder.h"
+
+#include "src/compiler.h"
+#include "src/compiler/control-builders.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/full-codegen.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+AstGraphBuilder::AstGraphBuilder(CompilationInfo* info, JSGraph* jsgraph,
+                                 SourcePositionTable* source_positions)
+    : StructuredGraphBuilder(jsgraph->graph(), jsgraph->common()),
+      info_(info),
+      jsgraph_(jsgraph),
+      source_positions_(source_positions),
+      globals_(0, info->zone()),
+      breakable_(NULL),
+      execution_context_(NULL) {
+  InitializeAstVisitor(info->zone());
+}
+
+
+Node* AstGraphBuilder::GetFunctionClosure() {
+  if (!function_closure_.is_set()) {
+    // Parameter -1 is special for the function closure
+    Operator* op = common()->Parameter(-1);
+    Node* node = NewNode(op);
+    function_closure_.set(node);
+  }
+  return function_closure_.get();
+}
+
+
+Node* AstGraphBuilder::GetFunctionContext() {
+  if (!function_context_.is_set()) {
+    // Parameter (arity + 1) is special for the outer context of the function.
+    Operator* op = common()->Parameter(info()->num_parameters() + 1);
+    Node* node = NewNode(op);
+    function_context_.set(node);
+  }
+  return function_context_.get();
+}
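+
+
+// Taken together, the incoming parameter layout assumed by this builder is:
+// Parameter(-1) is the closure, Parameter(0) the receiver, Parameter(1) to
+// Parameter(arity) the formal parameters, and Parameter(arity + 1) the outer
+// context.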
+
+
+bool AstGraphBuilder::CreateGraph() {
+  Scope* scope = info()->scope();
+  ASSERT(graph() != NULL);
+
+  SourcePositionTable::Scope start_pos(
+      source_positions(),
+      SourcePosition(info()->shared_info()->start_position()));
+
+  // Set up the basic structure of the graph.
+  graph()->SetStart(graph()->NewNode(common()->Start()));
+
+  // Initialize the top-level environment.
+  Environment env(this, scope, graph()->start());
+  set_environment(&env);
+
+  // Build node to initialize local function context.
+  Node* closure = GetFunctionClosure();
+  Node* outer = GetFunctionContext();
+  Node* inner = BuildLocalFunctionContext(outer, closure);
+
+  // Push top-level function scope for the function body.
+  ContextScope top_context(this, scope, inner);
+
+  // Build the arguments object if it is used.
+  BuildArgumentsObject(scope->arguments());
+
+  // Emit tracing call if requested to do so.
+  if (FLAG_trace) {
+    NewNode(javascript()->Runtime(Runtime::kTraceEnter, 0));
+  }
+
+  // Visit implicit declaration of the function name.
+  if (scope->is_function_scope() && scope->function() != NULL) {
+    VisitVariableDeclaration(scope->function());
+  }
+
+  // Visit declarations within the function scope.
+  VisitDeclarations(scope->declarations());
+
+  // TODO(mstarzinger): This should do an inlined stack check.
+  NewNode(javascript()->Runtime(Runtime::kStackGuard, 0));
+
+  // Visit statements in the function body.
+  VisitStatements(info()->function()->body());
+  if (HasStackOverflow()) return false;
+
+  SourcePositionTable::Scope end_pos(
+      source_positions(),
+      SourcePosition(info()->shared_info()->end_position() - 1));
+
+  // Emit tracing call if requested to do so.
+  if (FLAG_trace) {
+    // TODO(mstarzinger): Only traces implicit return.
+    Node* return_value = jsgraph()->UndefinedConstant();
+    NewNode(javascript()->Runtime(Runtime::kTraceExit, 1), return_value);
+  }
+
+  // Return 'undefined' in case we can fall off the end.
+  Node* control = NewNode(common()->Return(), jsgraph()->UndefinedConstant());
+  UpdateControlDependencyToLeaveFunction(control);
+
+  // Finish the basic structure of the graph.
+  environment()->UpdateControlDependency(exit_control());
+  graph()->SetEnd(NewNode(common()->End()));
+
+  return true;
+}
+
+
+// Left-hand side can only be a property, a global or a variable slot.
+enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+
+
+// Determine the left-hand side kind of an assignment.
+static LhsKind DetermineLhsKind(Expression* expr) {
+  Property* property = expr->AsProperty();
+  ASSERT(expr->IsValidReferenceExpression());
+  LhsKind lhs_kind =
+      (property == NULL) ? VARIABLE : (property->key()->IsPropertyName())
+                                          ? NAMED_PROPERTY
+                                          : KEYED_PROPERTY;
+  return lhs_kind;
+}
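+
+
+// For example, in "x = v" the left-hand side is VARIABLE, in "o.f = v" it is
+// NAMED_PROPERTY, and in "o[i] = v" it is KEYED_PROPERTY.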
+
+
+// Helper to find an existing shared function info in the baseline code for the
+// given function literal. Used to canonicalize SharedFunctionInfo objects.
+static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
+    Code* unoptimized_code, FunctionLiteral* expr) {
+  int start_position = expr->start_position();
+  for (RelocIterator it(unoptimized_code); !it.done(); it.next()) {
+    RelocInfo* rinfo = it.rinfo();
+    if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
+    Object* obj = rinfo->target_object();
+    if (obj->IsSharedFunctionInfo()) {
+      SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+      if (shared->start_position() == start_position) {
+        return Handle<SharedFunctionInfo>(shared);
+      }
+    }
+  }
+  return Handle<SharedFunctionInfo>();
+}
+
+
+StructuredGraphBuilder::Environment* AstGraphBuilder::CopyEnvironment(
+    StructuredGraphBuilder::Environment* env) {
+  return new (zone()) Environment(*static_cast<Environment*>(env));
+}
+
+
+AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
+                                          Scope* scope,
+                                          Node* control_dependency)
+    : StructuredGraphBuilder::Environment(builder, control_dependency),
+      parameters_count_(scope->num_parameters() + 1),
+      locals_count_(scope->num_stack_slots()),
+      parameters_node_(NULL),
+      locals_node_(NULL),
+      stack_node_(NULL),
+      parameters_dirty_(false),
+      locals_dirty_(false),
+      stack_dirty_(false) {
+  ASSERT_EQ(scope->num_parameters() + 1, parameters_count());
+
+  // Bind the receiver variable.
+  values()->insert(values()->end(), parameters_count(),
+                   static_cast<Node*>(NULL));
+  Node* receiver = builder->graph()->NewNode(common()->Parameter(0));
+  Bind(scope->receiver(), receiver);
+
+  // Bind all parameter variables. The parameter indices are shifted by 1
+  // (receiver is parameter index -1 but environment index 0).
+  for (int i = 0; i < scope->num_parameters(); ++i) {
+    // Unused parameters are allocated to Variable::UNALLOCATED.
+    if (!scope->parameter(i)->IsParameter()) continue;
+    Node* parameter = builder->graph()->NewNode(common()->Parameter(i + 1));
+    Bind(scope->parameter(i), parameter);
+  }
+
+  // Bind all local variables to undefined.
+  Node* undefined_constant = builder->jsgraph()->UndefinedConstant();
+  values()->insert(values()->end(), locals_count(), undefined_constant);
+}
+
+
+AstGraphBuilder::Environment::Environment(const Environment& copy)
+    : StructuredGraphBuilder::Environment(
+          static_cast<const StructuredGraphBuilder::Environment&>(copy)),
+      parameters_count_(copy.parameters_count_),
+      locals_count_(copy.locals_count_),
+      parameters_node_(copy.parameters_node_),
+      locals_node_(copy.locals_node_),
+      stack_node_(copy.stack_node_),
+      parameters_dirty_(copy.parameters_dirty_),
+      locals_dirty_(copy.locals_dirty_),
+      stack_dirty_(copy.stack_dirty_) {}
+
+
+Node* AstGraphBuilder::Environment::Checkpoint(BailoutId ast_id) {
+  UNIMPLEMENTED();  // TODO(mstarzinger): Implementation below is incomplete.
+  if (parameters_dirty_) {
+    Node** parameters = &values()->front();
+    parameters_node_ = graph()->NewNode(NULL, parameters_count(), parameters);
+    parameters_dirty_ = false;
+  }
+  if (locals_dirty_) {
+    Node** locals = &values()->at(parameters_count_);
+    locals_node_ = graph()->NewNode(NULL, locals_count(), locals);
+    locals_dirty_ = false;
+  }
+  FrameStateDescriptor descriptor(ast_id);
+  // TODO(jarin): add environment to the node.
+  Operator* op = common()->FrameState(descriptor);
+
+  return graph()->NewNode(op);
+}
+
+
+AstGraphBuilder::AstContext::AstContext(AstGraphBuilder* own,
+                                        Expression::Context kind)
+    : kind_(kind), owner_(own), outer_(own->ast_context()) {
+  owner()->set_ast_context(this);  // Push.
+#ifdef DEBUG
+  original_height_ = environment()->stack_height();
+#endif
+}
+
+
+AstGraphBuilder::AstContext::~AstContext() {
+  owner()->set_ast_context(outer_);  // Pop.
+}
+
+
+AstGraphBuilder::AstEffectContext::~AstEffectContext() {
+  ASSERT(environment()->stack_height() == original_height_);
+}
+
+
+AstGraphBuilder::AstValueContext::~AstValueContext() {
+  ASSERT(environment()->stack_height() == original_height_ + 1);
+}
+
+
+AstGraphBuilder::AstTestContext::~AstTestContext() {
+  ASSERT(environment()->stack_height() == original_height_ + 1);
+}
+
+
+void AstGraphBuilder::AstEffectContext::ProduceValue(Node* value) {
+  // The value is ignored.
+}
+
+
+void AstGraphBuilder::AstValueContext::ProduceValue(Node* value) {
+  environment()->Push(value);
+}
+
+
+void AstGraphBuilder::AstTestContext::ProduceValue(Node* value) {
+  environment()->Push(owner()->BuildToBoolean(value));
+}
+
+
+Node* AstGraphBuilder::AstEffectContext::ConsumeValue() { return NULL; }
+
+
+Node* AstGraphBuilder::AstValueContext::ConsumeValue() {
+  return environment()->Pop();
+}
+
+
+Node* AstGraphBuilder::AstTestContext::ConsumeValue() {
+  return environment()->Pop();
+}
+
+
+AstGraphBuilder::BreakableScope* AstGraphBuilder::BreakableScope::FindBreakable(
+    BreakableStatement* target) {
+  BreakableScope* current = this;
+  while (current != NULL && current->target_ != target) {
+    owner_->environment()->Drop(current->drop_extra_);
+    current = current->next_;
+  }
+  ASSERT(current != NULL);  // Always found (unless stack is malformed).
+  return current;
+}
+
+
+void AstGraphBuilder::BreakableScope::BreakTarget(BreakableStatement* stmt) {
+  FindBreakable(stmt)->control_->Break();
+}
+
+
+void AstGraphBuilder::BreakableScope::ContinueTarget(BreakableStatement* stmt) {
+  FindBreakable(stmt)->control_->Continue();
+}
+
+
+void AstGraphBuilder::VisitForValueOrNull(Expression* expr) {
+  if (expr == NULL) {
+    return environment()->Push(jsgraph()->NullConstant());
+  }
+  VisitForValue(expr);
+}
+
+
+void AstGraphBuilder::VisitForValues(ZoneList<Expression*>* exprs) {
+  for (int i = 0; i < exprs->length(); ++i) {
+    VisitForValue(exprs->at(i));
+  }
+}
+
+
+void AstGraphBuilder::VisitForValue(Expression* expr) {
+  AstValueContext for_value(this);
+  if (!HasStackOverflow()) {
+    expr->Accept(this);
+  }
+}
+
+
+void AstGraphBuilder::VisitForEffect(Expression* expr) {
+  AstEffectContext for_effect(this);
+  if (!HasStackOverflow()) {
+    expr->Accept(this);
+  }
+}
+
+
+void AstGraphBuilder::VisitForTest(Expression* expr) {
+  AstTestContext for_condition(this);
+  if (!HasStackOverflow()) {
+    expr->Accept(this);
+  }
+}
+
+
+void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
+  Variable* variable = decl->proxy()->var();
+  VariableMode mode = decl->mode();
+  bool hole_init = mode == CONST || mode == CONST_LEGACY || mode == LET;
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      Handle<Oddball> value = variable->binding_needs_init()
+                                  ? isolate()->factory()->the_hole_value()
+                                  : isolate()->factory()->undefined_value();
+      globals()->Add(variable->name(), zone());
+      globals()->Add(value, zone());
+      break;
+    }
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+      if (hole_init) {
+        Node* value = jsgraph()->TheHoleConstant();
+        environment()->Bind(variable, value);
+      }
+      break;
+    case Variable::CONTEXT:
+      if (hole_init) {
+        Node* value = jsgraph()->TheHoleConstant();
+        Operator* op = javascript()->StoreContext(0, variable->index());
+        NewNode(op, current_context(), value);
+      }
+      break;
+    case Variable::LOOKUP:
+      UNIMPLEMENTED();
+  }
+}
+
+
+void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
+  Variable* variable = decl->proxy()->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      Handle<SharedFunctionInfo> function =
+          Compiler::BuildFunctionInfo(decl->fun(), info()->script());
+      // Check for stack-overflow exception.
+      if (function.is_null()) return SetStackOverflow();
+      globals()->Add(variable->name(), zone());
+      globals()->Add(function, zone());
+      break;
+    }
+    case Variable::PARAMETER:
+    case Variable::LOCAL: {
+      VisitForValue(decl->fun());
+      Node* value = environment()->Pop();
+      environment()->Bind(variable, value);
+      break;
+    }
+    case Variable::CONTEXT: {
+      VisitForValue(decl->fun());
+      Node* value = environment()->Pop();
+      Operator* op = javascript()->StoreContext(0, variable->index());
+      NewNode(op, current_context(), value);
+      break;
+    }
+    case Variable::LOOKUP:
+      UNIMPLEMENTED();
+  }
+}
+
+
+void AstGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* decl) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitImportDeclaration(ImportDeclaration* decl) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitExportDeclaration(ExportDeclaration* decl) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitModuleLiteral(ModuleLiteral* modl) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitModuleVariable(ModuleVariable* modl) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitModulePath(ModulePath* modl) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitModuleUrl(ModuleUrl* modl) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitBlock(Block* stmt) {
+  BlockBuilder block(this);
+  BreakableScope scope(this, stmt, &block, 0);
+  if (stmt->labels() != NULL) block.BeginBlock();
+  if (stmt->scope() == NULL) {
+    // Visit statements in the same scope, no declarations.
+    VisitStatements(stmt->statements());
+  } else {
+    Operator* op = javascript()->CreateBlockContext();
+    Node* scope_info = jsgraph()->Constant(stmt->scope()->GetScopeInfo());
+    Node* context = NewNode(op, scope_info, GetFunctionClosure());
+    ContextScope scope(this, stmt->scope(), context);
+
+    // Visit declarations and statements in a block scope.
+    VisitDeclarations(stmt->scope()->declarations());
+    VisitStatements(stmt->statements());
+  }
+  if (stmt->labels() != NULL) block.EndBlock();
+}
+
+
+void AstGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+  VisitForEffect(stmt->expression());
+}
+
+
+void AstGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+  // Do nothing.
+}
+
+
+void AstGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+  IfBuilder compare_if(this);
+  VisitForTest(stmt->condition());
+  Node* condition = environment()->Pop();
+  compare_if.If(condition);
+  compare_if.Then();
+  Visit(stmt->then_statement());
+  compare_if.Else();
+  Visit(stmt->else_statement());
+  compare_if.End();
+}
+
+
+void AstGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+  StructuredGraphBuilder::Environment* env = environment()->CopyAsUnreachable();
+  breakable()->ContinueTarget(stmt->target());
+  set_environment(env);
+}
+
+
+void AstGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+  StructuredGraphBuilder::Environment* env = environment()->CopyAsUnreachable();
+  breakable()->BreakTarget(stmt->target());
+  set_environment(env);
+}
+
+
+void AstGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+  VisitForValue(stmt->expression());
+  Node* result = environment()->Pop();
+  Node* control = NewNode(common()->Return(), result);
+  UpdateControlDependencyToLeaveFunction(control);
+}
+
+
+void AstGraphBuilder::VisitWithStatement(WithStatement* stmt) {
+  VisitForValue(stmt->expression());
+  Node* value = environment()->Pop();
+  Operator* op = javascript()->CreateWithContext();
+  Node* context = NewNode(op, value, GetFunctionClosure());
+  ContextScope scope(this, stmt->scope(), context);
+  Visit(stmt->statement());
+}
+
+
+void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  SwitchBuilder compare_switch(this, clauses->length());
+  BreakableScope scope(this, stmt, &compare_switch, 0);
+  compare_switch.BeginSwitch();
+  int default_index = -1;
+
+  // Keep the switch value on the stack until a case matches.
+  VisitForValue(stmt->tag());
+  Node* tag = environment()->Top();
+
+  // Iterate over all cases and create nodes for label comparison.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+
+    // The default is not a test, remember index.
+    if (clause->is_default()) {
+      default_index = i;
+      continue;
+    }
+
+    // Create nodes to perform label comparison as if via '==='. The switch
+    // value is still on the operand stack while the label is evaluated.
+    VisitForValue(clause->label());
+    Node* label = environment()->Pop();
+    Operator* op = javascript()->StrictEqual();
+    Node* condition = NewNode(op, tag, label);
+    compare_switch.BeginLabel(i, condition);
+
+    // Discard the switch value at label match.
+    environment()->Pop();
+    compare_switch.EndLabel();
+  }
+
+  // Discard the switch value and mark the default case.
+  environment()->Pop();
+  if (default_index >= 0) {
+    compare_switch.DefaultAt(default_index);
+  }
+
+  // Iterate over all cases and create nodes for case bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+    compare_switch.BeginCase(i);
+    VisitStatements(clause->statements());
+    compare_switch.EndCase();
+  }
+
+  compare_switch.EndSwitch();
+}
+
+
+void AstGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  LoopBuilder while_loop(this);
+  while_loop.BeginLoop();
+  VisitIterationBody(stmt, &while_loop, 0);
+  while_loop.EndBody();
+  VisitForTest(stmt->cond());
+  Node* condition = environment()->Pop();
+  while_loop.BreakUnless(condition);
+  while_loop.EndLoop();
+}
+
+
+void AstGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+  LoopBuilder while_loop(this);
+  while_loop.BeginLoop();
+  VisitForTest(stmt->cond());
+  Node* condition = environment()->Pop();
+  while_loop.BreakUnless(condition);
+  VisitIterationBody(stmt, &while_loop, 0);
+  while_loop.EndBody();
+  while_loop.EndLoop();
+}
+
+
+void AstGraphBuilder::VisitForStatement(ForStatement* stmt) {
+  LoopBuilder for_loop(this);
+  VisitIfNotNull(stmt->init());
+  for_loop.BeginLoop();
+  if (stmt->cond() != NULL) {
+    VisitForTest(stmt->cond());
+    Node* condition = environment()->Pop();
+    for_loop.BreakUnless(condition);
+  }
+  VisitIterationBody(stmt, &for_loop, 0);
+  for_loop.EndBody();
+  VisitIfNotNull(stmt->next());
+  for_loop.EndLoop();
+}
+
+
+// TODO(dcarney): this is a big function.  Try to clean up some.
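+// In outline, the graph built here corresponds to the following desugaring
+// (a sketch; names are illustrative only):
+//
+//   if (subject !== undefined && subject !== null) {
+//     obj = ToObject(subject);
+//     cache_type, cache_array = %ForInInit(obj, %GetPropertyNamesFast(obj));
+//     cache_length = %ForInCacheArrayLength(cache_type, cache_array);
+//     for (index = 0; index < cache_length; index++) {
+//       value, should_filter = %ForInNext(obj, cache_array, cache_type, index);
+//       if (should_filter) value = FILTER_KEY(obj, value);  // may skip key.
+//       each = value;
+//       <loop body>
+//     }
+//   }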
+void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
+  VisitForValue(stmt->subject());
+  Node* obj = environment()->Pop();
+  // Check for undefined or null before entering loop.
+  IfBuilder is_undefined(this);
+  Node* is_undefined_cond =
+      NewNode(javascript()->StrictEqual(), obj, jsgraph()->UndefinedConstant());
+  is_undefined.If(is_undefined_cond);
+  is_undefined.Then();
+  is_undefined.Else();
+  {
+    IfBuilder is_null(this);
+    Node* is_null_cond =
+        NewNode(javascript()->StrictEqual(), obj, jsgraph()->NullConstant());
+    is_null.If(is_null_cond);
+    is_null.Then();
+    is_null.Else();
+    // Convert the object to a JSObject.
+    // PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+    obj = NewNode(javascript()->ToObject(), obj);
+    environment()->Push(obj);
+    // TODO(dcarney): should do a fast enum cache check here to skip runtime.
+    environment()->Push(obj);
+    Node* cache_type = ProcessArguments(
+        javascript()->Runtime(Runtime::kGetPropertyNamesFast, 1), 1);
+    // TODO(dcarney): these next runtime calls should be removed in favour of
+    //                a few simplified instructions.
+    environment()->Push(obj);
+    environment()->Push(cache_type);
+    Node* cache_pair =
+        ProcessArguments(javascript()->Runtime(Runtime::kForInInit, 2), 2);
+    // cache_type may have been replaced.
+    Node* cache_array = NewNode(common()->Projection(0), cache_pair);
+    cache_type = NewNode(common()->Projection(1), cache_pair);
+    environment()->Push(cache_type);
+    environment()->Push(cache_array);
+    Node* cache_length = ProcessArguments(
+        javascript()->Runtime(Runtime::kForInCacheArrayLength, 2), 2);
+    {
+      // TODO(dcarney): this check is actually supposed to be for the
+      //                empty enum case only.
+      IfBuilder have_no_properties(this);
+      Node* empty_array_cond = NewNode(javascript()->StrictEqual(),
+                                       cache_length, jsgraph()->ZeroConstant());
+      have_no_properties.If(empty_array_cond);
+      have_no_properties.Then();
+      // Pop obj and skip loop.
+      environment()->Pop();
+      have_no_properties.Else();
+      {
+        // Construct the rest of the environment.
+        environment()->Push(cache_type);
+        environment()->Push(cache_array);
+        environment()->Push(cache_length);
+        environment()->Push(jsgraph()->ZeroConstant());
+        // PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+        LoopBuilder for_loop(this);
+        for_loop.BeginLoop();
+        // Check loop termination condition.
+        Node* index = environment()->Peek(0);
+        Node* exit_cond =
+            NewNode(javascript()->LessThan(), index, cache_length);
+        for_loop.BreakUnless(exit_cond);
+        // TODO(dcarney): this runtime call should be a handful of
+        //                simplified instructions that
+        //                basically produce
+        //                    value = array[index]
+        environment()->Push(obj);
+        environment()->Push(cache_array);
+        environment()->Push(cache_type);
+        environment()->Push(index);
+        Node* pair =
+            ProcessArguments(javascript()->Runtime(Runtime::kForInNext, 4), 4);
+        Node* value = NewNode(common()->Projection(0), pair);
+        Node* should_filter = NewNode(common()->Projection(1), pair);
+        environment()->Push(value);
+        {
+          // Test if FILTER_KEY needs to be called.
+          IfBuilder test_should_filter(this);
+          Node* should_filter_cond =
+              NewNode(javascript()->StrictEqual(), should_filter,
+                      jsgraph()->TrueConstant());
+          test_should_filter.If(should_filter_cond);
+          test_should_filter.Then();
+          value = environment()->Pop();
+          // TODO(dcarney): Better load from function context.
+          // See comment in BuildLoadBuiltinsObject.
+          Handle<JSFunction> function(JSFunction::cast(
+              info()->context()->builtins()->javascript_builtin(
+                  Builtins::FILTER_KEY)));
+          // Callee.
+          environment()->Push(jsgraph()->HeapConstant(function));
+          // Receiver.
+          environment()->Push(obj);
+          // Args.
+          environment()->Push(value);
+          // The result is either the string key or Smi(0) indicating that
+          // the property is gone.
+          Node* res = ProcessArguments(
+              javascript()->Call(3, NO_CALL_FUNCTION_FLAGS), 3);
+          Node* property_missing = NewNode(javascript()->StrictEqual(), res,
+                                           jsgraph()->ZeroConstant());
+          {
+            IfBuilder is_property_missing(this);
+            is_property_missing.If(property_missing);
+            is_property_missing.Then();
+            // Inc counter and continue.
+            Node* index_inc =
+                NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
+            environment()->Poke(0, index_inc);
+            for_loop.Continue();
+            is_property_missing.Else();
+            is_property_missing.End();
+          }
+          // Replace 'value' in environment.
+          environment()->Push(res);
+          test_should_filter.Else();
+          test_should_filter.End();
+        }
+        value = environment()->Pop();
+        // Bind value and do loop body.
+        VisitForInAssignment(stmt->each(), value);
+        VisitIterationBody(stmt, &for_loop, 5);
+        // Inc counter and continue.
+        Node* index_inc =
+            NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
+        environment()->Poke(0, index_inc);
+        for_loop.EndBody();
+        for_loop.EndLoop();
+        environment()->Drop(5);
+        // PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+      }
+      have_no_properties.End();
+    }
+    is_null.End();
+  }
+  is_undefined.End();
+}
+
+
+void AstGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
+  VisitForValue(stmt->subject());
+  environment()->Pop();
+  // TODO(turbofan): create and use loop builder.
+}
+
+
+void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+  // TODO(turbofan): Do we really need a separate reloc-info for this?
+  NewNode(javascript()->Runtime(Runtime::kDebugBreak, 0));
+}
+
+
+void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+  Node* context = current_context();
+
+  // Build a new shared function info if we cannot find one in the baseline
+  // code. Note that a stack overflow during the recursive compilation leaves
+  // us with a pending stack overflow here as well.
+  Handle<SharedFunctionInfo> shared_info =
+      SearchSharedFunctionInfo(info()->shared_info()->code(), expr);
+  if (shared_info.is_null()) {
+    shared_info = Compiler::BuildFunctionInfo(expr, info()->script());
+    CHECK(!shared_info.is_null());  // TODO(mstarzinger): Set stack overflow?
+  }
+
+  // Create node to instantiate a new closure.
+  Node* info = jsgraph()->Constant(shared_info);
+  Node* pretenure = expr->pretenure() ? jsgraph()->TrueConstant()
+                                      : jsgraph()->FalseConstant();
+  Operator* op = javascript()->Runtime(Runtime::kNewClosure, 3);
+  Node* value = NewNode(op, context, info, pretenure);
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitConditional(Conditional* expr) {
+  IfBuilder compare_if(this);
+  VisitForTest(expr->condition());
+  Node* condition = environment()->Pop();
+  compare_if.If(condition);
+  compare_if.Then();
+  Visit(expr->then_expression());
+  compare_if.Else();
+  Visit(expr->else_expression());
+  compare_if.End();
+  ast_context()->ReplaceValue();
+}
+
+
+void AstGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+  Node* value = BuildVariableLoad(expr->var());
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitLiteral(Literal* expr) {
+  Node* value = jsgraph()->Constant(expr->value());
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+  Handle<JSFunction> closure = info()->closure();
+
+  // Create node to materialize a regular expression literal.
+  Node* literals_array = jsgraph()->Constant(handle(closure->literals()));
+  Node* literal_index = jsgraph()->Constant(expr->literal_index());
+  Node* pattern = jsgraph()->Constant(expr->pattern());
+  Node* flags = jsgraph()->Constant(expr->flags());
+  Operator* op = javascript()->Runtime(Runtime::kMaterializeRegExpLiteral, 4);
+  Node* literal = NewNode(op, literals_array, literal_index, pattern, flags);
+  ast_context()->ProduceValue(literal);
+}
+
+
+void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+  Handle<JSFunction> closure = info()->closure();
+
+  // Create node to deep-copy the literal boilerplate.
+  expr->BuildConstantProperties(isolate());
+  Node* literals_array = jsgraph()->Constant(handle(closure->literals()));
+  Node* literal_index = jsgraph()->Constant(expr->literal_index());
+  Node* constants = jsgraph()->Constant(expr->constant_properties());
+  Node* flags = jsgraph()->Constant(expr->ComputeFlags());
+  Operator* op = javascript()->Runtime(Runtime::kCreateObjectLiteral, 4);
+  Node* literal = NewNode(op, literals_array, literal_index, constants, flags);
+
+  // The object is expected on the operand stack during computation of the
+  // property values and is the value of the entire expression.
+  environment()->Push(literal);
+
+  // Mark all computed expressions that are bound to a key that is shadowed by
+  // a later occurrence of the same key. For the marked expressions, no store
+  // code is emitted.
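+  // E.g. in { x: f(), x: g() } the first property is shadowed, so f() is
+  // only evaluated for its side effects and just the store for g() is built.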
+  expr->CalculateEmitStore(zone());
+
+  // Create nodes to store computed values into the literal.
+  AccessorTable accessor_table(zone());
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    if (property->IsCompileTimeValue()) continue;
+
+    Literal* key = property->key();
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        UNREACHABLE();
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+      // Fall through.
+      case ObjectLiteral::Property::COMPUTED: {
+        // It is safe to use [[Put]] here because the boilerplate already
+        // contains computed properties with an uninitialized value.
+        if (key->value()->IsInternalizedString()) {
+          if (property->emit_store()) {
+            VisitForValue(property->value());
+            Node* value = environment()->Pop();
+            PrintableUnique<Name> name = MakeUnique(key->AsPropertyName());
+            NewNode(javascript()->StoreNamed(name), literal, value);
+          } else {
+            VisitForEffect(property->value());
+          }
+          break;
+        }
+        environment()->Push(literal);  // Duplicate receiver.
+        VisitForValue(property->key());
+        VisitForValue(property->value());
+        Node* value = environment()->Pop();
+        Node* key = environment()->Pop();
+        Node* receiver = environment()->Pop();
+        if (property->emit_store()) {
+          Node* strict = jsgraph()->Constant(SLOPPY);
+          Operator* op = javascript()->Runtime(Runtime::kSetProperty, 4);
+          NewNode(op, receiver, key, value, strict);
+        }
+        break;
+      }
+      case ObjectLiteral::Property::PROTOTYPE: {
+        environment()->Push(literal);  // Duplicate receiver.
+        VisitForValue(property->value());
+        Node* value = environment()->Pop();
+        Node* receiver = environment()->Pop();
+        if (property->emit_store()) {
+          Operator* op = javascript()->Runtime(Runtime::kSetPrototype, 2);
+          NewNode(op, receiver, value);
+        }
+        break;
+      }
+      case ObjectLiteral::Property::GETTER:
+        accessor_table.lookup(key)->second->getter = property->value();
+        break;
+      case ObjectLiteral::Property::SETTER:
+        accessor_table.lookup(key)->second->setter = property->value();
+        break;
+    }
+  }
+
+  // Create nodes to define accessors, using only a single call to the runtime
+  // for each pair of corresponding getters and setters.
+  for (AccessorTable::Iterator it = accessor_table.begin();
+       it != accessor_table.end(); ++it) {
+    VisitForValue(it->first);
+    VisitForValueOrNull(it->second->getter);
+    VisitForValueOrNull(it->second->setter);
+    Node* setter = environment()->Pop();
+    Node* getter = environment()->Pop();
+    Node* name = environment()->Pop();
+    Node* attr = jsgraph()->Constant(NONE);
+    Operator* op =
+        javascript()->Runtime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+    NewNode(op, literal, name, getter, setter, attr);
+  }
+
+  // Transform literals that contain functions to fast properties.
+  if (expr->has_function()) {
+    Operator* op = javascript()->Runtime(Runtime::kToFastProperties, 1);
+    NewNode(op, literal);
+  }
+
+  ast_context()->ProduceValue(environment()->Pop());
+}
+
+
+void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+  Handle<JSFunction> closure = info()->closure();
+
+  // Create node to deep-copy the literal boilerplate.
+  expr->BuildConstantElements(isolate());
+  Node* literals_array = jsgraph()->Constant(handle(closure->literals()));
+  Node* literal_index = jsgraph()->Constant(expr->literal_index());
+  Node* constants = jsgraph()->Constant(expr->constant_elements());
+  Node* flags = jsgraph()->Constant(expr->ComputeFlags());
+  Operator* op = javascript()->Runtime(Runtime::kCreateArrayLiteral, 4);
+  Node* literal = NewNode(op, literals_array, literal_index, constants, flags);
+
+  // The array and the literal index are both expected on the operand stack
+  // during computation of the element values.
+  environment()->Push(literal);
+  environment()->Push(literal_index);
+
+  // Create nodes to evaluate all the non-constant subexpressions and to store
+  // them into the newly cloned array.
+  for (int i = 0; i < expr->values()->length(); i++) {
+    Expression* subexpr = expr->values()->at(i);
+    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+    VisitForValue(subexpr);
+    Node* value = environment()->Pop();
+    Node* index = jsgraph()->Constant(i);
+    NewNode(javascript()->StoreProperty(), literal, index, value);
+  }
+
+  environment()->Pop();  // Array literal index.
+  ast_context()->ProduceValue(environment()->Pop());
+}
+
+
+void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value) {
+  ASSERT(expr->IsValidReferenceExpression());
+
+  // Left-hand side can only be a property, a global or a variable slot.
+  Property* property = expr->AsProperty();
+  LhsKind assign_type = DetermineLhsKind(expr);
+
+  // Evaluate LHS expression and store the value.
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* var = expr->AsVariableProxy()->var();
+      BuildVariableAssignment(var, value, Token::ASSIGN);
+      break;
+    }
+    case NAMED_PROPERTY: {
+      environment()->Push(value);
+      VisitForValue(property->obj());
+      Node* object = environment()->Pop();
+      value = environment()->Pop();
+      PrintableUnique<Name> name =
+          MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+      NewNode(javascript()->StoreNamed(name), object, value);
+      break;
+    }
+    case KEYED_PROPERTY: {
+      environment()->Push(value);
+      VisitForValue(property->obj());
+      VisitForValue(property->key());
+      Node* key = environment()->Pop();
+      Node* object = environment()->Pop();
+      value = environment()->Pop();
+      NewNode(javascript()->StoreProperty(), object, key, value);
+      break;
+    }
+  }
+}
+
+
+void AstGraphBuilder::VisitAssignment(Assignment* expr) {
+  ASSERT(expr->target()->IsValidReferenceExpression());
+
+  // Left-hand side can only be a property, a global or a variable slot.
+  Property* property = expr->target()->AsProperty();
+  LhsKind assign_type = DetermineLhsKind(expr->target());
+
+  // Evaluate LHS expression.
+  switch (assign_type) {
+    case VARIABLE:
+      // Nothing to do here.
+      break;
+    case NAMED_PROPERTY:
+      VisitForValue(property->obj());
+      break;
+    case KEYED_PROPERTY: {
+      VisitForValue(property->obj());
+      VisitForValue(property->key());
+      break;
+    }
+  }
+
+  // Evaluate the value and potentially handle compound assignments by loading
+  // the left-hand side value and performing a binary operation.
+  if (expr->is_compound()) {
+    Node* old_value = NULL;
+    switch (assign_type) {
+      case VARIABLE: {
+        Variable* variable = expr->target()->AsVariableProxy()->var();
+        old_value = BuildVariableLoad(variable);
+        break;
+      }
+      case NAMED_PROPERTY: {
+        Node* object = environment()->Top();
+        PrintableUnique<Name> name =
+            MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+        old_value = NewNode(javascript()->LoadNamed(name), object);
+        break;
+      }
+      case KEYED_PROPERTY: {
+        Node* key = environment()->Top();
+        Node* object = environment()->Peek(1);
+        old_value = NewNode(javascript()->LoadProperty(), object, key);
+        break;
+      }
+    }
+    environment()->Push(old_value);
+    VisitForValue(expr->value());
+    Node* right = environment()->Pop();
+    Node* left = environment()->Pop();
+    Node* value = BuildBinaryOp(left, right, expr->binary_op());
+    environment()->Push(value);
+  } else {
+    VisitForValue(expr->value());
+  }
+
+  // Store the value.
+  Node* value = environment()->Pop();
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* variable = expr->target()->AsVariableProxy()->var();
+      BuildVariableAssignment(variable, value, expr->op());
+      break;
+    }
+    case NAMED_PROPERTY: {
+      Node* object = environment()->Pop();
+      PrintableUnique<Name> name =
+          MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+      NewNode(javascript()->StoreNamed(name), object, value);
+      break;
+    }
+    case KEYED_PROPERTY: {
+      Node* key = environment()->Pop();
+      Node* object = environment()->Pop();
+      NewNode(javascript()->StoreProperty(), object, key, value);
+      break;
+    }
+  }
+
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitYield(Yield* expr) {
+  VisitForValue(expr->generator_object());
+  VisitForValue(expr->expression());
+  environment()->Pop();
+  environment()->Pop();
+  // TODO(turbofan): VisitYield
+  ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
+}
+
+
+void AstGraphBuilder::VisitThrow(Throw* expr) {
+  VisitForValue(expr->exception());
+  Node* exception = environment()->Pop();
+  Operator* op = javascript()->Runtime(Runtime::kThrow, 1);
+  Node* value = NewNode(op, exception);
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitProperty(Property* expr) {
+  Node* value;
+  if (expr->key()->IsPropertyName()) {
+    VisitForValue(expr->obj());
+    Node* object = environment()->Pop();
+    PrintableUnique<Name> name =
+        MakeUnique(expr->key()->AsLiteral()->AsPropertyName());
+    value = NewNode(javascript()->LoadNamed(name), object);
+  } else {
+    VisitForValue(expr->obj());
+    VisitForValue(expr->key());
+    Node* key = environment()->Pop();
+    Node* object = environment()->Pop();
+    value = NewNode(javascript()->LoadProperty(), object, key);
+  }
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitCall(Call* expr) {
+  Expression* callee = expr->expression();
+  Call::CallType call_type = expr->GetCallType(isolate());
+
+  // Prepare the callee and the receiver to the function call. This depends on
+  // the semantics of the underlying call type.
+  CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
+  Node* receiver_value = NULL;
+  Node* callee_value = NULL;
+  bool possibly_eval = false;
+  switch (call_type) {
+    case Call::GLOBAL_CALL: {
+      Variable* variable = callee->AsVariableProxy()->var();
+      callee_value = BuildVariableLoad(variable);
+      receiver_value = jsgraph()->UndefinedConstant();
+      break;
+    }
+    case Call::LOOKUP_SLOT_CALL: {
+      Variable* variable = callee->AsVariableProxy()->var();
+      ASSERT(variable->location() == Variable::LOOKUP);
+      Node* name = jsgraph()->Constant(variable->name());
+      Operator* op = javascript()->Runtime(Runtime::kLoadLookupSlot, 2);
+      Node* pair = NewNode(op, current_context(), name);
+      callee_value = NewNode(common()->Projection(0), pair);
+      receiver_value = NewNode(common()->Projection(1), pair);
+      break;
+    }
+    case Call::PROPERTY_CALL: {
+      Property* property = callee->AsProperty();
+      VisitForValue(property->obj());
+      Node* object = environment()->Top();
+      if (property->key()->IsPropertyName()) {
+        PrintableUnique<Name> name =
+            MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+        callee_value = NewNode(javascript()->LoadNamed(name), object);
+      } else {
+        VisitForValue(property->key());
+        Node* key = environment()->Pop();
+        callee_value = NewNode(javascript()->LoadProperty(), object, key);
+      }
+      receiver_value = environment()->Pop();
+      // Note that a PROPERTY_CALL requires the receiver to be wrapped into an
+      // object for sloppy callees. This could also be modeled explicitly here,
+      // thereby obsoleting the need for a flag to the call operator.
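+      // E.g. for a sloppy callee in "abc".foo(), the primitive receiver is
+      // boxed into a String wrapper object before the call.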
+      flags = CALL_AS_METHOD;
+      break;
+    }
+    case Call::POSSIBLY_EVAL_CALL:
+      possibly_eval = true;
+    // Fall through.
+    case Call::OTHER_CALL:
+      VisitForValue(callee);
+      callee_value = environment()->Pop();
+      receiver_value = jsgraph()->UndefinedConstant();
+      break;
+  }
+
+  // The callee and the receiver both have to be pushed onto the operand stack
+  // before arguments are being evaluated.
+  environment()->Push(callee_value);
+  environment()->Push(receiver_value);
+
+  // Evaluate all arguments to the function call.
+  ZoneList<Expression*>* args = expr->arguments();
+  VisitForValues(args);
+
+  // Resolve callee and receiver for a potential direct eval call. This block
+  // will mutate the callee and receiver values pushed onto the environment.
+  if (possibly_eval && args->length() > 0) {
+    int arg_count = args->length();
+
+    // Extract callee and source string from the environment.
+    Node* callee = environment()->Peek(arg_count + 1);
+    Node* source = environment()->Peek(arg_count - 1);
+
+    // Create node to ask for help resolving potential eval call. This will
+    // provide a fully resolved callee and the corresponding receiver.
+    Node* receiver = environment()->Lookup(info()->scope()->receiver());
+    Node* strict = jsgraph()->Constant(strict_mode());
+    Node* position = jsgraph()->Constant(info()->scope()->start_position());
+    Operator* op =
+        javascript()->Runtime(Runtime::kResolvePossiblyDirectEval, 5);
+    Node* pair = NewNode(op, callee, source, receiver, strict, position);
+    Node* new_callee = NewNode(common()->Projection(0), pair);
+    Node* new_receiver = NewNode(common()->Projection(1), pair);
+
+    // Patch callee and receiver on the environment.
+    environment()->Poke(arg_count + 1, new_callee);
+    environment()->Poke(arg_count + 0, new_receiver);
+  }
+
+  // Create node to perform the function call.
+  Operator* call = javascript()->Call(args->length() + 2, flags);
+  Node* value = ProcessArguments(call, args->length() + 2);
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitCallNew(CallNew* expr) {
+  VisitForValue(expr->expression());
+
+  // Evaluate all arguments to the construct call.
+  ZoneList<Expression*>* args = expr->arguments();
+  VisitForValues(args);
+
+  // Create node to perform the construct call.
+  Operator* call = javascript()->CallNew(args->length() + 1);
+  Node* value = ProcessArguments(call, args->length() + 1);
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
+  Handle<String> name = expr->name();
+
+  // The callee and the receiver both have to be pushed onto the operand stack
+  // before arguments are being evaluated.
+  CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
+  Node* receiver_value = BuildLoadBuiltinsObject();
+  PrintableUnique<String> unique = MakeUnique(name);
+  Node* callee_value = NewNode(javascript()->LoadNamed(unique), receiver_value);
+  environment()->Push(callee_value);
+  environment()->Push(receiver_value);
+
+  // Evaluate all arguments to the JS runtime call.
+  ZoneList<Expression*>* args = expr->arguments();
+  VisitForValues(args);
+
+  // Create node to perform the JS runtime call.
+  Operator* call = javascript()->Call(args->length() + 2, flags);
+  Node* value = ProcessArguments(call, args->length() + 2);
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+  const Runtime::Function* function = expr->function();
+
+  // Handle calls to runtime functions implemented in JavaScript separately as
+  // the call follows JavaScript ABI and the callee is statically unknown.
+  if (expr->is_jsruntime()) {
+    ASSERT(function == NULL && expr->name()->length() > 0);
+    return VisitCallJSRuntime(expr);
+  }
+
+  // Evaluate all arguments to the runtime call.
+  ZoneList<Expression*>* args = expr->arguments();
+  VisitForValues(args);
+
+  // Create node to perform the runtime call.
+  Runtime::FunctionId function_id = function->function_id;
+  Operator* call = javascript()->Runtime(function_id, args->length());
+  Node* value = ProcessArguments(call, args->length());
+  ast_context()->ProduceValue(value);
+
+  BuildLazyBailout(value, expr->id());
+}
+
+
+void AstGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+  switch (expr->op()) {
+    case Token::DELETE:
+      return VisitDelete(expr);
+    case Token::VOID:
+      return VisitVoid(expr);
+    case Token::TYPEOF:
+      return VisitTypeof(expr);
+    case Token::NOT:
+      return VisitNot(expr);
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
+  ASSERT(expr->expression()->IsValidReferenceExpression());
+
+  // Left-hand side can only be a property, a global or a variable slot.
+  Property* property = expr->expression()->AsProperty();
+  LhsKind assign_type = DetermineLhsKind(expr->expression());
+
+  // Reserve space for result of postfix operation.
+  bool is_postfix = expr->is_postfix() && !ast_context()->IsEffect();
+  if (is_postfix) environment()->Push(jsgraph()->UndefinedConstant());
+
+  // Evaluate LHS expression and get old value.
+  Node* old_value = NULL;
+  int stack_depth = -1;
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* variable = expr->expression()->AsVariableProxy()->var();
+      old_value = BuildVariableLoad(variable);
+      stack_depth = 0;
+      break;
+    }
+    case NAMED_PROPERTY: {
+      VisitForValue(property->obj());
+      Node* object = environment()->Top();
+      PrintableUnique<Name> name =
+          MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+      old_value = NewNode(javascript()->LoadNamed(name), object);
+      stack_depth = 1;
+      break;
+    }
+    case KEYED_PROPERTY: {
+      VisitForValue(property->obj());
+      VisitForValue(property->key());
+      Node* key = environment()->Top();
+      Node* object = environment()->Peek(1);
+      old_value = NewNode(javascript()->LoadProperty(), object, key);
+      stack_depth = 2;
+      break;
+    }
+  }
+
+  // Convert old value into a number.
+  old_value = NewNode(javascript()->ToNumber(), old_value);
+
+  // Save result for postfix expressions at correct stack depth.
+  if (is_postfix) environment()->Poke(stack_depth, old_value);
+
+  // Create node to perform +1/-1 operation.
+  Node* value =
+      BuildBinaryOp(old_value, jsgraph()->OneConstant(), expr->binary_op());
+
+  // Store the value.
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* variable = expr->expression()->AsVariableProxy()->var();
+      BuildVariableAssignment(variable, value, expr->op());
+      break;
+    }
+    case NAMED_PROPERTY: {
+      Node* object = environment()->Pop();
+      PrintableUnique<Name> name =
+          MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+      NewNode(javascript()->StoreNamed(name), object, value);
+      break;
+    }
+    case KEYED_PROPERTY: {
+      Node* key = environment()->Pop();
+      Node* object = environment()->Pop();
+      NewNode(javascript()->StoreProperty(), object, key, value);
+      break;
+    }
+  }
+
+  // Restore old value for postfix expressions.
+  if (is_postfix) value = environment()->Pop();
+
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+  switch (expr->op()) {
+    case Token::COMMA:
+      return VisitComma(expr);
+    case Token::OR:
+    case Token::AND:
+      return VisitLogicalExpression(expr);
+    default: {
+      VisitForValue(expr->left());
+      VisitForValue(expr->right());
+      Node* right = environment()->Pop();
+      Node* left = environment()->Pop();
+      Node* value = BuildBinaryOp(left, right, expr->op());
+      ast_context()->ProduceValue(value);
+    }
+  }
+}
+
+
+void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+  Operator* op;
+  switch (expr->op()) {
+    case Token::EQ:
+      op = javascript()->Equal();
+      break;
+    case Token::NE:
+      op = javascript()->NotEqual();
+      break;
+    case Token::EQ_STRICT:
+      op = javascript()->StrictEqual();
+      break;
+    case Token::NE_STRICT:
+      op = javascript()->StrictNotEqual();
+      break;
+    case Token::LT:
+      op = javascript()->LessThan();
+      break;
+    case Token::GT:
+      op = javascript()->GreaterThan();
+      break;
+    case Token::LTE:
+      op = javascript()->LessThanOrEqual();
+      break;
+    case Token::GTE:
+      op = javascript()->GreaterThanOrEqual();
+      break;
+    case Token::INSTANCEOF:
+      op = javascript()->InstanceOf();
+      break;
+    case Token::IN:
+      op = javascript()->HasProperty();
+      break;
+    default:
+      op = NULL;
+      UNREACHABLE();
+  }
+  VisitForValue(expr->left());
+  VisitForValue(expr->right());
+  Node* right = environment()->Pop();
+  Node* left = environment()->Pop();
+  Node* value = NewNode(op, left, right);
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
+  Node* value = GetFunctionClosure();
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitCaseClause(CaseClause* expr) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
+  ASSERT(globals()->is_empty());
+  AstVisitor::VisitDeclarations(declarations);
+  if (globals()->is_empty()) return;
+  Handle<FixedArray> data =
+      isolate()->factory()->NewFixedArray(globals()->length(), TENURED);
+  for (int i = 0; i < globals()->length(); ++i) data->set(i, *globals()->at(i));
+  int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
+                      DeclareGlobalsNativeFlag::encode(info()->is_native()) |
+                      DeclareGlobalsStrictMode::encode(info()->strict_mode());
+  Node* flags = jsgraph()->Constant(encoded_flags);
+  Node* pairs = jsgraph()->Constant(data);
+  Operator* op = javascript()->Runtime(Runtime::kDeclareGlobals, 3);
+  NewNode(op, current_context(), pairs, flags);
+  globals()->Rewind(0);
+}
+
+
+void AstGraphBuilder::VisitIfNotNull(Statement* stmt) {
+  if (stmt == NULL) return;
+  Visit(stmt);
+}
+
+
+void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
+                                         LoopBuilder* loop, int drop_extra) {
+  BreakableScope scope(this, stmt, loop, drop_extra);
+  Visit(stmt->body());
+}
+
+
+void AstGraphBuilder::VisitDelete(UnaryOperation* expr) {
+  Node* value;
+  if (expr->expression()->IsVariableProxy()) {
+    // Delete of an unqualified identifier is only allowed in sloppy mode, but
+    // deleting "this" is allowed in all language modes.
+    Variable* variable = expr->expression()->AsVariableProxy()->var();
+    ASSERT(strict_mode() == SLOPPY || variable->is_this());
+    value = BuildVariableDelete(variable);
+  } else if (expr->expression()->IsProperty()) {
+    Property* property = expr->expression()->AsProperty();
+    VisitForValue(property->obj());
+    VisitForValue(property->key());
+    Node* key = environment()->Pop();
+    Node* object = environment()->Pop();
+    value = NewNode(javascript()->DeleteProperty(strict_mode()), object, key);
+  } else {
+    VisitForEffect(expr->expression());
+    value = jsgraph()->TrueConstant();
+  }
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitVoid(UnaryOperation* expr) {
+  VisitForEffect(expr->expression());
+  Node* value = jsgraph()->UndefinedConstant();
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitTypeof(UnaryOperation* expr) {
+  Node* operand;
+  if (expr->expression()->IsVariableProxy()) {
+    // Typeof does not throw a reference error on global variables, hence we
+    // perform a non-contextual load in case the operand is a variable proxy.
+    Variable* variable = expr->expression()->AsVariableProxy()->var();
+    operand = BuildVariableLoad(variable, NOT_CONTEXTUAL);
+  } else {
+    VisitForValue(expr->expression());
+    operand = environment()->Pop();
+  }
+  Node* value = NewNode(javascript()->TypeOf(), operand);
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitNot(UnaryOperation* expr) {
+  VisitForValue(expr->expression());
+  Node* operand = environment()->Pop();
+  // TODO(mstarzinger): Possible optimization when we are in effect context.
+  Node* value = NewNode(javascript()->UnaryNot(), operand);
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitComma(BinaryOperation* expr) {
+  VisitForEffect(expr->left());
+  Visit(expr->right());
+  ast_context()->ReplaceValue();
+}
+
+
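+// Builds short-circuit evaluation: the right operand of 'a && b' is only
+// visited when the left value tests true, and that of 'a || b' only when it
+// tests false; otherwise the left value itself becomes the result.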
+void AstGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
+  bool is_logical_and = expr->op() == Token::AND;
+  IfBuilder compare_if(this);
+  VisitForValue(expr->left());
+  Node* condition = environment()->Top();
+  compare_if.If(BuildToBoolean(condition));
+  compare_if.Then();
+  if (is_logical_and) {
+    environment()->Pop();
+    Visit(expr->right());
+  } else if (ast_context()->IsEffect()) {
+    environment()->Pop();
+  }
+  compare_if.Else();
+  if (!is_logical_and) {
+    environment()->Pop();
+    Visit(expr->right());
+  } else if (ast_context()->IsEffect()) {
+    environment()->Pop();
+  }
+  compare_if.End();
+  ast_context()->ReplaceValue();
+}
+
+
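+// Pops {arity} nodes off the operand stack (rightmost argument first) and
+// feeds them to a new call node. E.g. VisitCall passes args->length() + 2 as
+// the arity so the callee and receiver pushed earlier are consumed as well.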
+Node* AstGraphBuilder::ProcessArguments(Operator* op, int arity) {
+  ASSERT(environment()->stack_height() >= arity);
+  Node** all = info()->zone()->NewArray<Node*>(arity);  // TODO(turbofan): alloca?
+  for (int i = arity - 1; i >= 0; --i) {
+    all[i] = environment()->Pop();
+  }
+  Node* value = NewNode(op, arity, all);
+  return value;
+}
+
+
+Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context, Node* closure) {
+  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  if (heap_slots <= 0) return context;
+  set_current_context(context);
+
+  // Allocate a new local context.
+  Operator* op = javascript()->CreateFunctionContext();
+  Node* local_context = NewNode(op, closure);
+  set_current_context(local_context);
+
+  // Copy parameters into context if necessary.
+  int num_parameters = info()->scope()->num_parameters();
+  for (int i = 0; i < num_parameters; i++) {
+    Variable* variable = info()->scope()->parameter(i);
+    if (!variable->IsContextSlot()) continue;
+    // Temporary parameter node. The parameter indices are shifted by 1
+    // (receiver is parameter index -1 but environment index 0).
+    Node* parameter = NewNode(common()->Parameter(i + 1));
+    // Context variable (at bottom of the context chain).
+    ASSERT_EQ(0, info()->scope()->ContextChainLength(variable->scope()));
+    Operator* op = javascript()->StoreContext(0, variable->index());
+    NewNode(op, local_context, parameter);
+  }
+
+  return local_context;
+}
+
+
+Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
+  if (arguments == NULL) return NULL;
+
+  // Allocate and initialize a new arguments object.
+  Node* callee = GetFunctionClosure();
+  Operator* op = javascript()->Runtime(Runtime::kNewArguments, 1);
+  Node* object = NewNode(op, callee);
+
+  // Assign the object to the arguments variable.
+  ASSERT(arguments->IsContextSlot() || arguments->IsStackAllocated());
+  BuildVariableAssignment(arguments, object, Token::ASSIGN);
+
+  return object;
+}
+
+
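+// Builds a diamond in the graph that evaluates to {for_hole} if {value} is
+// the hole sentinel and to {not_hole} otherwise.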
+Node* AstGraphBuilder::BuildHoleCheckSilent(Node* value, Node* for_hole,
+                                            Node* not_hole) {
+  IfBuilder hole_check(this);
+  Node* the_hole = jsgraph()->TheHoleConstant();
+  Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
+  hole_check.If(check);
+  hole_check.Then();
+  environment()->Push(for_hole);
+  hole_check.Else();
+  environment()->Push(not_hole);
+  hole_check.End();
+  return environment()->Pop();
+}
+
+
+Node* AstGraphBuilder::BuildHoleCheckThrow(Node* value, Variable* variable,
+                                           Node* not_hole) {
+  IfBuilder hole_check(this);
+  Node* the_hole = jsgraph()->TheHoleConstant();
+  Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
+  hole_check.If(check);
+  hole_check.Then();
+  environment()->Push(BuildThrowReferenceError(variable));
+  hole_check.Else();
+  environment()->Push(not_hole);
+  hole_check.End();
+  return environment()->Pop();
+}
+
+
+Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
+                                         ContextualMode contextual_mode) {
+  Node* the_hole = jsgraph()->TheHoleConstant();
+  VariableMode mode = variable->mode();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      // Global var, const, or let variable.
+      if (!info()->is_native()) {
+        // TODO(turbofan): This special case is needed only because we don't
+        // use LoadICs yet. Remove this once LoadNamed is lowered to an IC.
+        Node* name = jsgraph()->Constant(variable->name());
+        Runtime::FunctionId function_id =
+            (contextual_mode == CONTEXTUAL)
+                ? Runtime::kLoadLookupSlot
+                : Runtime::kLoadLookupSlotNoReferenceError;
+        Operator* op = javascript()->Runtime(function_id, 2);
+        Node* pair = NewNode(op, current_context(), name);
+        return NewNode(common()->Projection(0), pair);
+      }
+      Node* global = BuildLoadGlobalObject();
+      PrintableUnique<Name> name = MakeUnique(variable->name());
+      Operator* op = javascript()->LoadNamed(name);
+      return NewNode(op, global);
+    }
+    case Variable::PARAMETER:
+    case Variable::LOCAL: {
+      // Local var, const, or let variable.
+      Node* value = environment()->Lookup(variable);
+      if (mode == CONST_LEGACY) {
+        // Perform check for uninitialized legacy const variables.
+        if (value->op() == the_hole->op()) {
+          value = jsgraph()->UndefinedConstant();
+        } else if (value->opcode() == IrOpcode::kPhi) {
+          Node* undefined = jsgraph()->UndefinedConstant();
+          value = BuildHoleCheckSilent(value, undefined, value);
+        }
+      } else if (mode == LET || mode == CONST) {
+        // Perform check for uninitialized let/const variables.
+        if (value->op() == the_hole->op()) {
+          value = BuildThrowReferenceError(variable);
+        } else if (value->opcode() == IrOpcode::kPhi) {
+          value = BuildHoleCheckThrow(value, variable, value);
+        }
+      }
+      return value;
+    }
+    case Variable::CONTEXT: {
+      // Context variable (potentially up the context chain).
+      int depth = current_scope()->ContextChainLength(variable->scope());
+      bool immutable = variable->maybe_assigned() == kNotAssigned;
+      Operator* op =
+          javascript()->LoadContext(depth, variable->index(), immutable);
+      Node* value = NewNode(op, current_context());
+      // TODO(titzer): initialization checks are redundant for already
+      // initialized immutable context loads, but only specialization knows.
+      // Maybe specializer should be a parameter to the graph builder?
+      if (mode == CONST_LEGACY) {
+        // Perform check for uninitialized legacy const variables.
+        Node* undefined = jsgraph()->UndefinedConstant();
+        value = BuildHoleCheckSilent(value, undefined, value);
+      } else if (mode == LET || mode == CONST) {
+        // Perform check for uninitialized let/const variables.
+        value = BuildHoleCheckThrow(value, variable, value);
+      }
+      return value;
+    }
+    case Variable::LOOKUP: {
+      // Dynamic lookup of context variable (anywhere in the chain).
+      Node* name = jsgraph()->Constant(variable->name());
+      Runtime::FunctionId function_id =
+          (contextual_mode == CONTEXTUAL)
+              ? Runtime::kLoadLookupSlot
+              : Runtime::kLoadLookupSlotNoReferenceError;
+      Operator* op = javascript()->Runtime(function_id, 2);
+      Node* pair = NewNode(op, current_context(), name);
+      return NewNode(common()->Projection(0), pair);
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+Node* AstGraphBuilder::BuildVariableDelete(Variable* variable) {
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      // Global var, const, or let variable.
+      Node* global = BuildLoadGlobalObject();
+      Node* name = jsgraph()->Constant(variable->name());
+      Operator* op = javascript()->DeleteProperty(strict_mode());
+      return NewNode(op, global, name);
+    }
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::CONTEXT:
+      // Local var, const, or let variable or context variable.
+      return variable->is_this() ? jsgraph()->TrueConstant()
+                                 : jsgraph()->FalseConstant();
+    case Variable::LOOKUP: {
+      // Dynamic lookup of context variable (anywhere in the chain).
+      Node* name = jsgraph()->Constant(variable->name());
+      Operator* op = javascript()->Runtime(Runtime::kDeleteLookupSlot, 2);
+      return NewNode(op, current_context(), name);
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+Node* AstGraphBuilder::BuildVariableAssignment(Variable* variable, Node* value,
+                                               Token::Value op) {
+  Node* the_hole = jsgraph()->TheHoleConstant();
+  VariableMode mode = variable->mode();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      // Global var, const, or let variable.
+      if (!info()->is_native()) {
+        // TODO(turbofan): This special case is needed only because we don't
+        // use StoreICs yet. Remove this once StoreNamed is lowered to an IC.
+        Node* name = jsgraph()->Constant(variable->name());
+        Node* strict = jsgraph()->Constant(strict_mode());
+        Operator* op = javascript()->Runtime(Runtime::kStoreLookupSlot, 4);
+        return NewNode(op, value, current_context(), name, strict);
+      }
+      Node* global = BuildLoadGlobalObject();
+      PrintableUnique<Name> name = MakeUnique(variable->name());
+      Operator* op = javascript()->StoreNamed(name);
+      return NewNode(op, global, value);
+    }
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+      // Local var, const, or let variable.
+      if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) {
+        // Perform an initialization check for legacy const variables.
+        Node* current = environment()->Lookup(variable);
+        if (current->op() != the_hole->op()) {
+          value = BuildHoleCheckSilent(current, value, current);
+        }
+      } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
+        // Non-initializing assignments to legacy const are ignored.
+        return value;
+      } else if (mode == LET && op != Token::INIT_LET) {
+        // Perform an initialization check for let declared variables.
+        // Also note that the dynamic hole-check is only done to ensure that
+        // this does not break in the presence of do-expressions within the
+        // temporal dead zone of a let declared variable.
+        Node* current = environment()->Lookup(variable);
+        if (current->op() == the_hole->op()) {
+          value = BuildThrowReferenceError(variable);
+        } else if (value->opcode() == IrOpcode::kPhi) {
+          value = BuildHoleCheckThrow(current, variable, value);
+        }
+      } else if (mode == CONST && op != Token::INIT_CONST) {
+        // All assignments to const variables are early errors.
+        UNREACHABLE();
+      }
+      environment()->Bind(variable, value);
+      return value;
+    case Variable::CONTEXT: {
+      // Context variable (potentially up the context chain).
+      int depth = current_scope()->ContextChainLength(variable->scope());
+      if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) {
+        // Perform an initialization check for legacy const variables.
+        Operator* op =
+            javascript()->LoadContext(depth, variable->index(), false);
+        Node* current = NewNode(op, current_context());
+        value = BuildHoleCheckSilent(current, value, current);
+      } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
+        // Non-initializing assignments to legacy const are ignored.
+        return value;
+      } else if (mode == LET && op != Token::INIT_LET) {
+        // Perform an initialization check for let declared variables.
+        Operator* op =
+            javascript()->LoadContext(depth, variable->index(), false);
+        Node* current = NewNode(op, current_context());
+        value = BuildHoleCheckThrow(current, variable, value);
+      } else if (mode == CONST && op != Token::INIT_CONST) {
+        // All assignments to const variables are early errors.
+        UNREACHABLE();
+      }
+      Operator* op = javascript()->StoreContext(depth, variable->index());
+      return NewNode(op, current_context(), value);
+    }
+    case Variable::LOOKUP: {
+      // Dynamic lookup of context variable (anywhere in the chain).
+      Node* name = jsgraph()->Constant(variable->name());
+      Node* strict = jsgraph()->Constant(strict_mode());
+      // TODO(mstarzinger): Use Runtime::kInitializeLegacyConstLookupSlot for
+      // initializations of const declarations.
+      Operator* op = javascript()->Runtime(Runtime::kStoreLookupSlot, 4);
+      return NewNode(op, value, current_context(), name, strict);
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+Node* AstGraphBuilder::BuildLoadBuiltinsObject() {
+  // TODO(mstarzinger): Better load from function context, otherwise optimized
+  // code cannot be shared across native contexts.
+  return jsgraph()->Constant(handle(info()->context()->builtins()));
+}
+
+
+Node* AstGraphBuilder::BuildLoadGlobalObject() {
+#if 0
+  Node* context = GetFunctionContext();
+  // TODO(mstarzinger): Use mid-level operator on FixedArray instead of the
+  // JS-level operator that targets JSObject.
+  Node* index = jsgraph()->Constant(Context::GLOBAL_OBJECT_INDEX);
+  return NewNode(javascript()->LoadProperty(), context, index);
+#else
+  // TODO(mstarzinger): Better load from function context, otherwise optimized
+  // code cannot be shared across native contexts. See unused code above.
+  return jsgraph()->Constant(handle(info()->context()->global_object()));
+#endif
+}
+
+
+Node* AstGraphBuilder::BuildToBoolean(Node* value) {
+  // TODO(mstarzinger): Possible optimization is to NOP for boolean values.
+  return NewNode(javascript()->ToBoolean(), value);
+}
+
+
+Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable) {
+  // TODO(mstarzinger): Should be unified with the VisitThrow implementation.
+  Node* variable_name = jsgraph()->Constant(variable->name());
+  Operator* op = javascript()->Runtime(Runtime::kThrowReferenceError, 1);
+  return NewNode(op, variable_name);
+}
+
+
+Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op) {
+  Operator* js_op;
+  switch (op) {
+    case Token::BIT_OR:
+      js_op = javascript()->BitwiseOr();
+      break;
+    case Token::BIT_AND:
+      js_op = javascript()->BitwiseAnd();
+      break;
+    case Token::BIT_XOR:
+      js_op = javascript()->BitwiseXor();
+      break;
+    case Token::SHL:
+      js_op = javascript()->ShiftLeft();
+      break;
+    case Token::SAR:
+      js_op = javascript()->ShiftRight();
+      break;
+    case Token::SHR:
+      js_op = javascript()->ShiftRightLogical();
+      break;
+    case Token::ADD:
+      js_op = javascript()->Add();
+      break;
+    case Token::SUB:
+      js_op = javascript()->Subtract();
+      break;
+    case Token::MUL:
+      js_op = javascript()->Multiply();
+      break;
+    case Token::DIV:
+      js_op = javascript()->Divide();
+      break;
+    case Token::MOD:
+      js_op = javascript()->Modulus();
+      break;
+    default:
+      UNREACHABLE();
+      js_op = NULL;
+  }
+  return NewNode(js_op, left, right);
+}
+
+
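+// If the operator of {node} can deoptimize lazily, splits off a deoptimization
+// block that captures the frame state at {ast_id} and resumes normal control
+// flow in a fresh continuation block.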
+void AstGraphBuilder::BuildLazyBailout(Node* node, BailoutId ast_id) {
+  if (OperatorProperties::CanLazilyDeoptimize(node->op())) {
+    // The deopting node should have an outgoing control dependency.
+    ASSERT(GetControlDependency() == node);
+
+    StructuredGraphBuilder::Environment* continuation_env =
+        environment_internal();
+    // Create environment for the deoptimization block, and build the block.
+    StructuredGraphBuilder::Environment* deopt_env =
+        CopyEnvironment(continuation_env);
+    set_environment(deopt_env);
+
+    NewNode(common()->LazyDeoptimization());
+
+    FrameStateDescriptor state_descriptor(ast_id);
+    Node* state_node = NewNode(common()->FrameState(state_descriptor));
+
+    Node* deoptimize_node = NewNode(common()->Deoptimize(), state_node);
+
+    UpdateControlDependencyToLeaveFunction(deoptimize_node);
+
+    // Continue with the original environment.
+    set_environment(continuation_env);
+
+    NewNode(common()->Continuation());
+  }
+}
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
new file mode 100644 (file)
index 0000000..4ab792e
--- /dev/null
@@ -0,0 +1,417 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_AST_GRAPH_BUILDER_H_
+#define V8_COMPILER_AST_GRAPH_BUILDER_H_
+
+#include "src/v8.h"
+
+#include "src/ast.h"
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/js-graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class ControlBuilder;
+class LoopBuilder;
+class Graph;
+
+// The AstGraphBuilder produces a high-level IR graph, based on an
+// underlying AST. The produced graph can either be compiled into a
+// stand-alone function or be wired into another graph for the purposes
+// of function inlining.
+class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
+ public:
+  AstGraphBuilder(CompilationInfo* info, JSGraph* jsgraph,
+                  SourcePositionTable* source_positions);
+
+  // Creates a graph by visiting the entire AST.
+  bool CreateGraph();
+
+ protected:
+  class AstContext;
+  class AstEffectContext;
+  class AstValueContext;
+  class AstTestContext;
+  class BreakableScope;
+  class ContextScope;
+  class Environment;
+
+  Environment* environment() {
+    return reinterpret_cast<Environment*>(environment_internal());
+  }
+
+  AstContext* ast_context() const { return ast_context_; }
+  BreakableScope* breakable() const { return breakable_; }
+  ContextScope* execution_context() const { return execution_context_; }
+
+  void set_ast_context(AstContext* ctx) { ast_context_ = ctx; }
+  void set_breakable(BreakableScope* brk) { breakable_ = brk; }
+  void set_execution_context(ContextScope* ctx) { execution_context_ = ctx; }
+
+  // Support for control flow builders. The concrete type of the environment
+  // depends on the graph builder, but environments themselves are not virtual.
+  typedef StructuredGraphBuilder::Environment BaseEnvironment;
+  virtual BaseEnvironment* CopyEnvironment(BaseEnvironment* env);
+
+  SourcePositionTable* source_positions() { return source_positions_; }
+
+  // TODO(mstarzinger): The pipeline only needs to be a friend to access the
+  // function context. Remove as soon as the context is a parameter.
+  friend class Pipeline;
+
+  // Getters for values in the activation record.
+  Node* GetFunctionClosure();
+  Node* GetFunctionContext();
+
+  //
+  // The following build methods all generate graph fragments and return one
+  // resulting node. The operand stack height remains the same, variables and
+  // other dependencies tracked by the environment might be mutated though.
+  //
+
+  // Builder to create a local function context.
+  Node* BuildLocalFunctionContext(Node* context, Node* closure);
+
+  // Builder to create an arguments object if it is used.
+  Node* BuildArgumentsObject(Variable* arguments);
+
+  // Builders for variable load and assignment.
+  Node* BuildVariableAssignment(Variable* var, Node* value, Token::Value op);
+  Node* BuildVariableDelete(Variable* var);
+  Node* BuildVariableLoad(Variable* var, ContextualMode mode = CONTEXTUAL);
+
+  // Builders for accessing the function context.
+  Node* BuildLoadBuiltinsObject();
+  Node* BuildLoadGlobalObject();
+  Node* BuildLoadClosure();
+
+  // Builders for automatic type conversion.
+  Node* BuildToBoolean(Node* value);
+
+  // Builders for error reporting at runtime.
+  Node* BuildThrowReferenceError(Variable* var);
+
+  // Builders for dynamic hole-checks at runtime.
+  Node* BuildHoleCheckSilent(Node* value, Node* for_hole, Node* not_hole);
+  Node* BuildHoleCheckThrow(Node* value, Variable* var, Node* not_hole);
+
+  // Builders for binary operations.
+  Node* BuildBinaryOp(Node* left, Node* right, Token::Value op);
+
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  // Visiting functions for AST nodes make this an AstVisitor.
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  // Visiting function for declarations list is overridden.
+  virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
+
+ private:
+  CompilationInfo* info_;
+  AstContext* ast_context_;
+  JSGraph* jsgraph_;
+  SourcePositionTable* source_positions_;
+
+  // List of global declarations for functions and variables.
+  ZoneList<Handle<Object> > globals_;
+
+  // Stack of breakable statements entered by the visitor.
+  BreakableScope* breakable_;
+
+  // Stack of context objects pushed onto the chain by the visitor.
+  ContextScope* execution_context_;
+
+  // Nodes representing values in the activation record.
+  SetOncePointer<Node> function_closure_;
+  SetOncePointer<Node> function_context_;
+
+  CompilationInfo* info() { return info_; }
+  StrictMode strict_mode() { return info()->strict_mode(); }
+  JSGraph* jsgraph() { return jsgraph_; }
+  JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
+  ZoneList<Handle<Object> >* globals() { return &globals_; }
+
+  // Current scope during visitation.
+  inline Scope* current_scope() const;
+
+  // Process arguments to a call by popping {arity} elements off the operand
+  // stack and building a call node using the given call operator.
+  Node* ProcessArguments(Operator* op, int arity);
+
+  // Visit statements.
+  void VisitIfNotNull(Statement* stmt);
+
+  // Visit expressions.
+  void VisitForTest(Expression* expr);
+  void VisitForEffect(Expression* expr);
+  void VisitForValue(Expression* expr);
+  void VisitForValueOrNull(Expression* expr);
+  void VisitForValues(ZoneList<Expression*>* exprs);
+
+  // Common for all IterationStatement bodies.
+  void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop, int);
+
+  // Dispatched from VisitCallRuntime.
+  void VisitCallJSRuntime(CallRuntime* expr);
+
+  // Dispatched from VisitUnaryOperation.
+  void VisitDelete(UnaryOperation* expr);
+  void VisitVoid(UnaryOperation* expr);
+  void VisitTypeof(UnaryOperation* expr);
+  void VisitNot(UnaryOperation* expr);
+
+  // Dispatched from VisitBinaryOperation.
+  void VisitComma(BinaryOperation* expr);
+  void VisitLogicalExpression(BinaryOperation* expr);
+  void VisitArithmeticExpression(BinaryOperation* expr);
+
+  // Dispatched from VisitForInStatement.
+  void VisitForInAssignment(Expression* expr, Node* value);
+
+  void BuildLazyBailout(Node* node, BailoutId ast_id);
+
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+  DISALLOW_COPY_AND_ASSIGN(AstGraphBuilder);
+};
+
+
+// The abstract execution environment for generated code consists of
+// parameter variables, local variables and the operand stack. The
+// environment will perform proper SSA-renaming of all tracked nodes
+// at split and merge points in the control flow. Internally all the
+// values are stored in one list using the following layout:
+//
+//  [parameters (+receiver)] [locals] [operand stack]
+//
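+// E.g. a function with two parameters and one stack-allocated local yields
+// the layout [receiver, p0, p1] [local0] [...operands...], so parameter i is
+// looked up at index i + 1 and local j at index j + parameters_count_.
+//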
+class AstGraphBuilder::Environment
+    : public StructuredGraphBuilder::Environment {
+ public:
+  Environment(AstGraphBuilder* builder, Scope* scope, Node* control_dependency);
+  Environment(const Environment& copy);
+
+  int parameters_count() const { return parameters_count_; }
+  int locals_count() const { return locals_count_; }
+  int stack_height() {
+    return values()->size() - parameters_count_ - locals_count_;
+  }
+
+  // Operations on parameter or local variables. The parameter indices are
+  // shifted by 1 (receiver is parameter index -1 but environment index 0).
+  void Bind(Variable* variable, Node* node) {
+    ASSERT(variable->IsStackAllocated());
+    if (variable->IsParameter()) {
+      values()->at(variable->index() + 1) = node;
+      parameters_dirty_ = true;
+    } else {
+      ASSERT(variable->IsStackLocal());
+      values()->at(variable->index() + parameters_count_) = node;
+      locals_dirty_ = true;
+    }
+  }
+  Node* Lookup(Variable* variable) {
+    ASSERT(variable->IsStackAllocated());
+    if (variable->IsParameter()) {
+      return values()->at(variable->index() + 1);
+    } else {
+      ASSERT(variable->IsStackLocal());
+      return values()->at(variable->index() + parameters_count_);
+    }
+  }
+
+  // Operations on the operand stack.
+  void Push(Node* node) {
+    values()->push_back(node);
+    stack_dirty_ = true;
+  }
+  Node* Top() {
+    ASSERT(stack_height() > 0);
+    return values()->back();
+  }
+  Node* Pop() {
+    ASSERT(stack_height() > 0);
+    Node* back = values()->back();
+    values()->pop_back();
+    return back;
+  }
+
+  // Direct mutations of the operand stack.
+  void Poke(int depth, Node* node) {
+    ASSERT(depth >= 0 && depth < stack_height());
+    int index = values()->size() - depth - 1;
+    values()->at(index) = node;
+  }
+  Node* Peek(int depth) {
+    ASSERT(depth >= 0 && depth < stack_height());
+    int index = values()->size() - depth - 1;
+    return values()->at(index);
+  }
+  void Drop(int depth) {
+    ASSERT(depth >= 0 && depth <= stack_height());
+    values()->erase(values()->end() - depth, values()->end());
+  }
+
+  // Preserve a checkpoint of the environment for the IR graph. Any
+  // further mutation of the environment will not affect checkpoints.
+  Node* Checkpoint(BailoutId ast_id);
+
+ private:
+  int parameters_count_;
+  int locals_count_;
+  Node* parameters_node_;
+  Node* locals_node_;
+  Node* stack_node_;
+  bool parameters_dirty_;
+  bool locals_dirty_;
+  bool stack_dirty_;
+};
+
+
+// Each expression in the AST is evaluated in a specific context. This context
+// decides how the evaluation result is passed up the visitor.
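+// E.g. "a + b" is visited in an effect context when used as an expression
+// statement, in a value context when used as an initializer, and in a test
+// context when used as an if-condition.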
+class AstGraphBuilder::AstContext BASE_EMBEDDED {
+ public:
+  bool IsEffect() const { return kind_ == Expression::kEffect; }
+  bool IsValue() const { return kind_ == Expression::kValue; }
+  bool IsTest() const { return kind_ == Expression::kTest; }
+
+  // Plug a node into this expression context.  Call this function in tail
+  // position in the Visit functions for expressions.
+  virtual void ProduceValue(Node* value) = 0;
+
+  // Unplugs a node from this expression context.  Call this to retrieve the
+  // result of another Visit function that already plugged the context.
+  virtual Node* ConsumeValue() = 0;
+
+  // Shortcut for "context->ProduceValue(context->ConsumeValue())".
+  void ReplaceValue() { ProduceValue(ConsumeValue()); }
+
+ protected:
+  AstContext(AstGraphBuilder* owner, Expression::Context kind);
+  virtual ~AstContext();
+
+  AstGraphBuilder* owner() const { return owner_; }
+  Environment* environment() const { return owner_->environment(); }
+
+// We want to be able to assert, in a context-specific way, that the stack
+// height makes sense when the context is filled.
+#ifdef DEBUG
+  int original_height_;
+#endif
+
+ private:
+  Expression::Context kind_;
+  AstGraphBuilder* owner_;
+  AstContext* outer_;
+};
+
+
+// Context to evaluate expression for its side effects only.
+class AstGraphBuilder::AstEffectContext V8_FINAL : public AstContext {
+ public:
+  explicit AstEffectContext(AstGraphBuilder* owner)
+      : AstContext(owner, Expression::kEffect) {}
+  virtual ~AstEffectContext();
+  virtual void ProduceValue(Node* value) V8_OVERRIDE;
+  virtual Node* ConsumeValue() V8_OVERRIDE;
+};
+
+
+// Context to evaluate expression for its value (and side effects).
+class AstGraphBuilder::AstValueContext V8_FINAL : public AstContext {
+ public:
+  explicit AstValueContext(AstGraphBuilder* owner)
+      : AstContext(owner, Expression::kValue) {}
+  virtual ~AstValueContext();
+  virtual void ProduceValue(Node* value) V8_OVERRIDE;
+  virtual Node* ConsumeValue() V8_OVERRIDE;
+};
+
+
+// Context to evaluate expression for a condition value (and side effects).
+class AstGraphBuilder::AstTestContext V8_FINAL : public AstContext {
+ public:
+  explicit AstTestContext(AstGraphBuilder* owner)
+      : AstContext(owner, Expression::kTest) {}
+  virtual ~AstTestContext();
+  virtual void ProduceValue(Node* value) V8_OVERRIDE;
+  virtual Node* ConsumeValue() V8_OVERRIDE;
+};
+
+
+// Scoped class tracking breakable statements entered by the visitor. Allows
+// the visitor to properly 'break' and 'continue' iteration statements as well
+// as to 'break' out of blocks within switch statements.
+class AstGraphBuilder::BreakableScope BASE_EMBEDDED {
+ public:
+  BreakableScope(AstGraphBuilder* owner, BreakableStatement* target,
+                 ControlBuilder* control, int drop_extra)
+      : owner_(owner),
+        target_(target),
+        next_(owner->breakable()),
+        control_(control),
+        drop_extra_(drop_extra) {
+    owner_->set_breakable(this);  // Push.
+  }
+
+  ~BreakableScope() {
+    owner_->set_breakable(next_);  // Pop.
+  }
+
+  // Either 'break' or 'continue' the target statement.
+  void BreakTarget(BreakableStatement* target);
+  void ContinueTarget(BreakableStatement* target);
+
+ private:
+  AstGraphBuilder* owner_;
+  BreakableStatement* target_;
+  BreakableScope* next_;
+  ControlBuilder* control_;
+  int drop_extra_;
+
+  // Find the correct scope for the target statement. Note that this also drops
+  // extra operands from the environment for each scope skipped along the way.
+  BreakableScope* FindBreakable(BreakableStatement* target);
+};
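+
+
+// Usage sketch (illustrative): while visiting, say, a WhileStatement, the
+// visitor enters a scope so that nested 'break'/'continue' statements can
+// find their target:
+//   LoopBuilder while_loop(this);
+//   BreakableScope scope(this, stmt, &while_loop, 0);
+//   // ... visit the loop body; BreakTarget(stmt) routes to while_loop ...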
+
+
+// Scoped class tracking context objects created by the visitor. Represents
+// mutations of the context chain within the function body and allows the
+// current {scope} and {context} to be changed during visitation.
+class AstGraphBuilder::ContextScope BASE_EMBEDDED {
+ public:
+  ContextScope(AstGraphBuilder* owner, Scope* scope, Node* context)
+      : owner_(owner),
+        next_(owner->execution_context()),
+        outer_(owner->current_context()),
+        scope_(scope) {
+    owner_->set_execution_context(this);  // Push.
+    owner_->set_current_context(context);
+  }
+
+  ~ContextScope() {
+    owner_->set_execution_context(next_);  // Pop.
+    owner_->set_current_context(outer_);
+  }
+
+  // Current scope during visitation.
+  Scope* scope() const { return scope_; }
+
+ private:
+  AstGraphBuilder* owner_;
+  ContextScope* next_;
+  Node* outer_;
+  Scope* scope_;
+};
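+
+// Usage sketch (illustrative; `scope` and `local_context` are stand-ins):
+//   { ContextScope nested(this, scope, local_context);
+//     Visit(body); }  // Previous scope and context restored on destruction.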
+
+Scope* AstGraphBuilder::current_scope() const {
+  return execution_context_->scope();
+}
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_AST_GRAPH_BUILDER_H_
diff --git a/src/compiler/code-generator-impl.h b/src/compiler/code-generator-impl.h
new file mode 100644 (file)
index 0000000..b5b5451
--- /dev/null
@@ -0,0 +1,130 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CODE_GENERATOR_IMPL_H_
+#define V8_COMPILER_CODE_GENERATOR_IMPL_H_
+
+#include "src/compiler/code-generator.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Converts InstructionOperands from a given instruction to
+// architecture-specific registers and operands after they have been assigned
+// by the register allocator.
+class InstructionOperandConverter {
+ public:
+  InstructionOperandConverter(CodeGenerator* gen, Instruction* instr)
+      : gen_(gen), instr_(instr) {}
+
+  Register InputRegister(int index) {
+    return ToRegister(instr_->InputAt(index));
+  }
+
+  DoubleRegister InputDoubleRegister(int index) {
+    return ToDoubleRegister(instr_->InputAt(index));
+  }
+
+  double InputDouble(int index) { return ToDouble(instr_->InputAt(index)); }
+
+  int32_t InputInt32(int index) {
+    return ToConstant(instr_->InputAt(index)).ToInt32();
+  }
+
+  int8_t InputInt8(int index) { return static_cast<int8_t>(InputInt32(index)); }
+
+  int16_t InputInt16(int index) {
+    return static_cast<int16_t>(InputInt32(index));
+  }
+
+  uint8_t InputInt5(int index) {
+    return static_cast<uint8_t>(InputInt32(index) & 0x1F);
+  }
+
+  uint8_t InputInt6(int index) {
+    return static_cast<uint8_t>(InputInt32(index) & 0x3F);
+  }
+
+  Handle<HeapObject> InputHeapObject(int index) {
+    return ToHeapObject(instr_->InputAt(index));
+  }
+
+  Label* InputLabel(int index) {
+    return gen_->code()->GetLabel(InputBlock(index));
+  }
+
+  BasicBlock* InputBlock(int index) {
+    NodeId block_id = static_cast<NodeId>(instr_->InputAt(index)->index());
+    // operand should be a block id.
+    ASSERT(block_id >= 0);
+    ASSERT(block_id < gen_->schedule()->BasicBlockCount());
+    return gen_->schedule()->GetBlockById(block_id);
+  }
+
+  Register OutputRegister() { return ToRegister(instr_->Output()); }
+
+  DoubleRegister OutputDoubleRegister() {
+    return ToDoubleRegister(instr_->Output());
+  }
+
+  Register TempRegister(int index) { return ToRegister(instr_->TempAt(index)); }
+
+  Register ToRegister(InstructionOperand* op) {
+    ASSERT(op->IsRegister());
+    return Register::FromAllocationIndex(op->index());
+  }
+
+  DoubleRegister ToDoubleRegister(InstructionOperand* op) {
+    ASSERT(op->IsDoubleRegister());
+    return DoubleRegister::FromAllocationIndex(op->index());
+  }
+
+  Constant ToConstant(InstructionOperand* operand) {
+    if (operand->IsImmediate()) {
+      return gen_->code()->GetImmediate(operand->index());
+    }
+    return gen_->code()->GetConstant(operand->index());
+  }
+
+  double ToDouble(InstructionOperand* operand) {
+    return ToConstant(operand).ToFloat64();
+  }
+
+  Handle<HeapObject> ToHeapObject(InstructionOperand* operand) {
+    return ToConstant(operand).ToHeapObject();
+  }
+
+  Frame* frame() const { return gen_->frame(); }
+  Isolate* isolate() const { return gen_->isolate(); }
+  Linkage* linkage() const { return gen_->linkage(); }
+
+ protected:
+  CodeGenerator* gen_;
+  Instruction* instr_;
+};
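+
+
+// Usage sketch (illustrative) from an architecture-specific backend:
+//   void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+//     InstructionOperandConverter i(this, instr);
+//     __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+//   }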
+
+
+// TODO(dcarney): generify this on bleeding_edge and replace this call
+// when merged.
+static inline void FinishCode(MacroAssembler* masm) {
+#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
+  masm->CheckConstPool(true, false);
+#endif
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_CODE_GENERATOR_IMPL_H_
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
new file mode 100644 (file)
index 0000000..4e7562d
--- /dev/null
@@ -0,0 +1,288 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/linkage.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+CodeGenerator::CodeGenerator(InstructionSequence* code)
+    : code_(code),
+      current_block_(NULL),
+      current_source_position_(SourcePosition::Invalid()),
+      masm_(code->zone()->isolate(), NULL, 0),
+      resolver_(this),
+      safepoints_(code->zone()),
+      lazy_deoptimization_entries_(
+          LazyDeoptimizationEntries::allocator_type(code->zone())),
+      deoptimization_states_(
+          DeoptimizationStates::allocator_type(code->zone())),
+      deoptimization_literals_(Literals::allocator_type(code->zone())),
+      translations_(code->zone()) {
+  deoptimization_states_.resize(code->GetDeoptimizationEntryCount(), NULL);
+}
+
+
+Handle<Code> CodeGenerator::GenerateCode() {
+  CompilationInfo* info = linkage()->info();
+
+  // Emit a code line info recording start event.
+  PositionsRecorder* recorder = masm()->positions_recorder();
+  LOG_CODE_EVENT(isolate(), CodeStartLinePosInfoRecordEvent(recorder));
+
+  // Place function entry hook if requested to do so.
+  if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm());
+  }
+
+  // Architecture-specific, linkage-specific prologue.
+  info->set_prologue_offset(masm()->pc_offset());
+  AssemblePrologue();
+
+  // Assemble all instructions.
+  for (InstructionSequence::const_iterator i = code()->begin();
+       i != code()->end(); ++i) {
+    AssembleInstruction(*i);
+  }
+
+  FinishCode(masm());
+
+  safepoints()->Emit(masm(), frame()->GetSpillSlotCount());
+
+  // TODO(titzer): what are the right code flags here?
+  Code::Kind kind = Code::STUB;
+  if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
+    kind = Code::OPTIMIZED_FUNCTION;
+  }
+  Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
+      masm(), Code::ComputeFlags(kind), info);
+  result->set_is_turbofanned(true);
+  result->set_stack_slots(frame()->GetSpillSlotCount());
+  result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
+
+  PopulateDeoptimizationData(result);
+
+  // Emit a code line info recording stop event.
+  void* line_info = recorder->DetachJITHandlerData();
+  LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(*result, line_info));
+
+  return result;
+}
+
+
+void CodeGenerator::RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
+                                    int arguments,
+                                    Safepoint::DeoptMode deopt_mode) {
+  const ZoneList<InstructionOperand*>* operands =
+      pointers->GetNormalizedOperands();
+  Safepoint safepoint =
+      safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
+  for (int i = 0; i < operands->length(); i++) {
+    InstructionOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index(), zone());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      Register reg = Register::FromAllocationIndex(pointer->index());
+      safepoint.DefinePointerRegister(reg, zone());
+    }
+  }
+}
+
+
+void CodeGenerator::AssembleInstruction(Instruction* instr) {
+  if (instr->IsBlockStart()) {
+    // Bind a label for a block start and handle parallel moves.
+    BlockStartInstruction* block_start = BlockStartInstruction::cast(instr);
+    current_block_ = block_start->block();
+    if (FLAG_code_comments) {
+      // TODO(titzer): these code comments are a giant memory leak.
+      Vector<char> buffer = Vector<char>::New(32);
+      SNPrintF(buffer, "-- B%d start --", block_start->block()->id());
+      masm()->RecordComment(buffer.start());
+    }
+    masm()->bind(block_start->label());
+  }
+  if (instr->IsGapMoves()) {
+    // Handle parallel moves associated with the gap instruction.
+    AssembleGap(GapInstruction::cast(instr));
+  } else if (instr->IsSourcePosition()) {
+    AssembleSourcePosition(SourcePositionInstruction::cast(instr));
+  } else {
+    // Assemble architecture-specific code for the instruction.
+    AssembleArchInstruction(instr);
+
+    // Assemble branches or boolean materializations after this instruction.
+    FlagsMode mode = FlagsModeField::decode(instr->opcode());
+    FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
+    switch (mode) {
+      case kFlags_none:
+        return;
+      case kFlags_set:
+        return AssembleArchBoolean(instr, condition);
+      case kFlags_branch:
+        return AssembleArchBranch(instr, condition);
+    }
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSourcePosition(SourcePositionInstruction* instr) {
+  SourcePosition source_position = instr->source_position();
+  if (source_position == current_source_position_) return;
+  ASSERT(!source_position.IsInvalid());
+  if (!source_position.IsUnknown()) {
+    int code_pos = source_position.raw();
+    masm()->positions_recorder()->RecordPosition(code_pos);
+    masm()->positions_recorder()->WriteRecordedPositions();
+    if (FLAG_code_comments) {
+      Vector<char> buffer = Vector<char>::New(256);
+      CompilationInfo* info = linkage()->info();
+      int ln = Script::GetLineNumber(info->script(), code_pos);
+      int cn = Script::GetColumnNumber(info->script(), code_pos);
+      if (info->script()->name()->IsString()) {
+        Handle<String> file(String::cast(info->script()->name()));
+        base::OS::SNPrintF(buffer.start(), buffer.length(), "-- %s:%d:%d --",
+                           file->ToCString().get(), ln, cn);
+      } else {
+        base::OS::SNPrintF(buffer.start(), buffer.length(),
+                           "-- <unknown>:%d:%d --", ln, cn);
+      }
+      masm()->RecordComment(buffer.start());
+    }
+  }
+  current_source_position_ = source_position;
+}
+
+
+void CodeGenerator::AssembleGap(GapInstruction* instr) {
+  for (int i = GapInstruction::FIRST_INNER_POSITION;
+       i <= GapInstruction::LAST_INNER_POSITION; i++) {
+    GapInstruction::InnerPosition inner_pos =
+        static_cast<GapInstruction::InnerPosition>(i);
+    ParallelMove* move = instr->GetParallelMove(inner_pos);
+    if (move != NULL) resolver()->Resolve(move);
+  }
+}
+
+
+void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
+  CompilationInfo* info = linkage()->info();
+  int deopt_count = code()->GetDeoptimizationEntryCount();
+  int patch_count = static_cast<int>(lazy_deoptimization_entries_.size());
+  if (patch_count == 0 && deopt_count == 0) return;
+  Handle<DeoptimizationInputData> data = DeoptimizationInputData::New(
+      isolate(), deopt_count, patch_count, TENURED);
+
+  Handle<ByteArray> translation_array =
+      translations_.CreateByteArray(isolate()->factory());
+
+  data->SetTranslationByteArray(*translation_array);
+  data->SetInlinedFunctionCount(Smi::FromInt(0));
+  data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
+  // TODO(jarin) The following code was copied over from Lithium, not sure
+  // whether the scope or the IsOptimizing condition is really needed.
+  if (info->IsOptimizing()) {
+    // Reference to shared function info does not change between phases.
+    AllowDeferredHandleDereference allow_handle_dereference;
+    data->SetSharedFunctionInfo(*info->shared_info());
+  } else {
+    data->SetSharedFunctionInfo(Smi::FromInt(0));
+  }
+
+  Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(
+      static_cast<int>(deoptimization_literals_.size()), TENURED);
+  {
+    AllowDeferredHandleDereference copy_handles;
+    for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
+      literals->set(i, *deoptimization_literals_[i]);
+    }
+    data->SetLiteralArray(*literals);
+  }
+
+  // No OSR in Turbofan yet...
+  BailoutId osr_ast_id = BailoutId::None();
+  data->SetOsrAstId(Smi::FromInt(osr_ast_id.ToInt()));
+  data->SetOsrPcOffset(Smi::FromInt(-1));
+
+  // Populate deoptimization entries.
+  for (int i = 0; i < deopt_count; i++) {
+    FrameStateDescriptor descriptor = code()->GetDeoptimizationEntry(i);
+    data->SetAstId(i, descriptor.bailout_id());
+    data->SetTranslationIndex(i, Smi::FromInt(0));
+    data->SetArgumentsStackHeight(i, Smi::FromInt(0));
+    data->SetPc(i, Smi::FromInt(-1));
+  }
+
+  // Populate the return address patcher entries.
+  for (int i = 0; i < patch_count; ++i) {
+    LazyDeoptimizationEntry entry = lazy_deoptimization_entries_[i];
+    ASSERT(entry.position_after_call() == entry.continuation()->pos() ||
+           IsNopForSmiCodeInlining(code_object, entry.position_after_call(),
+                                   entry.continuation()->pos()));
+    data->SetReturnAddressPc(i, Smi::FromInt(entry.position_after_call()));
+    data->SetPatchedAddressPc(i, Smi::FromInt(entry.deoptimization()->pos()));
+  }
+
+  code_object->set_deoptimization_data(*data);
+}
+
+
+void CodeGenerator::RecordLazyDeoptimizationEntry(Instruction* instr) {
+  InstructionOperandConverter i(this, instr);
+
+  Label after_call;
+  masm()->bind(&after_call);
+
+  // The continuation and deoptimization are the last two inputs:
+  BasicBlock* cont_block = i.InputBlock(instr->InputCount() - 2);
+  BasicBlock* deopt_block = i.InputBlock(instr->InputCount() - 1);
+
+  Label* cont_label = code_->GetLabel(cont_block);
+  Label* deopt_label = code_->GetLabel(deopt_block);
+
+  lazy_deoptimization_entries_.push_back(
+      LazyDeoptimizationEntry(after_call.pos(), cont_label, deopt_label));
+}
+
+
+int CodeGenerator::DefineDeoptimizationLiteral(Handle<Object> literal) {
+  int result = static_cast<int>(deoptimization_literals_.size());
+  for (unsigned i = 0; i < deoptimization_literals_.size(); ++i) {
+    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+  }
+  deoptimization_literals_.push_back(literal);
+  return result;
+}
+
+
+void CodeGenerator::BuildTranslation(Instruction* instr,
+                                     int deoptimization_id) {
+  // We should build translation only once.
+  ASSERT_EQ(NULL, deoptimization_states_[deoptimization_id]);
+
+  // TODO(jarin) This should build translation codes from the instruction inputs
+  // and from the framestate descriptor. At the moment, we only create a dummy
+  // translation.
+
+  FrameStateDescriptor descriptor =
+      code()->GetDeoptimizationEntry(deoptimization_id);
+  Translation translation(&translations_, 1, 1, zone());
+  translation.BeginJSFrame(descriptor.bailout_id(), Translation::kSelfLiteralId,
+                           0);
+  int undefined_literal_id =
+      DefineDeoptimizationLiteral(isolate()->factory()->undefined_value());
+  translation.StoreLiteral(undefined_literal_id);
+
+  deoptimization_states_[deoptimization_id] =
+      new (zone()) DeoptimizationState(translation.index());
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h
new file mode 100644 (file)
index 0000000..185efad
--- /dev/null
@@ -0,0 +1,144 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CODE_GENERATOR_H_
+#define V8_COMPILER_CODE_GENERATOR_H_
+
+#include <deque>
+
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/instruction.h"
+#include "src/deoptimizer.h"
+#include "src/macro-assembler.h"
+#include "src/safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Generates native code for a sequence of instructions.
+class CodeGenerator V8_FINAL : public GapResolver::Assembler {
+ public:
+  explicit CodeGenerator(InstructionSequence* code);
+
+  // Generate native code.
+  Handle<Code> GenerateCode();
+
+  InstructionSequence* code() const { return code_; }
+  Frame* frame() const { return code()->frame(); }
+  Graph* graph() const { return code()->graph(); }
+  Isolate* isolate() const { return zone()->isolate(); }
+  Linkage* linkage() const { return code()->linkage(); }
+  Schedule* schedule() const { return code()->schedule(); }
+
+ private:
+  MacroAssembler* masm() { return &masm_; }
+  GapResolver* resolver() { return &resolver_; }
+  SafepointTableBuilder* safepoints() { return &safepoints_; }
+  Zone* zone() const { return code()->zone(); }
+
+  // Checks if {block} will appear directly after {current_block_} when
+  // assembling code, in which case, a fall-through can be used.
+  bool IsNextInAssemblyOrder(const BasicBlock* block) const {
+    return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
+           block->deferred_ == current_block_->deferred_;
+  }
+
+  // Record a safepoint with the given pointer map.
+  void RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
+                       int arguments, Safepoint::DeoptMode deopt_mode);
+
+  // Assemble code for the specified instruction.
+  void AssembleInstruction(Instruction* instr);
+  void AssembleSourcePosition(SourcePositionInstruction* instr);
+  void AssembleGap(GapInstruction* gap);
+
+  // ===========================================================================
+  // ============= Architecture-specific code generation methods. ==============
+  // ===========================================================================
+
+  void AssembleArchInstruction(Instruction* instr);
+  void AssembleArchBranch(Instruction* instr, FlagsCondition condition);
+  void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
+
+  // Generates an architecture-specific, descriptor-specific prologue
+  // to set up a stack frame.
+  void AssemblePrologue();
+  // Generates an architecture-specific, descriptor-specific return sequence
+  // to tear down a stack frame.
+  void AssembleReturn();
+
+  // ===========================================================================
+  // ============== Architecture-specific gap resolver methods. ================
+  // ===========================================================================
+
+  // Interface used by the gap resolver to emit moves and swaps.
+  virtual void AssembleMove(InstructionOperand* source,
+                            InstructionOperand* destination) V8_OVERRIDE;
+  virtual void AssembleSwap(InstructionOperand* source,
+                            InstructionOperand* destination) V8_OVERRIDE;
+
+  // ===========================================================================
+  // Deoptimization table construction
+  void RecordLazyDeoptimizationEntry(Instruction* instr);
+  void PopulateDeoptimizationData(Handle<Code> code);
+  int DefineDeoptimizationLiteral(Handle<Object> literal);
+  void BuildTranslation(Instruction* instr, int deoptimization_id);
+  void AddNopForSmiCodeInlining();
+#ifdef DEBUG
+  static bool IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
+                                      int end_pc);
+#endif  // DEBUG
+  // ===========================================================================
+
+  class LazyDeoptimizationEntry V8_FINAL {
+   public:
+    LazyDeoptimizationEntry(int position_after_call, Label* continuation,
+                            Label* deoptimization)
+        : position_after_call_(position_after_call),
+          continuation_(continuation),
+          deoptimization_(deoptimization) {}
+
+    int position_after_call() const { return position_after_call_; }
+    Label* continuation() const { return continuation_; }
+    Label* deoptimization() const { return deoptimization_; }
+
+   private:
+    int position_after_call_;
+    Label* continuation_;
+    Label* deoptimization_;
+  };
+
+  struct DeoptimizationState : ZoneObject {
+    int translation_id_;
+
+    explicit DeoptimizationState(int translation_id)
+        : translation_id_(translation_id) {}
+  };
+
+  typedef std::deque<LazyDeoptimizationEntry,
+                     zone_allocator<LazyDeoptimizationEntry> >
+      LazyDeoptimizationEntries;
+  typedef std::deque<DeoptimizationState*,
+                     zone_allocator<DeoptimizationState*> >
+      DeoptimizationStates;
+  typedef std::deque<Handle<Object>, zone_allocator<Handle<Object> > > Literals;
+
+  InstructionSequence* code_;
+  BasicBlock* current_block_;
+  SourcePosition current_source_position_;
+  MacroAssembler masm_;
+  GapResolver resolver_;
+  SafepointTableBuilder safepoints_;
+  LazyDeoptimizationEntries lazy_deoptimization_entries_;
+  DeoptimizationStates deoptimization_states_;
+  Literals deoptimization_literals_;
+  TranslationBuffer translations_;
+};
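+
+// Usage sketch (illustrative): the pipeline hands a fully scheduled and
+// register-allocated InstructionSequence to the generator:
+//   CodeGenerator generator(sequence);  // `sequence` is hypothetical here.
+//   Handle<Code> code = generator.GenerateCode();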
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_CODE_GENERATOR_H_
diff --git a/src/compiler/common-node-cache.h b/src/compiler/common-node-cache.h
new file mode 100644 (file)
index 0000000..2b0ac0b
--- /dev/null
@@ -0,0 +1,51 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_COMMON_NODE_CACHE_H_
+#define V8_COMPILER_COMMON_NODE_CACHE_H_
+
+#include "src/assembler.h"
+#include "src/compiler/node-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Bundles various caches for common nodes.
+class CommonNodeCache V8_FINAL : public ZoneObject {
+ public:
+  explicit CommonNodeCache(Zone* zone) : zone_(zone) {}
+
+  Node** FindInt32Constant(int32_t value) {
+    return int32_constants_.Find(zone_, value);
+  }
+
+  Node** FindFloat64Constant(double value) {
+    // We canonicalize double constants at the bit representation level.
+    return float64_constants_.Find(zone_, BitCast<int64_t>(value));
+  }
+
+  Node** FindExternalConstant(ExternalReference reference) {
+    return external_constants_.Find(zone_, reference.address());
+  }
+
+  Node** FindNumberConstant(double value) {
+    // We canonicalize double constants at the bit representation level.
+    return number_constants_.Find(zone_, BitCast<int64_t>(value));
+  }
+
+  Zone* zone() const { return zone_; }
+
+ private:
+  Int32NodeCache int32_constants_;
+  Int64NodeCache float64_constants_;
+  PtrNodeCache external_constants_;
+  Int64NodeCache number_constants_;
+  Zone* zone_;
+};
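+
+// Usage sketch (illustrative): Find() returns the cache slot for a constant;
+// on a miss the caller materializes the node and stores it back:
+//   Node** loc = cache->FindInt32Constant(42);
+//   if (*loc == NULL) *loc = graph->NewNode(common->Int32Constant(42));
+//   Node* forty_two = *loc;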
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_COMMON_NODE_CACHE_H_
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
new file mode 100644 (file)
index 0000000..f7f365d
--- /dev/null
@@ -0,0 +1,285 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_COMMON_OPERATOR_H_
+#define V8_COMPILER_COMMON_OPERATOR_H_
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+
+class OStream;
+
+namespace compiler {
+
+class ControlOperator : public Operator1<int> {
+ public:
+  ControlOperator(IrOpcode::Value opcode, uint16_t properties, int inputs,
+                  int outputs, int controls, const char* mnemonic)
+      : Operator1(opcode, properties, inputs, outputs, mnemonic, controls) {}
+
+  virtual OStream& PrintParameter(OStream& os) const { return os; }  // NOLINT
+  int ControlInputCount() const { return parameter(); }
+};
+
+class CallOperator : public Operator1<CallDescriptor*> {
+ public:
+  CallOperator(CallDescriptor* descriptor, const char* mnemonic)
+      : Operator1(IrOpcode::kCall, descriptor->properties(),
+                  descriptor->InputCount(), descriptor->ReturnCount(), mnemonic,
+                  descriptor) {}
+
+  virtual OStream& PrintParameter(OStream& os) const {  // NOLINT
+    return os << "[" << *parameter() << "]";
+  }
+};
+
+class FrameStateDescriptor {
+ public:
+  explicit FrameStateDescriptor(BailoutId bailout_id)
+      : bailout_id_(bailout_id) {}
+
+  BailoutId bailout_id() const { return bailout_id_; }
+
+ private:
+  BailoutId bailout_id_;
+};
+
+// Interface for building common operators that can be used at any level of IR,
+// including JavaScript, mid-level, and low-level.
+// TODO(titzer): Move the mnemonics into SimpleOperator and Operator1 classes.
+class CommonOperatorBuilder {
+ public:
+  explicit CommonOperatorBuilder(Zone* zone) : zone_(zone) {}
+
+#define CONTROL_OP(name, inputs, controls)                                   \
+  return new (zone_) ControlOperator(IrOpcode::k##name, Operator::kFoldable, \
+                                     inputs, 0, controls, #name);
+
+  Operator* Start() { CONTROL_OP(Start, 0, 0); }
+  Operator* Dead() { CONTROL_OP(Dead, 0, 0); }
+  Operator* End() { CONTROL_OP(End, 0, 1); }
+  Operator* Branch() { CONTROL_OP(Branch, 1, 1); }
+  Operator* IfTrue() { CONTROL_OP(IfTrue, 0, 1); }
+  Operator* IfFalse() { CONTROL_OP(IfFalse, 0, 1); }
+  Operator* Throw() { CONTROL_OP(Throw, 1, 1); }
+  Operator* LazyDeoptimization() { CONTROL_OP(LazyDeoptimization, 0, 1); }
+  Operator* Continuation() { CONTROL_OP(Continuation, 0, 1); }
+
+  Operator* Deoptimize() {
+    return new (zone_)
+        ControlOperator(IrOpcode::kDeoptimize, 0, 1, 0, 1, "Deoptimize");
+  }
+
+  Operator* Return() {
+    return new (zone_) ControlOperator(IrOpcode::kReturn, 0, 1, 0, 1, "Return");
+  }
+
+  Operator* Merge(int controls) {
+    return new (zone_) ControlOperator(IrOpcode::kMerge, Operator::kFoldable, 0,
+                                       0, controls, "Merge");
+  }
+
+  Operator* Loop(int controls) {
+    return new (zone_) ControlOperator(IrOpcode::kLoop, Operator::kFoldable, 0,
+                                       0, controls, "Loop");
+  }
+
+  Operator* Parameter(int index) {
+    return new (zone_) Operator1<int>(IrOpcode::kParameter, Operator::kPure, 0,
+                                      1, "Parameter", index);
+  }
+  Operator* Int32Constant(int32_t value) {
+    return new (zone_) Operator1<int>(IrOpcode::kInt32Constant, Operator::kPure,
+                                      0, 1, "Int32Constant", value);
+  }
+  Operator* Int64Constant(int64_t value) {
+    return new (zone_)
+        Operator1<int64_t>(IrOpcode::kInt64Constant, Operator::kPure, 0, 1,
+                           "Int64Constant", value);
+  }
+  Operator* Float64Constant(double value) {
+    return new (zone_)
+        Operator1<double>(IrOpcode::kFloat64Constant, Operator::kPure, 0, 1,
+                          "Float64Constant", value);
+  }
+  Operator* ExternalConstant(ExternalReference value) {
+    return new (zone_) Operator1<ExternalReference>(IrOpcode::kExternalConstant,
+                                                    Operator::kPure, 0, 1,
+                                                    "ExternalConstant", value);
+  }
+  Operator* NumberConstant(double value) {
+    return new (zone_)
+        Operator1<double>(IrOpcode::kNumberConstant, Operator::kPure, 0, 1,
+                          "NumberConstant", value);
+  }
+  Operator* HeapConstant(PrintableUnique<Object> value) {
+    return new (zone_) Operator1<PrintableUnique<Object> >(
+        IrOpcode::kHeapConstant, Operator::kPure, 0, 1, "HeapConstant", value);
+  }
+  Operator* Phi(int arguments) {
+    ASSERT(arguments > 0);  // Disallow empty phis.
+    return new (zone_) Operator1<int>(IrOpcode::kPhi, Operator::kPure,
+                                      arguments, 1, "Phi", arguments);
+  }
+  Operator* EffectPhi(int arguments) {
+    ASSERT(arguments > 0);  // Disallow empty phis.
+    return new (zone_) Operator1<int>(IrOpcode::kEffectPhi, Operator::kPure, 0,
+                                      0, "EffectPhi", arguments);
+  }
+  Operator* FrameState(const FrameStateDescriptor& descriptor) {
+    return new (zone_) Operator1<FrameStateDescriptor>(
+        IrOpcode::kFrameState, Operator::kPure, 0, 1, "FrameState", descriptor);
+  }
+  Operator* Call(CallDescriptor* descriptor) {
+    return new (zone_) CallOperator(descriptor, "Call");
+  }
+  Operator* Projection(int index) {
+    return new (zone_) Operator1<int>(IrOpcode::kProjection, Operator::kPure, 1,
+                                      1, "Projection", index);
+  }
+
+ private:
+  Zone* zone_;
+};
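+
+
+// Usage sketch (illustrative):
+//   CommonOperatorBuilder common(zone);
+//   Operator* branch = common.Branch();  // One value input, one control.
+//   Operator* phi2 = common.Phi(2);      // Merges two incoming values.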
+
+
+template <typename T>
+struct CommonOperatorTraits {
+  static inline bool Equals(T a, T b);
+  static inline bool HasValue(Operator* op);
+  static inline T ValueOf(Operator* op);
+};
+
+template <>
+struct CommonOperatorTraits<int32_t> {
+  static inline bool Equals(int32_t a, int32_t b) { return a == b; }
+  static inline bool HasValue(Operator* op) {
+    return op->opcode() == IrOpcode::kInt32Constant ||
+           op->opcode() == IrOpcode::kNumberConstant;
+  }
+  static inline int32_t ValueOf(Operator* op) {
+    if (op->opcode() == IrOpcode::kNumberConstant) {
+      // TODO(titzer): cache the converted int32 value in NumberConstant.
+      return FastD2I(reinterpret_cast<Operator1<double>*>(op)->parameter());
+    }
+    CHECK_EQ(IrOpcode::kInt32Constant, op->opcode());
+    return static_cast<Operator1<int32_t>*>(op)->parameter();
+  }
+};
+
+template <>
+struct CommonOperatorTraits<uint32_t> {
+  static inline bool Equals(uint32_t a, uint32_t b) { return a == b; }
+  static inline bool HasValue(Operator* op) {
+    return CommonOperatorTraits<int32_t>::HasValue(op);
+  }
+  static inline uint32_t ValueOf(Operator* op) {
+    if (op->opcode() == IrOpcode::kNumberConstant) {
+      // TODO(titzer): cache the converted uint32 value in NumberConstant.
+      return FastD2UI(reinterpret_cast<Operator1<double>*>(op)->parameter());
+    }
+    return static_cast<uint32_t>(CommonOperatorTraits<int32_t>::ValueOf(op));
+  }
+};
+
+template <>
+struct CommonOperatorTraits<int64_t> {
+  static inline bool Equals(int64_t a, int64_t b) { return a == b; }
+  static inline bool HasValue(Operator* op) {
+    return op->opcode() == IrOpcode::kInt32Constant ||
+           op->opcode() == IrOpcode::kInt64Constant ||
+           op->opcode() == IrOpcode::kNumberConstant;
+  }
+  static inline int64_t ValueOf(Operator* op) {
+    if (op->opcode() == IrOpcode::kInt32Constant) {
+      return static_cast<int64_t>(CommonOperatorTraits<int32_t>::ValueOf(op));
+    }
+    CHECK_EQ(IrOpcode::kInt64Constant, op->opcode());
+    return static_cast<Operator1<int64_t>*>(op)->parameter();
+  }
+};
+
+template <>
+struct CommonOperatorTraits<uint64_t> {
+  static inline bool Equals(uint64_t a, uint64_t b) { return a == b; }
+  static inline bool HasValue(Operator* op) {
+    return CommonOperatorTraits<int64_t>::HasValue(op);
+  }
+  static inline uint64_t ValueOf(Operator* op) {
+    return static_cast<uint64_t>(CommonOperatorTraits<int64_t>::ValueOf(op));
+  }
+};
+
+template <>
+struct CommonOperatorTraits<double> {
+  static inline bool Equals(double a, double b) {
+    return DoubleRepresentation(a).bits == DoubleRepresentation(b).bits;
+  }
+  static inline bool HasValue(Operator* op) {
+    return op->opcode() == IrOpcode::kFloat64Constant ||
+           op->opcode() == IrOpcode::kInt32Constant ||
+           op->opcode() == IrOpcode::kNumberConstant;
+  }
+  static inline double ValueOf(Operator* op) {
+    if (op->opcode() == IrOpcode::kFloat64Constant ||
+        op->opcode() == IrOpcode::kNumberConstant) {
+      return reinterpret_cast<Operator1<double>*>(op)->parameter();
+    }
+    return static_cast<double>(CommonOperatorTraits<int32_t>::ValueOf(op));
+  }
+};
+
+template <>
+struct CommonOperatorTraits<ExternalReference> {
+  static inline bool Equals(ExternalReference a, ExternalReference b) {
+    return a == b;
+  }
+  static inline bool HasValue(Operator* op) {
+    return op->opcode() == IrOpcode::kExternalConstant;
+  }
+  static inline ExternalReference ValueOf(Operator* op) {
+    CHECK_EQ(IrOpcode::kExternalConstant, op->opcode());
+    return static_cast<Operator1<ExternalReference>*>(op)->parameter();
+  }
+};
+
+template <typename T>
+struct CommonOperatorTraits<PrintableUnique<T> > {
+  static inline bool HasValue(Operator* op) {
+    return op->opcode() == IrOpcode::kHeapConstant;
+  }
+  static inline PrintableUnique<T> ValueOf(Operator* op) {
+    CHECK_EQ(IrOpcode::kHeapConstant, op->opcode());
+    return static_cast<Operator1<PrintableUnique<T> >*>(op)->parameter();
+  }
+};
+
+template <typename T>
+struct CommonOperatorTraits<Handle<T> > {
+  static inline bool HasValue(Operator* op) {
+    return CommonOperatorTraits<PrintableUnique<T> >::HasValue(op);
+  }
+  static inline Handle<T> ValueOf(Operator* op) {
+    return CommonOperatorTraits<PrintableUnique<T> >::ValueOf(op).handle();
+  }
+};
+
+
+template <typename T>
+inline T ValueOf(Operator* op) {
+  return CommonOperatorTraits<T>::ValueOf(op);
+}
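+
+// Example (illustrative): reading a constant back off an operator:
+//   Operator* op = common.Int32Constant(42);
+//   int32_t value = ValueOf<int32_t>(op);  // 42.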
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_COMMON_OPERATOR_H_
diff --git a/src/compiler/control-builders.cc b/src/compiler/control-builders.cc
new file mode 100644 (file)
index 0000000..3b7d05b
--- /dev/null
@@ -0,0 +1,144 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "control-builders.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+void IfBuilder::If(Node* condition) {
+  builder_->NewBranch(condition);
+  else_environment_ = environment()->CopyForConditional();
+}
+
+
+void IfBuilder::Then() { builder_->NewIfTrue(); }
+
+
+void IfBuilder::Else() {
+  builder_->NewMerge();
+  then_environment_ = environment();
+  set_environment(else_environment_);
+  builder_->NewIfFalse();
+}
+
+
+void IfBuilder::End() {
+  then_environment_->Merge(environment());
+  set_environment(then_environment_);
+}
+
+
+void LoopBuilder::BeginLoop() {
+  builder_->NewLoop();
+  loop_environment_ = environment()->CopyForLoop();
+  continue_environment_ = environment()->CopyAsUnreachable();
+  break_environment_ = environment()->CopyAsUnreachable();
+}
+
+
+void LoopBuilder::Continue() {
+  continue_environment_->Merge(environment());
+  environment()->MarkAsUnreachable();
+}
+
+
+void LoopBuilder::Break() {
+  break_environment_->Merge(environment());
+  environment()->MarkAsUnreachable();
+}
+
+
+void LoopBuilder::EndBody() {
+  continue_environment_->Merge(environment());
+  set_environment(continue_environment_);
+}
+
+
+void LoopBuilder::EndLoop() {
+  loop_environment_->Merge(environment());
+  set_environment(break_environment_);
+}
+
+
+void LoopBuilder::BreakUnless(Node* condition) {
+  IfBuilder control_if(builder_);
+  control_if.If(condition);
+  control_if.Then();
+  control_if.Else();
+  Break();
+  control_if.End();
+}
+
+
+void SwitchBuilder::BeginSwitch() {
+  body_environment_ = environment()->CopyAsUnreachable();
+  label_environment_ = environment()->CopyAsUnreachable();
+  break_environment_ = environment()->CopyAsUnreachable();
+  body_environments_.AddBlock(NULL, case_count(), zone());
+}
+
+
+void SwitchBuilder::BeginLabel(int index, Node* condition) {
+  builder_->NewBranch(condition);
+  label_environment_ = environment()->CopyForConditional();
+  builder_->NewIfTrue();
+  body_environments_[index] = environment();
+}
+
+
+void SwitchBuilder::EndLabel() {
+  set_environment(label_environment_);
+  builder_->NewIfFalse();
+}
+
+
+void SwitchBuilder::DefaultAt(int index) {
+  label_environment_ = environment()->CopyAsUnreachable();
+  body_environments_[index] = environment();
+}
+
+
+void SwitchBuilder::BeginCase(int index) {
+  set_environment(body_environments_[index]);
+  environment()->Merge(body_environment_);
+}
+
+
+void SwitchBuilder::Break() {
+  break_environment_->Merge(environment());
+  environment()->MarkAsUnreachable();
+}
+
+
+void SwitchBuilder::EndCase() { body_environment_ = environment(); }
+
+
+void SwitchBuilder::EndSwitch() {
+  break_environment_->Merge(label_environment_);
+  break_environment_->Merge(environment());
+  set_environment(break_environment_);
+}
+
+
+void BlockBuilder::BeginBlock() {
+  break_environment_ = environment()->CopyAsUnreachable();
+}
+
+
+void BlockBuilder::Break() {
+  break_environment_->Merge(environment());
+  environment()->MarkAsUnreachable();
+}
+
+
+void BlockBuilder::EndBlock() {
+  break_environment_->Merge(environment());
+  set_environment(break_environment_);
+}
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/control-builders.h b/src/compiler/control-builders.h
new file mode 100644 (file)
index 0000000..9b604c1
--- /dev/null
@@ -0,0 +1,144 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CONTROL_BUILDERS_H_
+#define V8_COMPILER_CONTROL_BUILDERS_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// Base class for all control builders. Also provides a common interface for
+// control builders to handle 'break' and 'continue' statements when they are
+// used to model breakable statements.
+class ControlBuilder {
+ public:
+  explicit ControlBuilder(StructuredGraphBuilder* builder)
+      : builder_(builder) {}
+  virtual ~ControlBuilder() {}
+
+  // Interface for break and continue.
+  virtual void Break() { UNREACHABLE(); }
+  virtual void Continue() { UNREACHABLE(); }
+
+ protected:
+  typedef StructuredGraphBuilder Builder;
+  typedef StructuredGraphBuilder::Environment Environment;
+
+  Zone* zone() const { return builder_->zone(); }
+  Environment* environment() { return builder_->environment_internal(); }
+  void set_environment(Environment* env) { builder_->set_environment(env); }
+
+  Builder* builder_;
+};
+
+
+// Tracks control flow for a conditional statement.
+class IfBuilder : public ControlBuilder {
+ public:
+  explicit IfBuilder(StructuredGraphBuilder* builder)
+      : ControlBuilder(builder),
+        then_environment_(NULL),
+        else_environment_(NULL) {}
+
+  // Primitive control commands.
+  void If(Node* condition);
+  void Then();
+  void Else();
+  void End();
+
+ private:
+  Environment* then_environment_;  // Environment after the 'then' body.
+  Environment* else_environment_;  // Environment for the 'else' body.
+};
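+
+
+// Usage sketch (illustrative) for lowering a conditional:
+//   IfBuilder compare_if(builder);
+//   compare_if.If(condition);
+//   compare_if.Then();
+//   // ... visit then-branch ...
+//   compare_if.Else();
+//   // ... visit else-branch ...
+//   compare_if.End();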
+
+
+// Tracks control flow for an iteration statement.
+class LoopBuilder : public ControlBuilder {
+ public:
+  explicit LoopBuilder(StructuredGraphBuilder* builder)
+      : ControlBuilder(builder),
+        loop_environment_(NULL),
+        continue_environment_(NULL),
+        break_environment_(NULL) {}
+
+  // Primitive control commands.
+  void BeginLoop();
+  void EndBody();
+  void EndLoop();
+
+  // Primitive support for break and continue.
+  virtual void Continue();
+  virtual void Break();
+
+  // Compound control command for conditional break.
+  void BreakUnless(Node* condition);
+
+ private:
+  Environment* loop_environment_;      // Environment of the loop header.
+  Environment* continue_environment_;  // Environment after the loop body.
+  Environment* break_environment_;     // Environment after the loop exits.
+};
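+
+
+// Usage sketch (illustrative) for a while-loop:
+//   LoopBuilder while_loop(builder);
+//   while_loop.BeginLoop();
+//   // ... evaluate the condition ...
+//   while_loop.BreakUnless(condition);
+//   // ... visit the loop body ...
+//   while_loop.EndBody();
+//   while_loop.EndLoop();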
+
+
+// Tracks control flow for a switch statement.
+class SwitchBuilder : public ControlBuilder {
+ public:
+  explicit SwitchBuilder(StructuredGraphBuilder* builder, int case_count)
+      : ControlBuilder(builder),
+        body_environment_(NULL),
+        label_environment_(NULL),
+        break_environment_(NULL),
+        body_environments_(case_count, zone()) {}
+
+  // Primitive control commands.
+  void BeginSwitch();
+  void BeginLabel(int index, Node* condition);
+  void EndLabel();
+  void DefaultAt(int index);
+  void BeginCase(int index);
+  void EndCase();
+  void EndSwitch();
+
+  // Primitive support for break.
+  virtual void Break();
+
+  // The number of cases within a switch is statically known.
+  int case_count() const { return body_environments_.capacity(); }
+
+ private:
+  Environment* body_environment_;   // Environment after last case body.
+  Environment* label_environment_;  // Environment for next label condition.
+  Environment* break_environment_;  // Environment after the switch exits.
+  ZoneList<Environment*> body_environments_;
+};
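+
+
+// Usage sketch (illustrative): all label comparisons are emitted first, then
+// the case bodies, so fall-through between cases works:
+//   SwitchBuilder compare_switch(builder, clause_count);
+//   compare_switch.BeginSwitch();
+//   // For each clause i: BeginLabel(i, tag_matches); ... EndLabel();
+//   // (or DefaultAt(i) for the default clause).
+//   // For each clause i: BeginCase(i); ... visit body ...; EndCase();
+//   compare_switch.EndSwitch();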
+
+
+// Tracks control flow for a block statement.
+class BlockBuilder : public ControlBuilder {
+ public:
+  explicit BlockBuilder(StructuredGraphBuilder* builder)
+      : ControlBuilder(builder), break_environment_(NULL) {}
+
+  // Primitive control commands.
+  void BeginBlock();
+  void EndBlock();
+
+  // Primitive support for break.
+  virtual void Break();
+
+ private:
+  Environment* break_environment_;  // Environment after the block exits.
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_CONTROL_BUILDERS_H_
diff --git a/src/compiler/frame.h b/src/compiler/frame.h
new file mode 100644 (file)
index 0000000..251bd49
--- /dev/null
@@ -0,0 +1,104 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_FRAME_H_
+#define V8_COMPILER_FRAME_H_
+
+#include "src/v8.h"
+
+#include "src/data-flow.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Collects the spill slot requirements and the allocated general and double
+// registers for a compiled function. Frames are usually populated by the
+// register allocator and are used by Linkage to generate code for the prologue
+// and epilogue to compiled code.
+class Frame {
+ public:
+  Frame()
+      : register_save_area_size_(0),
+        spill_slot_count_(0),
+        double_spill_slot_count_(0),
+        allocated_registers_(NULL),
+        allocated_double_registers_(NULL) {}
+
+  inline int GetSpillSlotCount() { return spill_slot_count_; }
+  inline int GetDoubleSpillSlotCount() { return double_spill_slot_count_; }
+
+  void SetAllocatedRegisters(BitVector* regs) {
+    ASSERT(allocated_registers_ == NULL);
+    allocated_registers_ = regs;
+  }
+
+  void SetAllocatedDoubleRegisters(BitVector* regs) {
+    ASSERT(allocated_double_registers_ == NULL);
+    allocated_double_registers_ = regs;
+  }
+
+  bool DidAllocateDoubleRegisters() {
+    return !allocated_double_registers_->IsEmpty();
+  }
+
+  void SetRegisterSaveAreaSize(int size) {
+    ASSERT(IsAligned(size, kPointerSize));
+    register_save_area_size_ = size;
+  }
+
+  int GetRegisterSaveAreaSize() { return register_save_area_size_; }
+
+  int AllocateSpillSlot(bool is_double) {
+    // If 32-bit, skip one if the new slot is a double.
+    if (is_double) {
+      if (kDoubleSize > kPointerSize) {
+        ASSERT(kDoubleSize == kPointerSize * 2);
+        spill_slot_count_++;
+        spill_slot_count_ |= 1;
+      }
+      double_spill_slot_count_++;
+    }
+    return spill_slot_count_++;
+  }
+
+ private:
+  int register_save_area_size_;
+  int spill_slot_count_;
+  int double_spill_slot_count_;
+  BitVector* allocated_registers_;
+  BitVector* allocated_double_registers_;
+};
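+
+
+// Example (illustrative trace on a 32-bit target where kDoubleSize is
+// 2 * kPointerSize), starting from an empty frame:
+//   AllocateSpillSlot(true)   returns 1  (the double occupies the pair {0, 1}),
+//   AllocateSpillSlot(false)  returns 2,
+//   AllocateSpillSlot(true)   returns 5  (slot 3 is skipped for alignment;
+//                                         the double occupies {4, 5}).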
+
+
+// Represents an offset from either the stack pointer or frame pointer.
+class FrameOffset {
+ public:
+  inline bool from_stack_pointer() { return (offset_ & 1) == kFromSp; }
+  inline bool from_frame_pointer() { return (offset_ & 1) == kFromFp; }
+  inline int offset() { return offset_ & ~1; }
+
+  inline static FrameOffset FromStackPointer(int offset) {
+    ASSERT((offset & 1) == 0);
+    return FrameOffset(offset | kFromSp);
+  }
+
+  inline static FrameOffset FromFramePointer(int offset) {
+    ASSERT((offset & 1) == 0);
+    return FrameOffset(offset | kFromFp);
+  }
+
+ private:
+  explicit FrameOffset(int offset) : offset_(offset) {}
+
+  int offset_;  // Encodes SP or FP in the low order bit.
+
+  static const int kFromSp = 1;
+  static const int kFromFp = 0;
+};
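+
+// Example (illustrative): FromFramePointer(8) encodes as 8 (kFromFp == 0),
+// FromStackPointer(8) as 9 (kFromSp == 1); offset() masks the tag bit back
+// off, so both report an offset of 8.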
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_FRAME_H_
diff --git a/src/compiler/gap-resolver.cc b/src/compiler/gap-resolver.cc
new file mode 100644 (file)
index 0000000..bc151c2
--- /dev/null
@@ -0,0 +1,135 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/gap-resolver.h"
+
+#include <algorithm>
+#include <functional>
+#include <set>
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef ZoneList<MoveOperands>::iterator op_iterator;
+
+#ifdef ENABLE_SLOW_ASSERTS
+// TODO(svenpanne) Brush up InstructionOperand with comparison?
+struct InstructionOperandComparator {
+  bool operator()(const InstructionOperand* x, const InstructionOperand* y) {
+    return (x->kind() < y->kind()) ||
+           (x->kind() == y->kind() && x->index() < y->index());
+  }
+};
+#endif
+
+// No operand should be the destination for more than one move.
+static void VerifyMovesAreInjective(ZoneList<MoveOperands>* moves) {
+#ifdef ENABLE_SLOW_ASSERTS
+  std::set<InstructionOperand*, InstructionOperandComparator> seen;
+  for (op_iterator i = moves->begin(); i != moves->end(); ++i) {
+    SLOW_ASSERT(seen.find(i->destination()) == seen.end());
+    seen.insert(i->destination());
+  }
+#endif
+}
+
+
+void GapResolver::Resolve(ParallelMove* parallel_move) const {
+  ZoneList<MoveOperands>* moves = parallel_move->move_operands();
+  // TODO(svenpanne) Use the member version of remove_if when we use real lists.
+  op_iterator end =
+      std::remove_if(moves->begin(), moves->end(),
+                     std::mem_fun_ref(&MoveOperands::IsRedundant));
+  moves->Rewind(static_cast<int>(end - moves->begin()));
+
+  VerifyMovesAreInjective(moves);
+
+  for (op_iterator move = moves->begin(); move != moves->end(); ++move) {
+    if (!move->IsEliminated()) PerformMove(moves, &*move);
+  }
+}
+
+
+void GapResolver::PerformMove(ZoneList<MoveOperands>* moves,
+                              MoveOperands* move) const {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We mark a
+  // move as "pending" on entry to PerformMove in order to detect cycles in the
+  // move graph.  We use operand swaps to resolve cycles, which means that a
+  // call to PerformMove could change any source operand in the move graph.
+  ASSERT(!move->IsPending());
+  ASSERT(!move->IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved on the side.
+  ASSERT_NOT_NULL(move->source());  // Or else it will look eliminated.
+  InstructionOperand* destination = move->destination();
+  move->set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve dependencies.
+  // Any unperformed, unpending move with a source the same as this one's
+  // destination blocks this one so recursively perform all such moves.
+  for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
+    if (other->Blocks(destination) && !other->IsPending()) {
+      // Though PerformMove can change any source operand in the move graph,
+      // this call cannot create a blocking move via a swap (this loop does not
+      // miss any).  Assume there is a non-blocking move with source A and this
+      // move is blocked on source B and there is a swap of A and B.  Then A and
+      // B must be involved in the same cycle (or they would not be swapped).
+      // Since this move's destination is B and there is only a single incoming
+      // edge to an operand, this move must also be involved in the same cycle.
+      // In that case, the blocking move will be created but will be "pending"
+      // when we return from PerformMove.
+      PerformMove(moves, other);
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as pending, so
+  // restore its destination.
+  move->set_destination(destination);
+
+  // This move's source may have changed due to swaps to resolve cycles and so
+  // it may now be the last move in the cycle.  If so remove it.
+  InstructionOperand* source = move->source();
+  if (source->Equals(destination)) {
+    move->Eliminate();
+    return;
+  }
+
+  // The move may be blocked on a (at most one) pending move, in which case we
+  // have a cycle.  Search for such a blocking move and perform a swap to
+  // resolve it.
+  op_iterator blocker = std::find_if(
+      moves->begin(), moves->end(),
+      std::bind2nd(std::mem_fun_ref(&MoveOperands::Blocks), destination));
+  if (blocker == moves->end()) {
+    // The easy case: This move is not blocked.
+    assembler_->AssembleMove(source, destination);
+    move->Eliminate();
+    return;
+  }
+
+  ASSERT(blocker->IsPending());
+  // Ensure source is a register or both are stack slots, to limit swap cases.
+  if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+    std::swap(source, destination);
+  }
+  assembler_->AssembleSwap(source, destination);
+  move->Eliminate();
+
+  // Any unperformed (including pending) move with a source of either this
+  // move's source or destination needs to have its source changed to
+  // reflect the state of affairs after the swap.
+  for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
+    if (other->Blocks(source)) {
+      other->set_source(destination);
+    } else if (other->Blocks(destination)) {
+      other->set_source(source);
+    }
+  }
+}
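+
+// Example (illustrative): for the parallel move {r1 -> r2, r2 -> r1},
+// PerformMove(r1 -> r2) marks itself pending and recurses into r2 -> r1,
+// which finds the pending move blocking its destination and emits
+// AssembleSwap(r2, r1); the fix-up loop then rewrites the pending move's
+// source so it becomes redundant and is eliminated without a second move.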
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/gap-resolver.h b/src/compiler/gap-resolver.h
new file mode 100644 (file)
index 0000000..5c3aead
--- /dev/null
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GAP_RESOLVER_H_
+#define V8_COMPILER_GAP_RESOLVER_H_
+
+#include "src/compiler/instruction.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class GapResolver V8_FINAL {
+ public:
+  // Interface used by the gap resolver to emit moves and swaps.
+  class Assembler {
+   public:
+    virtual ~Assembler() {}
+
+    // Assemble move.
+    virtual void AssembleMove(InstructionOperand* source,
+                              InstructionOperand* destination) = 0;
+    // Assemble swap.
+    virtual void AssembleSwap(InstructionOperand* source,
+                              InstructionOperand* destination) = 0;
+  };
+
+  explicit GapResolver(Assembler* assembler) : assembler_(assembler) {}
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void Resolve(ParallelMove* parallel_move) const;
+
+ private:
+  // Perform the given move, possibly requiring other moves to satisfy
+  // dependencies.
+  void PerformMove(ZoneList<MoveOperands>* moves, MoveOperands* move) const;
+
+  // Assembler used to emit moves and save registers.
+  Assembler* const assembler_;
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GAP_RESOLVER_H_
diff --git a/src/compiler/generic-algorithm-inl.h b/src/compiler/generic-algorithm-inl.h
new file mode 100644 (file)
index 0000000..a25131f
--- /dev/null
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_ALGORITHM_INL_H_
+#define V8_COMPILER_GENERIC_ALGORITHM_INL_H_
+
+#include <vector>
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <class N>
+class NodeInputIterationTraits {
+ public:
+  typedef N Node;
+  typedef typename N::Inputs::iterator Iterator;
+
+  static Iterator begin(Node* node) { return node->inputs().begin(); }
+  static Iterator end(Node* node) { return node->inputs().end(); }
+  static int max_id(GenericGraphBase* graph) { return graph->NodeCount(); }
+  static Node* to(Iterator iterator) { return *iterator; }
+  static Node* from(Iterator iterator) { return iterator.edge().from(); }
+};
+
+template <class N>
+class NodeUseIterationTraits {
+ public:
+  typedef N Node;
+  typedef typename N::Uses::iterator Iterator;
+
+  static Iterator begin(Node* node) { return node->uses().begin(); }
+  static Iterator end(Node* node) { return node->uses().end(); }
+  static int max_id(GenericGraphBase* graph) { return graph->NodeCount(); }
+  static Node* to(Iterator iterator) { return *iterator; }
+  static Node* from(Iterator iterator) { return iterator.edge().to(); }
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GENERIC_ALGORITHM_INL_H_
diff --git a/src/compiler/generic-algorithm.h b/src/compiler/generic-algorithm.h
new file mode 100644 (file)
index 0000000..f7bb2fe
--- /dev/null
@@ -0,0 +1,136 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_ALGORITHM_H_
+#define V8_COMPILER_GENERIC_ALGORITHM_H_
+
+#include <deque>
+#include <stack>
+
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// GenericGraphVisit allows visitation of graphs of nodes and edges in pre- and
+// post-order. Visitation uses an explicitly allocated stack rather than the
+// execution stack to avoid stack overflow. Although GenericGraphVisit is
+// primarily intended to traverse networks of nodes through their
+// dependencies and uses, it also can be used to visit any graph-like network
+// by specifying custom traits.
+class GenericGraphVisit {
+ public:
+  enum Control {
+    CONTINUE = 0x0,  // Continue depth-first normally
+    SKIP = 0x1,      // Skip this node and its successors
+    REENTER = 0x2,   // Allow reentering this node
+    DEFER = SKIP | REENTER
+  };
+
+  // struct Visitor {
+  //   Control Pre(Traits::Node* current);
+  //   Control Post(Traits::Node* current);
+  //   void PreEdge(Traits::Node* from, int index, Traits::Node* to);
+  //   void PostEdge(Traits::Node* from, int index, Traits::Node* to);
+  // }
+  template <class Visitor, class Traits, class RootIterator>
+  static void Visit(GenericGraphBase* graph, RootIterator root_begin,
+                    RootIterator root_end, Visitor* visitor) {
+    // TODO(bmeurer): Pass "local" zone as parameter.
+    Zone* zone = graph->zone();
+    typedef typename Traits::Node Node;
+    typedef typename Traits::Iterator Iterator;
+    typedef std::pair<Iterator, Iterator> NodeState;
+    typedef zone_allocator<NodeState> ZoneNodeStateAllocator;
+    typedef std::deque<NodeState, ZoneNodeStateAllocator> NodeStateDeque;
+    typedef std::stack<NodeState, NodeStateDeque> NodeStateStack;
+    NodeStateStack stack((NodeStateDeque(ZoneNodeStateAllocator(zone))));
+    BoolVector visited(Traits::max_id(graph), false, ZoneBoolAllocator(zone));
+    Node* current = *root_begin;
+    while (true) {
+      ASSERT(current != NULL);
+      const int id = current->id();
+      ASSERT(id >= 0);
+      ASSERT(id < Traits::max_id(graph));  // Must be a valid id.
+      bool visit = !GetVisited(&visited, id);
+      if (visit) {
+        Control control = visitor->Pre(current);
+        visit = !IsSkip(control);
+        if (!IsReenter(control)) SetVisited(&visited, id, true);
+      }
+      Iterator begin(visit ? Traits::begin(current) : Traits::end(current));
+      Iterator end(Traits::end(current));
+      stack.push(NodeState(begin, end));
+      Node* post_order_node = current;
+      while (true) {
+        NodeState top = stack.top();
+        if (top.first == top.second) {
+          if (visit) {
+            Control control = visitor->Post(post_order_node);
+            ASSERT(!IsSkip(control));
+            SetVisited(&visited, post_order_node->id(), !IsReenter(control));
+          }
+          stack.pop();
+          if (stack.empty()) {
+            if (++root_begin == root_end) return;
+            current = *root_begin;
+            break;
+          }
+          post_order_node = Traits::from(stack.top().first);
+          visit = true;
+        } else {
+          visitor->PreEdge(Traits::from(top.first), top.first.edge().index(),
+                           Traits::to(top.first));
+          current = Traits::to(top.first);
+          if (!GetVisited(&visited, current->id())) break;
+        }
+        top = stack.top();
+        visitor->PostEdge(Traits::from(top.first), top.first.edge().index(),
+                          Traits::to(top.first));
+        ++stack.top().first;
+      }
+    }
+  }
+
+  template <class Visitor, class Traits>
+  static void Visit(GenericGraphBase* graph, typename Traits::Node* current,
+                    Visitor* visitor) {
+    typename Traits::Node* array[] = {current};
+    Visit<Visitor, Traits>(graph, &array[0], &array[1], visitor);
+  }
+
+  template <class B, class S>
+  struct NullNodeVisitor {
+    Control Pre(GenericNode<B, S>* node) { return CONTINUE; }
+    Control Post(GenericNode<B, S>* node) { return CONTINUE; }
+    void PreEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {}
+    void PostEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {}
+  };
+
+ private:
+  static bool IsSkip(Control c) { return c & SKIP; }
+  static bool IsReenter(Control c) { return c & REENTER; }
+
+  // TODO(turbofan): resizing could be optionally templatized away.
+  static void SetVisited(BoolVector* visited, int id, bool value) {
+    if (id >= static_cast<int>(visited->size())) {
+      // Resize and set all values to unvisited.
+      visited->resize((3 * id) / 2, false);
+    }
+    visited->at(id) = value;
+  }
+
+  static bool GetVisited(BoolVector* visited, int id) {
+    if (id >= static_cast<int>(visited->size())) return false;
+    return visited->at(id);
+  }
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GENERIC_ALGORITHM_H_
diff --git a/src/compiler/generic-graph.h b/src/compiler/generic-graph.h
new file mode 100644 (file)
index 0000000..a555456
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_GRAPH_H_
+#define V8_COMPILER_GENERIC_GRAPH_H_
+
+#include "src/compiler/generic-node.h"
+
+namespace v8 {
+namespace internal {
+
+class Zone;
+
+namespace compiler {
+
+class GenericGraphBase : public ZoneObject {
+ public:
+  explicit GenericGraphBase(Zone* zone) : zone_(zone), next_node_id_(0) {}
+
+  Zone* zone() const { return zone_; }
+
+  NodeId NextNodeID() { return next_node_id_++; }
+  NodeId NodeCount() const { return next_node_id_; }
+
+ private:
+  Zone* zone_;
+  NodeId next_node_id_;
+};
+
+template <class V>
+class GenericGraph : public GenericGraphBase {
+ public:
+  explicit GenericGraph(Zone* zone)
+      : GenericGraphBase(zone), start_(NULL), end_(NULL) {}
+
+  V* start() { return start_; }
+  V* end() { return end_; }
+
+  void SetStart(V* start) { start_ = start; }
+  void SetEnd(V* end) { end_ = end; }
+
+ private:
+  V* start_;
+  V* end_;
+
+  DISALLOW_COPY_AND_ASSIGN(GenericGraph);
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GENERIC_GRAPH_H_
diff --git a/src/compiler/generic-node-inl.h b/src/compiler/generic-node-inl.h
new file mode 100644 (file)
index 0000000..023fa02
--- /dev/null
@@ -0,0 +1,244 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_NODE_INL_H_
+#define V8_COMPILER_GENERIC_NODE_INL_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <class B, class S>
+GenericNode<B, S>::GenericNode(GenericGraphBase* graph, int input_count)
+    : BaseClass(graph->zone()),
+      input_count_(input_count),
+      has_appendable_inputs_(false),
+      use_count_(0),
+      first_use_(NULL),
+      last_use_(NULL) {
+  inputs_.static_ = reinterpret_cast<Input*>(this + 1);
+  AssignUniqueID(graph);
+}
+
+template <class B, class S>
+inline void GenericNode<B, S>::AssignUniqueID(GenericGraphBase* graph) {
+  id_ = graph->NextNodeID();
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Inputs::iterator
+GenericNode<B, S>::Inputs::begin() {
+  return GenericNode::Inputs::iterator(this->node_, 0);
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Inputs::iterator
+GenericNode<B, S>::Inputs::end() {
+  return GenericNode::Inputs::iterator(this->node_, this->node_->InputCount());
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Uses::iterator
+GenericNode<B, S>::Uses::begin() {
+  return GenericNode::Uses::iterator(this->node_);
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Uses::iterator
+GenericNode<B, S>::Uses::end() {
+  return GenericNode::Uses::iterator();
+}
+
+template <class B, class S>
+void GenericNode<B, S>::ReplaceUses(GenericNode* replace_to) {
+  for (Use* use = first_use_; use != NULL; use = use->next) {
+    use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
+  }
+  if (replace_to->last_use_ == NULL) {
+    ASSERT_EQ(NULL, replace_to->first_use_);
+    replace_to->first_use_ = first_use_;
+  } else {
+    ASSERT_NE(NULL, replace_to->first_use_);
+    replace_to->last_use_->next = first_use_;
+    first_use_->prev = replace_to->last_use_;
+  }
+  replace_to->last_use_ = last_use_;
+  replace_to->use_count_ += use_count_;
+  use_count_ = 0;
+  first_use_ = NULL;
+  last_use_ = NULL;
+}
+
+template <class B, class S>
+template <class UnaryPredicate>
+void GenericNode<B, S>::ReplaceUsesIf(UnaryPredicate pred,
+                                      GenericNode* replace_to) {
+  for (Use* use = first_use_; use != NULL;) {
+    Use* next = use->next;
+    if (pred(static_cast<S*>(use->from))) {
+      RemoveUse(use);
+      replace_to->AppendUse(use);
+      use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
+    }
+    use = next;
+  }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::RemoveAllInputs() {
+  for (typename Inputs::iterator iter(inputs().begin()); iter != inputs().end();
+       ++iter) {
+    iter.GetInput()->Update(NULL);
+  }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::TrimInputCount(int new_input_count) {
+  if (new_input_count == input_count_) return;  // Nothing to do.
+
+  ASSERT(new_input_count < input_count_);
+
+  // Update inline inputs.
+  for (int i = new_input_count; i < input_count_; i++) {
+    GenericNode<B, S>::Input* input = GetInputRecordPtr(i);
+    input->Update(NULL);
+  }
+  input_count_ = new_input_count;
+}
+
+template <class B, class S>
+void GenericNode<B, S>::ReplaceInput(int index, GenericNode<B, S>* new_to) {
+  Input* input = GetInputRecordPtr(index);
+  input->Update(new_to);
+}
+
+template <class B, class S>
+void GenericNode<B, S>::Input::Update(GenericNode<B, S>* new_to) {
+  GenericNode* old_to = this->to;
+  if (new_to == old_to) return;  // Nothing to do.
+  // Snip out the use from where it used to be
+  if (old_to != NULL) {
+    old_to->RemoveUse(use);
+  }
+  to = new_to;
+  // And put it into the new node's use list.
+  if (new_to != NULL) {
+    new_to->AppendUse(use);
+  } else {
+    use->next = NULL;
+    use->prev = NULL;
+  }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::EnsureAppendableInputs(Zone* zone) {
+  if (!has_appendable_inputs_) {
+    void* deque_buffer = zone->New(sizeof(InputDeque));
+    InputDeque* deque = new (deque_buffer) InputDeque(ZoneInputAllocator(zone));
+    for (int i = 0; i < input_count_; ++i) {
+      deque->push_back(inputs_.static_[i]);
+    }
+    inputs_.appendable_ = deque;
+    has_appendable_inputs_ = true;
+  }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::AppendInput(Zone* zone, GenericNode<B, S>* to_append) {
+  EnsureAppendableInputs(zone);
+  Use* new_use = new (zone) Use;
+  Input new_input;
+  new_input.to = to_append;
+  new_input.use = new_use;
+  inputs_.appendable_->push_back(new_input);
+  new_use->input_index = input_count_;
+  new_use->from = this;
+  to_append->AppendUse(new_use);
+  input_count_++;
+}
+
+template <class B, class S>
+void GenericNode<B, S>::InsertInput(Zone* zone, int index,
+                                    GenericNode<B, S>* to_insert) {
+  ASSERT(index >= 0 && index < InputCount());
+  // TODO(turbofan): Optimize this implementation!
+  AppendInput(zone, InputAt(InputCount() - 1));
+  for (int i = InputCount() - 1; i > index; --i) {
+    ReplaceInput(i, InputAt(i - 1));
+  }
+  ReplaceInput(index, to_insert);
+}
+
+template <class B, class S>
+void GenericNode<B, S>::AppendUse(Use* use) {
+  use->next = NULL;
+  use->prev = last_use_;
+  if (last_use_ == NULL) {
+    first_use_ = use;
+  } else {
+    last_use_->next = use;
+  }
+  last_use_ = use;
+  ++use_count_;
+}
+
+template <class B, class S>
+void GenericNode<B, S>::RemoveUse(Use* use) {
+  if (last_use_ == use) {
+    last_use_ = use->prev;
+  }
+  if (use->prev != NULL) {
+    use->prev->next = use->next;
+  } else {
+    first_use_ = use->next;
+  }
+  if (use->next != NULL) {
+    use->next->prev = use->prev;
+  }
+  --use_count_;
+}
+
+template <class B, class S>
+inline bool GenericNode<B, S>::OwnedBy(GenericNode* owner) const {
+  return first_use_ != NULL && first_use_->from == owner &&
+         first_use_->next == NULL;
+}
+
+template <class B, class S>
+S* GenericNode<B, S>::New(GenericGraphBase* graph, int input_count,
+                          S** inputs) {
+  size_t node_size = sizeof(GenericNode);
+  size_t inputs_size = input_count * sizeof(Input);
+  size_t uses_size = input_count * sizeof(Use);
+  size_t size = node_size + inputs_size + uses_size;
+  Zone* zone = graph->zone();
+  void* buffer = zone->New(size);
+  S* result = new (buffer) S(graph, input_count);
+  Input* input =
+      reinterpret_cast<Input*>(reinterpret_cast<char*>(buffer) + node_size);
+  Use* use =
+      reinterpret_cast<Use*>(reinterpret_cast<char*>(input) + inputs_size);
+
+  for (int current = 0; current < input_count; ++current) {
+    GenericNode* to = *inputs++;
+    input->to = to;
+    input->use = use;
+    use->input_index = current;
+    use->from = result;
+    to->AppendUse(use);
+    ++use;
+    ++input;
+  }
+  return result;
+}
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GENERIC_NODE_INL_H_
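The single zone allocation in New() above is laid out so that the constructor's assignment inputs_.static_ = reinterpret_cast<Input*>(this + 1) is valid. A sketch for input_count == 2 (sizes illustrative):

    // One contiguous zone block per node:
    //
    //   [ S (node object) | Input[0] | Input[1] | Use[0] | Use[1] ]
    //   ^                 ^                     ^
    //   buffer            buffer + node_size    buffer + node_size
    //                                                  + inputs_size
    //
    // The Input records must directly follow the node object, which is
    // exactly what "this + 1" in the constructor relies on.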
diff --git a/src/compiler/generic-node.h b/src/compiler/generic-node.h
new file mode 100644 (file)
index 0000000..a7d6661
--- /dev/null
@@ -0,0 +1,271 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_NODE_H_
+#define V8_COMPILER_GENERIC_NODE_H_
+
+#include <deque>
+
+#include "src/v8.h"
+
+#include "src/compiler/operator.h"
+#include "src/zone.h"
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Operator;
+class GenericGraphBase;
+
+typedef int NodeId;
+
+// A GenericNode<> is the basic primitive of graphs. GenericNodes are
+// chained together by input/use chains, but otherwise by default contain
+// only an identifying number, which specific applications of graphs and
+// nodes can use to index auxiliary out-of-line data, especially transient
+// data.
+// Specializations of the templatized GenericNode<> class must provide a base
+// class B that contains all of the members to be made available in each
+// specialized Node instance. GenericNode uses a mixin template pattern to
+// ensure that common accessors and methods expect the derived class S type
+// rather than the GenericNode<B, S> type.
+template <class B, class S>
+class GenericNode : public B {
+ public:
+  typedef B BaseClass;
+  typedef S DerivedClass;
+
+  inline NodeId id() const { return id_; }
+
+  int InputCount() const { return input_count_; }
+  S* InputAt(int index) const {
+    return static_cast<S*>(GetInputRecordPtr(index)->to);
+  }
+  void ReplaceInput(int index, GenericNode* new_input);
+  void AppendInput(Zone* zone, GenericNode* new_input);
+  void InsertInput(Zone* zone, int index, GenericNode* new_input);
+
+  int UseCount() { return use_count_; }
+  S* UseAt(int index) {
+    ASSERT(index < use_count_);
+    Use* current = first_use_;
+    while (index-- != 0) {
+      current = current->next;
+    }
+    return static_cast<S*>(current->from);
+  }
+  inline void ReplaceUses(GenericNode* replace_to);
+  template <class UnaryPredicate>
+  inline void ReplaceUsesIf(UnaryPredicate pred, GenericNode* replace_to);
+  void RemoveAllInputs();
+
+  void TrimInputCount(int input_count);
+
+  class Inputs {
+   public:
+    class iterator;
+    iterator begin();
+    iterator end();
+
+    explicit Inputs(GenericNode* node) : node_(node) {}
+
+   private:
+    GenericNode* node_;
+  };
+
+  Inputs inputs() { return Inputs(this); }
+
+  class Uses {
+   public:
+    class iterator;
+    iterator begin();
+    iterator end();
+    bool empty() { return begin() == end(); }
+
+    explicit Uses(GenericNode* node) : node_(node) {}
+
+   private:
+    GenericNode* node_;
+  };
+
+  Uses uses() { return Uses(this); }
+
+  class Edge;
+
+  bool OwnedBy(GenericNode* owner) const;
+
+  static S* New(GenericGraphBase* graph, int input_count, S** inputs);
+
+ protected:
+  friend class GenericGraphBase;
+
+  class Use : public ZoneObject {
+   public:
+    GenericNode* from;
+    Use* next;
+    Use* prev;
+    int input_index;
+  };
+
+  class Input {
+   public:
+    GenericNode* to;
+    Use* use;
+
+    void Update(GenericNode* new_to);
+  };
+
+  void EnsureAppendableInputs(Zone* zone);
+
+  Input* GetInputRecordPtr(int index) const {
+    if (has_appendable_inputs_) {
+      return &((*inputs_.appendable_)[index]);
+    } else {
+      return inputs_.static_ + index;
+    }
+  }
+
+  void AppendUse(Use* use);
+  void RemoveUse(Use* use);
+
+  void* operator new(size_t, void* location) { return location; }
+
+  GenericNode(GenericGraphBase* graph, int input_count);
+
+ private:
+  void AssignUniqueID(GenericGraphBase* graph);
+
+  typedef zone_allocator<Input> ZoneInputAllocator;
+  typedef std::deque<Input, ZoneInputAllocator> InputDeque;
+
+  NodeId id_;
+  int input_count_ : 31;
+  bool has_appendable_inputs_ : 1;
+  union {
+    // When a node is initially allocated, it uses a static buffer to hold its
+    // inputs under the assumption that the number of inputs will not increase.
+    // When an input is later appended, the static buffer is converted into a
+    // deque to allow for space-efficient growing.
+    Input* static_;
+    InputDeque* appendable_;
+  } inputs_;
+  int use_count_;
+  Use* first_use_;
+  Use* last_use_;
+
+  DISALLOW_COPY_AND_ASSIGN(GenericNode);
+};
+
+// An encapsulation for information associated with a single use of a node as
+// an input to another node, allowing access to both the defining node and
+// the node having the input.
+template <class B, class S>
+class GenericNode<B, S>::Edge {
+ public:
+  S* from() const { return static_cast<S*>(input_->use->from); }
+  S* to() const { return static_cast<S*>(input_->to); }
+  int index() const {
+    int index = input_->use->input_index;
+    ASSERT(index < input_->use->from->input_count_);
+    return index;
+  }
+
+ private:
+  friend class GenericNode<B, S>::Uses::iterator;
+  friend class GenericNode<B, S>::Inputs::iterator;
+
+  explicit Edge(typename GenericNode<B, S>::Input* input) : input_(input) {}
+
+  typename GenericNode<B, S>::Input* input_;
+};
+
+// A forward iterator to visit the nodes on which a node depends, in input
+// order.
+template <class B, class S>
+class GenericNode<B, S>::Inputs::iterator {
+ public:
+  iterator(const typename GenericNode<B, S>::Inputs::iterator& other)  // NOLINT
+      : node_(other.node_),
+        index_(other.index_) {}
+
+  S* operator*() { return static_cast<S*>(GetInput()->to); }
+  typename GenericNode<B, S>::Edge edge() {
+    return typename GenericNode::Edge(GetInput());
+  }
+  bool operator==(const iterator& other) const {
+    return other.index_ == index_ && other.node_ == node_;
+  }
+  bool operator!=(const iterator& other) const { return !(other == *this); }
+  iterator& operator++() {
+    ASSERT(node_ != NULL);
+    ASSERT(index_ < node_->input_count_);
+    ++index_;
+    return *this;
+  }
+  int index() { return index_; }
+
+ private:
+  friend class GenericNode;
+
+  explicit iterator(GenericNode* node, int index)
+      : node_(node), index_(index) {}
+
+  Input* GetInput() const { return node_->GetInputRecordPtr(index_); }
+
+  GenericNode* node_;
+  int index_;
+};
+
+// A forward iterator to visit the uses of a node. The uses are returned in
+// the order in which they were added as inputs.
+template <class B, class S>
+class GenericNode<B, S>::Uses::iterator {
+ public:
+  iterator(const typename GenericNode<B, S>::Uses::iterator& other)  // NOLINT
+      : current_(other.current_),
+        index_(other.index_) {}
+
+  S* operator*() { return static_cast<S*>(current_->from); }
+  typename GenericNode<B, S>::Edge edge() {
+    return typename GenericNode::Edge(CurrentInput());
+  }
+
+  bool operator==(const iterator& other) { return other.current_ == current_; }
+  bool operator!=(const iterator& other) { return other.current_ != current_; }
+  iterator& operator++() {
+    ASSERT(current_ != NULL);
+    index_++;
+    current_ = current_->next;
+    return *this;
+  }
+  iterator& UpdateToAndIncrement(GenericNode<B, S>* new_to) {
+    ASSERT(current_ != NULL);
+    index_++;
+    typename GenericNode<B, S>::Input* input = CurrentInput();
+    current_ = current_->next;
+    input->Update(new_to);
+    return *this;
+  }
+  int index() const { return index_; }
+
+ private:
+  friend class GenericNode<B, S>::Uses;
+
+  iterator() : current_(NULL), index_(0) {}
+  explicit iterator(GenericNode<B, S>* node)
+      : current_(node->first_use_), index_(0) {}
+
+  Input* CurrentInput() const {
+    return current_->from->GetInputRecordPtr(current_->input_index);
+  }
+
+  typename GenericNode<B, S>::Use* current_;
+  int index_;
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GENERIC_NODE_H_
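As a sketch of the mixin pattern described in the class comment (MyData and MyNode are hypothetical; the real instantiation lives in node.h):

    // Hypothetical instantiation of GenericNode<B, S>. B carries the shared
    // per-node payload; S is the concrete node type that accessors such as
    // InputAt() and UseAt() hand back, so clients never see GenericNode
    // itself.
    class MyData : public ZoneObject {
     public:
      explicit MyData(Zone* zone) {}  // B must be constructible from a Zone*.
      int mark;                       // Example payload present on every node.
    };

    class MyNode : public GenericNode<MyData, MyNode> {
     public:
      MyNode(GenericGraphBase* graph, int input_count)
          : GenericNode<MyData, MyNode>(graph, input_count) {}
    };

    // MyNode* input = node->InputAt(0);  // Typed as MyNode*, not
    //                                    // GenericNode<MyData, MyNode>*.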
diff --git a/src/compiler/graph-builder.cc b/src/compiler/graph-builder.cc
new file mode 100644 (file)
index 0000000..94606ce
--- /dev/null
@@ -0,0 +1,253 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-builder.h"
+
+#include "src/compiler.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/operator-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+StructuredGraphBuilder::StructuredGraphBuilder(Graph* graph,
+                                               CommonOperatorBuilder* common)
+    : GraphBuilder(graph),
+      common_(common),
+      environment_(NULL),
+      current_context_(NULL),
+      exit_control_(NULL) {}
+
+
+Node* StructuredGraphBuilder::MakeNode(Operator* op, int value_input_count,
+                                       Node** value_inputs) {
+  bool has_context = OperatorProperties::HasContextInput(op);
+  bool has_control = OperatorProperties::GetControlInputCount(op) == 1;
+  bool has_effect = OperatorProperties::GetEffectInputCount(op) == 1;
+
+  ASSERT(OperatorProperties::GetControlInputCount(op) < 2);
+  ASSERT(OperatorProperties::GetEffectInputCount(op) < 2);
+
+  Node* result = NULL;
+  if (!has_context && !has_control && !has_effect) {
+    result = graph()->NewNode(op, value_input_count, value_inputs);
+  } else {
+    int input_count_with_deps = value_input_count;
+    if (has_context) ++input_count_with_deps;
+    if (has_control) ++input_count_with_deps;
+    if (has_effect) ++input_count_with_deps;
+    void* raw_buffer = alloca(kPointerSize * input_count_with_deps);
+    Node** buffer = reinterpret_cast<Node**>(raw_buffer);
+    memcpy(buffer, value_inputs, kPointerSize * value_input_count);
+    Node** current_input = buffer + value_input_count;
+    if (has_context) {
+      *current_input++ = current_context();
+    }
+    if (has_effect) {
+      *current_input++ = environment_->GetEffectDependency();
+    }
+    if (has_control) {
+      *current_input++ = GetControlDependency();
+    }
+    result = graph()->NewNode(op, input_count_with_deps, buffer);
+    if (has_effect) {
+      environment_->UpdateEffectDependency(result);
+    }
+    if (NodeProperties::HasControlOutput(result) &&
+        !environment_internal()->IsMarkedAsUnreachable()) {
+      UpdateControlDependency(result);
+    }
+  }
+
+  return result;
+}
+
+
+Node* StructuredGraphBuilder::GetControlDependency() {
+  return environment_->GetControlDependency();
+}
+
+
+void StructuredGraphBuilder::UpdateControlDependency(Node* new_control) {
+  environment_->UpdateControlDependency(new_control);
+}
+
+
+void StructuredGraphBuilder::UpdateControlDependencyToLeaveFunction(
+    Node* exit) {
+  if (environment_internal()->IsMarkedAsUnreachable()) return;
+  if (exit_control() != NULL) {
+    exit = MergeControl(exit_control(), exit);
+  }
+  environment_internal()->MarkAsUnreachable();
+  set_exit_control(exit);
+}
+
+
+StructuredGraphBuilder::Environment* StructuredGraphBuilder::CopyEnvironment(
+    Environment* env) {
+  return new (zone()) Environment(*env);
+}
+
+
+StructuredGraphBuilder::Environment::Environment(
+    StructuredGraphBuilder* builder, Node* control_dependency)
+    : builder_(builder),
+      control_dependency_(control_dependency),
+      effect_dependency_(control_dependency),
+      values_(NodeVector::allocator_type(zone())) {}
+
+
+StructuredGraphBuilder::Environment::Environment(const Environment& copy)
+    : builder_(copy.builder()),
+      control_dependency_(copy.control_dependency_),
+      effect_dependency_(copy.effect_dependency_),
+      values_(copy.values_) {}
+
+
+void StructuredGraphBuilder::Environment::Merge(Environment* other) {
+  ASSERT(values_.size() == other->values_.size());
+
+  // Nothing to do if the other environment is dead.
+  if (other->IsMarkedAsUnreachable()) return;
+
+  // Resurrect a dead environment by copying the contents of the other one and
+  // placing a singleton merge as the new control dependency.
+  if (this->IsMarkedAsUnreachable()) {
+    Node* other_control = other->control_dependency_;
+    control_dependency_ = graph()->NewNode(common()->Merge(1), other_control);
+    effect_dependency_ = other->effect_dependency_;
+    values_ = other->values_;
+    return;
+  }
+
+  // Create a merge of the control dependencies of both environments and update
+  // the current environment's control dependency accordingly.
+  Node* control = builder_->MergeControl(this->GetControlDependency(),
+                                         other->GetControlDependency());
+  UpdateControlDependency(control);
+
+  // Create a merge of the effect dependencies of both environments and update
+  // the current environment's effect dependency accordingly.
+  Node* effect = builder_->MergeEffect(this->GetEffectDependency(),
+                                       other->GetEffectDependency(), control);
+  UpdateEffectDependency(effect);
+
+  // Introduce Phi nodes for values that have differing inputs at merge
+  // points, extending an existing Phi node where possible.
+  for (int i = 0; i < static_cast<int>(values_.size()); ++i) {
+    if (values_[i] == NULL) continue;
+    values_[i] = builder_->MergeValue(values_[i], other->values_[i], control);
+  }
+}
+
+
+void StructuredGraphBuilder::Environment::PrepareForLoop() {
+  Node* control = GetControlDependency();
+  for (int i = 0; i < static_cast<int>(values()->size()); ++i) {
+    if (values()->at(i) == NULL) continue;
+    Node* phi = builder_->NewPhi(1, values()->at(i), control);
+    values()->at(i) = phi;
+  }
+  Node* effect = builder_->NewEffectPhi(1, GetEffectDependency(), control);
+  UpdateEffectDependency(effect);
+}
+
+
+Node* StructuredGraphBuilder::NewPhi(int count, Node* input, Node* control) {
+  Operator* phi_op = common()->Phi(count);
+  void* raw_buffer = alloca(kPointerSize * (count + 1));
+  Node** buffer = reinterpret_cast<Node**>(raw_buffer);
+  MemsetPointer(buffer, input, count);
+  buffer[count] = control;
+  return graph()->NewNode(phi_op, count + 1, buffer);
+}
+
+
+// TODO(mstarzinger): Revisit this once we have proper effect states.
+Node* StructuredGraphBuilder::NewEffectPhi(int count, Node* input,
+                                           Node* control) {
+  Operator* phi_op = common()->EffectPhi(count);
+  void* raw_buffer = alloca(kPointerSize * (count + 1));
+  Node** buffer = reinterpret_cast<Node**>(raw_buffer);
+  MemsetPointer(buffer, input, count);
+  buffer[count] = control;
+  return graph()->NewNode(phi_op, count + 1, buffer);
+}
+
+
+Node* StructuredGraphBuilder::MergeControl(Node* control, Node* other) {
+  int inputs = NodeProperties::GetControlInputCount(control) + 1;
+  if (control->opcode() == IrOpcode::kLoop) {
+    // Control node for loop exists, add input.
+    Operator* op = common()->Loop(inputs);
+    control->AppendInput(zone(), other);
+    control->set_op(op);
+  } else if (control->opcode() == IrOpcode::kMerge) {
+    // Control node for merge exists, add input.
+    Operator* op = common()->Merge(inputs);
+    control->AppendInput(zone(), other);
+    control->set_op(op);
+  } else {
+    // Control node is a singleton, introduce a merge.
+    Operator* op = common()->Merge(inputs);
+    control = graph()->NewNode(op, control, other);
+  }
+  return control;
+}
+
+
+Node* StructuredGraphBuilder::MergeEffect(Node* value, Node* other,
+                                          Node* control) {
+  int inputs = NodeProperties::GetControlInputCount(control);
+  if (value->opcode() == IrOpcode::kEffectPhi &&
+      NodeProperties::GetControlInput(value) == control) {
+    // Phi already exists, add input.
+    value->set_op(common()->EffectPhi(inputs));
+    value->InsertInput(zone(), inputs - 1, other);
+  } else if (value != other) {
+    // Phi does not exist yet, introduce one.
+    value = NewEffectPhi(inputs, value, control);
+    value->ReplaceInput(inputs - 1, other);
+  }
+  return value;
+}
+
+
+Node* StructuredGraphBuilder::MergeValue(Node* value, Node* other,
+                                         Node* control) {
+  int inputs = NodeProperties::GetControlInputCount(control);
+  if (value->opcode() == IrOpcode::kPhi &&
+      NodeProperties::GetControlInput(value) == control) {
+    // Phi already exists, add input.
+    value->set_op(common()->Phi(inputs));
+    value->InsertInput(zone(), inputs - 1, other);
+  } else if (value != other) {
+    // Phi does not exist yet, introduce one.
+    value = NewPhi(inputs, value, control);
+    value->ReplaceInput(inputs - 1, other);
+  }
+  return value;
+}
+
+
+Node* StructuredGraphBuilder::dead_control() {
+  if (!dead_control_.is_set()) {
+    Node* dead_node = graph()->NewNode(common_->Dead());
+    dead_control_.set(dead_node);
+    return dead_node;
+  }
+  return dead_control_.get();
+}
+}
+}
+}  // namespace v8::internal::compiler
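Taken together, the helpers above implement the usual SSA-style diamond. A schematic walk-through (the environment() accessor and the arm-building steps are abbreviations, not literal code from this change):

    // Schematic if/else construction on top of Environment::Merge:
    //
    //   Environment* else_env = environment()->CopyForConditional();
    //   // ... build the then-arm in the current environment ...
    //   // ... build the else-arm in else_env ...
    //   environment()->Merge(else_env);
    //
    // Merge() widens (or creates) the Merge control node via MergeControl(),
    // merges the effect chains via MergeEffect() (an EffectPhi), and
    // installs a value Phi via MergeValue() for every slot where the two
    // environments disagree.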
diff --git a/src/compiler/graph-builder.h b/src/compiler/graph-builder.h
new file mode 100644 (file)
index 0000000..ac42f6e
--- /dev/null
@@ -0,0 +1,232 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_BUILDER_H_
+#define V8_COMPILER_GRAPH_BUILDER_H_
+
+#include "src/v8.h"
+
+#include "src/allocation.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Node;
+
+// A common base class for anything that creates nodes in a graph.
+class GraphBuilder {
+ public:
+  explicit GraphBuilder(Graph* graph) : graph_(graph) {}
+  virtual ~GraphBuilder() {}
+
+  Node* NewNode(Operator* op) {
+    return MakeNode(op, 0, static_cast<Node**>(NULL));
+  }
+
+  Node* NewNode(Operator* op, Node* n1) { return MakeNode(op, 1, &n1); }
+
+  Node* NewNode(Operator* op, Node* n1, Node* n2) {
+    Node* buffer[] = {n1, n2};
+    return MakeNode(op, ARRAY_SIZE(buffer), buffer);
+  }
+
+  Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3) {
+    Node* buffer[] = {n1, n2, n3};
+    return MakeNode(op, ARRAY_SIZE(buffer), buffer);
+  }
+
+  Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+    Node* buffer[] = {n1, n2, n3, n4};
+    return MakeNode(op, ARRAY_SIZE(buffer), buffer);
+  }
+
+  Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5) {
+    Node* buffer[] = {n1, n2, n3, n4, n5};
+    return MakeNode(op, ARRAY_SIZE(buffer), buffer);
+  }
+
+  Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4, Node* n5,
+                Node* n6) {
+    Node* nodes[] = {n1, n2, n3, n4, n5, n6};
+    return MakeNode(op, ARRAY_SIZE(nodes), nodes);
+  }
+
+  Node* NewNode(Operator* op, int value_input_count, Node** value_inputs) {
+    return MakeNode(op, value_input_count, value_inputs);
+  }
+
+  Graph* graph() const { return graph_; }
+
+ protected:
+  // Base implementation used by all factory methods.
+  virtual Node* MakeNode(Operator* op, int value_input_count,
+                         Node** value_inputs) = 0;
+
+ private:
+  Graph* graph_;
+};
+
+
+// The StructuredGraphBuilder produces a high-level IR graph. It is used as the
+// base class for concrete implementations (e.g. the AstGraphBuilder or the
+// StubGraphBuilder).
+class StructuredGraphBuilder : public GraphBuilder {
+ public:
+  StructuredGraphBuilder(Graph* graph, CommonOperatorBuilder* common);
+  virtual ~StructuredGraphBuilder() {}
+
+  // Creates a new Phi node having {count} input values.
+  Node* NewPhi(int count, Node* input, Node* control);
+  Node* NewEffectPhi(int count, Node* input, Node* control);
+
+  // Helpers for merging control, effect or value dependencies.
+  Node* MergeControl(Node* control, Node* other);
+  Node* MergeEffect(Node* value, Node* other, Node* control);
+  Node* MergeValue(Node* value, Node* other, Node* control);
+
+  // Helpers to create new control nodes.
+  Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
+  Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
+  Node* NewMerge() { return NewNode(common()->Merge(1)); }
+  Node* NewLoop() { return NewNode(common()->Loop(1)); }
+  Node* NewBranch(Node* condition) {
+    return NewNode(common()->Branch(), condition);
+  }
+
+ protected:
+  class Environment;
+  friend class ControlBuilder;
+
+  // The following method creates a new node having the specified operator and
+  // ensures effect and control dependencies are wired up. The dependencies
+  // tracked by the environment might be mutated.
+  virtual Node* MakeNode(Operator* op, int value_input_count,
+                         Node** value_inputs);
+
+  Environment* environment_internal() const { return environment_; }
+  void set_environment(Environment* env) { environment_ = env; }
+
+  Node* current_context() const { return current_context_; }
+  void set_current_context(Node* context) { current_context_ = context; }
+
+  Node* exit_control() const { return exit_control_; }
+  void set_exit_control(Node* node) { exit_control_ = node; }
+
+  Node* dead_control();
+
+  // TODO(mstarzinger): Use phase-local zone instead!
+  Zone* zone() const { return graph()->zone(); }
+  Isolate* isolate() const { return zone()->isolate(); }
+  CommonOperatorBuilder* common() const { return common_; }
+
+  // Helper to wrap a Handle<T> into a Unique<T>.
+  template <class T>
+  PrintableUnique<T> MakeUnique(Handle<T> object) {
+    return PrintableUnique<T>::CreateUninitialized(zone(), object);
+  }
+
+  // Support for control flow builders. The concrete type of the environment
+  // depends on the graph builder, but environments themselves are not virtual.
+  virtual Environment* CopyEnvironment(Environment* env);
+
+  // Helper when creating a node that depends on control.
+  Node* GetControlDependency();
+
+  // Helper when creating a node that updates control.
+  void UpdateControlDependency(Node* new_control);
+
+  // Helper to indicate that a node exits the function body.
+  void UpdateControlDependencyToLeaveFunction(Node* exit);
+
+ private:
+  CommonOperatorBuilder* common_;
+  Environment* environment_;
+
+  // Node representing the control dependency for dead code.
+  SetOncePointer<Node> dead_control_;
+
+  // Node representing the current context within the function body.
+  Node* current_context_;
+
+  // Merge of all control nodes that exit the function body.
+  Node* exit_control_;
+
+  DISALLOW_COPY_AND_ASSIGN(StructuredGraphBuilder);
+};
+
+
+// The abstract execution environment contains static knowledge about
+// execution state at arbitrary control-flow points. It allows for
+// simulation of control flow at compile time.
+class StructuredGraphBuilder::Environment : public ZoneObject {
+ public:
+  Environment(StructuredGraphBuilder* builder, Node* control_dependency);
+  Environment(const Environment& copy);
+
+  // Control dependency tracked by this environment.
+  Node* GetControlDependency() { return control_dependency_; }
+  void UpdateControlDependency(Node* dependency) {
+    control_dependency_ = dependency;
+  }
+
+  // Effect dependency tracked by this environment.
+  Node* GetEffectDependency() { return effect_dependency_; }
+  void UpdateEffectDependency(Node* dependency) {
+    effect_dependency_ = dependency;
+  }
+
+  // Mark this environment as being unreachable.
+  void MarkAsUnreachable() {
+    UpdateControlDependency(builder()->dead_control());
+  }
+  bool IsMarkedAsUnreachable() {
+    return GetControlDependency()->opcode() == IrOpcode::kDead;
+  }
+
+  // Merge another environment into this one.
+  void Merge(Environment* other);
+
+  // Copies this environment at a control-flow split point.
+  Environment* CopyForConditional() { return builder()->CopyEnvironment(this); }
+
+  // Copies this environment to a potentially unreachable control-flow point.
+  Environment* CopyAsUnreachable() {
+    Environment* env = builder()->CopyEnvironment(this);
+    env->MarkAsUnreachable();
+    return env;
+  }
+
+  // Copies this environment at a loop header control-flow point.
+  Environment* CopyForLoop() {
+    PrepareForLoop();
+    return builder()->CopyEnvironment(this);
+  }
+
+ protected:
+  // TODO(mstarzinger): Use phase-local zone instead!
+  Zone* zone() const { return graph()->zone(); }
+  Graph* graph() const { return builder_->graph(); }
+  StructuredGraphBuilder* builder() const { return builder_; }
+  CommonOperatorBuilder* common() { return builder_->common(); }
+  NodeVector* values() { return &values_; }
+
+  // Prepare environment to be used as loop header.
+  void PrepareForLoop();
+
+ private:
+  StructuredGraphBuilder* builder_;
+  Node* control_dependency_;
+  Node* effect_dependency_;
+  NodeVector values_;
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GRAPH_BUILDER_H_
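The loop helpers compose in a similar way; a schematic sketch (in practice the ControlBuilder machinery in control-builders.h drives this sequence):

    // Schematic loop construction on top of the Environment helpers:
    //
    //   Environment* env_at_entry = environment()->CopyForLoop();
    //   // CopyForLoop() first runs PrepareForLoop(), turning every tracked
    //   // value into a one-input Phi and the effect chain into a one-input
    //   // EffectPhi, all anchored on the loop's control node.
    //   // ... build the loop body ...
    //   env_at_entry->Merge(body_env);  // The back edge: MergeControl()
    //                                   // widens the kLoop node, and each
    //                                   // Phi/EffectPhi gains an input.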
diff --git a/src/compiler/graph-inl.h b/src/compiler/graph-inl.h
new file mode 100644 (file)
index 0000000..f8423c3
--- /dev/null
@@ -0,0 +1,37 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_INL_H_
+#define V8_COMPILER_GRAPH_INL_H_
+
+#include "src/compiler/generic-algorithm-inl.h"
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <class Visitor>
+void Graph::VisitNodeUsesFrom(Node* node, Visitor* visitor) {
+  GenericGraphVisit::Visit<Visitor, NodeUseIterationTraits<Node> >(this, node,
+                                                                   visitor);
+}
+
+
+template <class Visitor>
+void Graph::VisitNodeUsesFromStart(Visitor* visitor) {
+  VisitNodeUsesFrom(start(), visitor);
+}
+
+
+template <class Visitor>
+void Graph::VisitNodeInputsFromEnd(Visitor* visitor) {
+  GenericGraphVisit::Visit<Visitor, NodeInputIterationTraits<Node> >(
+      this, end(), visitor);
+}
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GRAPH_INL_H_
diff --git a/src/compiler/graph-reducer.cc b/src/compiler/graph-reducer.cc
new file mode 100644 (file)
index 0000000..f062d4b
--- /dev/null
@@ -0,0 +1,94 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-reducer.h"
+
+#include <functional>
+
+#include "src/compiler/graph-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+GraphReducer::GraphReducer(Graph* graph)
+    : graph_(graph), reducers_(Reducers::allocator_type(graph->zone())) {}
+
+
+static bool NodeIdIsLessThan(const Node* node, NodeId id) {
+  return node->id() < id;
+}
+
+
+void GraphReducer::ReduceNode(Node* node) {
+  Reducers::iterator skip = reducers_.end();
+  static const unsigned kMaxAttempts = 16;
+  bool reduce = true;
+  for (unsigned attempts = 0; attempts <= kMaxAttempts; ++attempts) {
+    if (!reduce) return;
+    reduce = false;  // Assume we don't need to rerun any reducers.
+    int before = graph_->NodeCount();
+    for (Reducers::iterator i = reducers_.begin(); i != reducers_.end(); ++i) {
+      if (i == skip) continue;  // Skip this reducer.
+      Reduction reduction = (*i)->Reduce(node);
+      Node* replacement = reduction.replacement();
+      if (replacement == NULL) {
+        // No change from this reducer.
+      } else if (replacement == node) {
+        // {replacement == node} represents an in-place reduction.
+        // Rerun all the reducers except the current one for this node,
+        // as now there may be more opportunities for reduction.
+        reduce = true;
+        skip = i;
+        break;
+      } else {
+        if (node == graph_->start()) graph_->SetStart(replacement);
+        if (node == graph_->end()) graph_->SetEnd(replacement);
+        // If {node} was replaced by an old node, unlink {node} and assume that
+        // {replacement} was already reduced and finish.
+        if (replacement->id() < before) {
+          node->RemoveAllInputs();
+          node->ReplaceUses(replacement);
+          return;
+        }
+        // Otherwise, {node} was replaced by a new node. Replace all old uses of
+        // {node} with {replacement}. New nodes created by this reduction can
+        // use {node}.
+        node->ReplaceUsesIf(
+            std::bind2nd(std::ptr_fun(&NodeIdIsLessThan), before), replacement);
+        // Unlink {node} if it's no longer used.
+        if (node->uses().empty()) node->RemoveAllInputs();
+        // Rerun all the reductions on the {replacement}.
+        skip = reducers_.end();
+        node = replacement;
+        reduce = true;
+        break;
+      }
+    }
+  }
+}
+
+
+// A helper class to reuse the node traversal algorithm.
+struct GraphReducerVisitor V8_FINAL : public NullNodeVisitor {
+  explicit GraphReducerVisitor(GraphReducer* reducer) : reducer_(reducer) {}
+  GenericGraphVisit::Control Post(Node* node) {
+    reducer_->ReduceNode(node);
+    return GenericGraphVisit::CONTINUE;
+  }
+  GraphReducer* reducer_;
+};
+
+
+void GraphReducer::ReduceGraph() {
+  GraphReducerVisitor visitor(this);
+  // Perform a post-order reduction of all nodes starting from the end.
+  graph()->VisitNodeInputsFromEnd(&visitor);
+}
+
+
+// TODO(titzer): partial graph reductions.
+}
+}
+}  // namespace v8::internal::compiler
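For reference, the outcome handling in ReduceNode() above, condensed:

    // Possible results of reducer->Reduce(node) and their handling:
    //   replacement == NULL          no change; try the next reducer.
    //   replacement == node          in-place change; rerun the other
    //                                reducers on the same node.
    //   replacement->id() < before   replaced by a pre-existing node, which
    //                                is assumed already reduced: splice all
    //                                uses over and stop.
    //   replacement->id() >= before  replaced by a node created during this
    //                                reduction: redirect only the old uses,
    //                                then reduce the replacement itself.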
diff --git a/src/compiler/graph-reducer.h b/src/compiler/graph-reducer.h
new file mode 100644 (file)
index 0000000..33cded6
--- /dev/null
@@ -0,0 +1,77 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_REDUCER_H_
+#define V8_COMPILER_GRAPH_REDUCER_H_
+
+#include <list>
+
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class Graph;
+class Node;
+
+
+// Represents the result of trying to reduce a node in the graph.
+class Reduction V8_FINAL {
+ public:
+  explicit Reduction(Node* replacement = NULL) : replacement_(replacement) {}
+
+  Node* replacement() const { return replacement_; }
+  bool Changed() const { return replacement() != NULL; }
+
+ private:
+  Node* replacement_;
+};
+
+
+// A reducer can reduce or simplify a given node based on its operator and
+// inputs. This class functions as an extension point for the graph reducer:
+// language-specific reductions (e.g. reduction based on types or constant
+// folding of low-level operators) can be integrated into the graph reduction
+// phase.
+class Reducer {
+ public:
+  virtual ~Reducer() {}
+
+  // Try to reduce a node if possible.
+  virtual Reduction Reduce(Node* node) = 0;
+
+  // Helper functions for subclasses to produce reductions for a node.
+  static Reduction NoChange() { return Reduction(); }
+  static Reduction Replace(Node* node) { return Reduction(node); }
+  static Reduction Changed(Node* node) { return Reduction(node); }
+};
+
+
+// Performs an iterative reduction of a node graph.
+class GraphReducer V8_FINAL {
+ public:
+  explicit GraphReducer(Graph* graph);
+
+  Graph* graph() const { return graph_; }
+
+  void AddReducer(Reducer* reducer) { reducers_.push_back(reducer); }
+
+  // Reduce a single node.
+  void ReduceNode(Node* node);
+  // Reduce the whole graph.
+  void ReduceGraph();
+
+ private:
+  typedef std::list<Reducer*, zone_allocator<Reducer*> > Reducers;
+
+  Graph* graph_;
+  Reducers reducers_;
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GRAPH_REDUCER_H_
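A minimal sketch of a concrete Reducer and its registration (DoubleNegationReducer and the kSomeNegate opcode are hypothetical, chosen only to show the shape of the extension point):

    // Hypothetical reducer: folds a double negation to its original input.
    // IrOpcode::kSomeNegate is an illustrative opcode, not one from this
    // change.
    class DoubleNegationReducer V8_FINAL : public Reducer {
     public:
      virtual Reduction Reduce(Node* node) {
        if (node->opcode() == IrOpcode::kSomeNegate) {
          Node* input = node->InputAt(0);
          if (input->opcode() == IrOpcode::kSomeNegate) {
            return Replace(input->InputAt(0));  // --x  =>  x
          }
        }
        return NoChange();  // Leave every other node alone.
      }
    };

    // Usage:
    //   GraphReducer reducer(graph);
    //   DoubleNegationReducer folder;
    //   reducer.AddReducer(&folder);
    //   reducer.ReduceGraph();  // Runs all reducers to a fixed point.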
diff --git a/src/compiler/graph-replay.cc b/src/compiler/graph-replay.cc
new file mode 100644 (file)
index 0000000..efb1180
--- /dev/null
@@ -0,0 +1,81 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-replay.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/operator-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#ifdef DEBUG
+
+void GraphReplayPrinter::PrintReplay(Graph* graph) {
+  GraphReplayPrinter replay;
+  PrintF("  Node* nil = graph.NewNode(common_builder.Dead());\n");
+  graph->VisitNodeInputsFromEnd(&replay);
+}
+
+
+GenericGraphVisit::Control GraphReplayPrinter::Pre(Node* node) {
+  PrintReplayOpCreator(node->op());
+  PrintF("  Node* n%d = graph.NewNode(op", node->id());
+  for (int i = 0; i < node->InputCount(); ++i) {
+    PrintF(", nil");
+  }
+  PrintF("); USE(n%d);\n", node->id());
+  return GenericGraphVisit::CONTINUE;
+}
+
+
+void GraphReplayPrinter::PostEdge(Node* from, int index, Node* to) {
+  PrintF("  n%d->ReplaceInput(%d, n%d);\n", from->id(), index, to->id());
+}
+
+
+void GraphReplayPrinter::PrintReplayOpCreator(Operator* op) {
+  IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
+  const char* builder =
+      IrOpcode::IsCommonOpcode(opcode) ? "common_builder" : "js_builder";
+  const char* mnemonic = IrOpcode::IsCommonOpcode(opcode)
+                             ? IrOpcode::Mnemonic(opcode)
+                             : IrOpcode::Mnemonic(opcode) + 2;
+  PrintF("  op = %s.%s(", builder, mnemonic);
+  switch (opcode) {
+    case IrOpcode::kParameter:
+    case IrOpcode::kNumberConstant:
+      PrintF("0");
+      break;
+    case IrOpcode::kLoad:
+      PrintF("unique_name");
+      break;
+    case IrOpcode::kHeapConstant:
+      PrintF("unique_constant");
+      break;
+    case IrOpcode::kPhi:
+      PrintF("%d", op->InputCount());
+      break;
+    case IrOpcode::kEffectPhi:
+      PrintF("%d", OperatorProperties::GetEffectInputCount(op));
+      break;
+    case IrOpcode::kLoop:
+    case IrOpcode::kMerge:
+      PrintF("%d", OperatorProperties::GetControlInputCount(op));
+      break;
+    default:
+      break;
+  }
+  PrintF(");\n");
+}
+
+#endif  // DEBUG
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/graph-replay.h b/src/compiler/graph-replay.h
new file mode 100644 (file)
index 0000000..cc186d7
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_REPLAY_H_
+#define V8_COMPILER_GRAPH_REPLAY_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Graph;
+class Operator;
+
+// Helper class to print a full replay of a graph. This replay can be used to
+// materialize the same graph within a C++ unit test and hence test subsequent
+// optimization passes on a graph without going through the construction steps.
+class GraphReplayPrinter : public NullNodeVisitor {
+ public:
+#ifdef DEBUG
+  static void PrintReplay(Graph* graph);
+#else
+  static void PrintReplay(Graph* graph) {}
+#endif
+
+  GenericGraphVisit::Control Pre(Node* node);
+  void PostEdge(Node* from, int index, Node* to);
+
+ private:
+  GraphReplayPrinter() {}
+
+  static void PrintReplayOpCreator(Operator* op);
+
+  DISALLOW_COPY_AND_ASSIGN(GraphReplayPrinter);
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GRAPH_REPLAY_H_
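Reconstructed from the PrintF calls in graph-replay.cc above, the emitted replay for a two-predecessor merge looks roughly like this (node ids are illustrative):

    //   Node* nil = graph.NewNode(common_builder.Dead());
    //   op = common_builder.Merge(2);
    //   Node* n7 = graph.NewNode(op, nil, nil); USE(n7);
    //   ...
    //   n7->ReplaceInput(0, n3);
    //   n7->ReplaceInput(1, n5);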
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
new file mode 100644 (file)
index 0000000..032d3d9
--- /dev/null
@@ -0,0 +1,260 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-visualizer.h"
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define DEAD_COLOR "#999999"
+
+class GraphVisualizer : public NullNodeVisitor {
+ public:
+  GraphVisualizer(OStream& os, const Graph* graph);  // NOLINT
+
+  void Print();
+
+  GenericGraphVisit::Control Pre(Node* node);
+  GenericGraphVisit::Control PreEdge(Node* from, int index, Node* to);
+
+ private:
+  void AnnotateNode(Node* node);
+  void PrintEdge(Node* from, int index, Node* to);
+
+  NodeSet all_nodes_;
+  NodeSet white_nodes_;
+  bool use_to_def_;
+  OStream& os_;
+  const Graph* const graph_;
+
+  DISALLOW_COPY_AND_ASSIGN(GraphVisualizer);
+};
+
+
+static Node* GetControlCluster(Node* node) {
+  if (NodeProperties::IsBasicBlockBegin(node)) {
+    return node;
+  } else if (NodeProperties::GetControlInputCount(node) == 1) {
+    Node* control = NodeProperties::GetControlInput(node, 0);
+    return NodeProperties::IsBasicBlockBegin(control) ? control : NULL;
+  } else {
+    return NULL;
+  }
+}
+
+
+GenericGraphVisit::Control GraphVisualizer::Pre(Node* node) {
+  if (all_nodes_.count(node) == 0) {
+    Node* control_cluster = GetControlCluster(node);
+    if (control_cluster != NULL) {
+      os_ << "  subgraph cluster_BasicBlock" << control_cluster->id() << " {\n";
+    }
+    os_ << "  ID" << node->id() << " [\n";
+    AnnotateNode(node);
+    os_ << "  ]\n";
+    if (control_cluster != NULL) os_ << "  }\n";
+    all_nodes_.insert(node);
+    if (use_to_def_) white_nodes_.insert(node);
+  }
+  return GenericGraphVisit::CONTINUE;
+}
+
+
+GenericGraphVisit::Control GraphVisualizer::PreEdge(Node* from, int index,
+                                                    Node* to) {
+  if (use_to_def_) return GenericGraphVisit::CONTINUE;
+  // When going from def to use, only consider white -> other edges, which are
+  // the dead nodes that use live nodes. We're probably not interested in
+  // dead nodes that only use other dead nodes.
+  if (white_nodes_.count(from) > 0) return GenericGraphVisit::CONTINUE;
+  return GenericGraphVisit::SKIP;
+}
+
+
+class Escaped {
+ public:
+  explicit Escaped(const OStringStream& os) : str_(os.c_str()) {}
+
+  friend OStream& operator<<(OStream& os, const Escaped& e) {
+    for (const char* s = e.str_; *s != '\0'; ++s) {
+      if (needs_escape(*s)) os << "\\";
+      os << *s;
+    }
+    return os;
+  }
+
+ private:
+  static bool needs_escape(char ch) {
+    switch (ch) {
+      case '>':
+      case '<':
+      case '|':
+      case '}':
+      case '{':
+        return true;
+      default:
+        return false;
+    }
+  }
+
+  const char* const str_;
+};
+
+
+static bool IsLikelyBackEdge(Node* from, int index, Node* to) {
+  if (from->opcode() == IrOpcode::kPhi ||
+      from->opcode() == IrOpcode::kEffectPhi) {
+    Node* control = NodeProperties::GetControlInput(from, 0);
+    return control->opcode() != IrOpcode::kMerge && control != to && index != 0;
+  } else if (from->opcode() == IrOpcode::kLoop) {
+    return index != 0;
+  } else {
+    return false;
+  }
+}
+
+
+void GraphVisualizer::AnnotateNode(Node* node) {
+  if (!use_to_def_) {
+    os_ << "    style=\"filled\"\n"
+        << "    fillcolor=\"" DEAD_COLOR "\"\n";
+  }
+
+  os_ << "    shape=\"record\"\n";
+  switch (node->opcode()) {
+    case IrOpcode::kEnd:
+    case IrOpcode::kDead:
+    case IrOpcode::kStart:
+      os_ << "    style=\"diagonals\"\n";
+      break;
+    case IrOpcode::kMerge:
+    case IrOpcode::kIfTrue:
+    case IrOpcode::kIfFalse:
+    case IrOpcode::kLoop:
+      os_ << "    style=\"rounded\"\n";
+      break;
+    default:
+      break;
+  }
+
+  OStringStream label;
+  label << *node->op();
+  os_ << "    label=\"{{#" << node->id() << ":" << Escaped(label);
+
+  InputIter i = node->inputs().begin();
+  for (int j = NodeProperties::GetValueInputCount(node); j > 0; ++i, j--) {
+    os_ << "|<I" << i.index() << ">#" << (*i)->id();
+  }
+  for (int j = NodeProperties::GetContextInputCount(node); j > 0; ++i, j--) {
+    os_ << "|<I" << i.index() << ">X #" << (*i)->id();
+  }
+  for (int j = NodeProperties::GetEffectInputCount(node); j > 0; ++i, j--) {
+    os_ << "|<I" << i.index() << ">E #" << (*i)->id();
+  }
+
+  if (!use_to_def_ || NodeProperties::IsBasicBlockBegin(node) ||
+      GetControlCluster(node) == NULL) {
+    for (int j = NodeProperties::GetControlInputCount(node); j > 0; ++i, j--) {
+      os_ << "|<I" << i.index() << ">C #" << (*i)->id();
+    }
+  }
+  os_ << "}";
+
+  if (FLAG_trace_turbo_types && !NodeProperties::IsControl(node)) {
+    Bounds bounds = NodeProperties::GetBounds(node);
+    OStringStream upper;
+    bounds.upper->PrintTo(upper);
+    OStringStream lower;
+    bounds.lower->PrintTo(lower);
+    os_ << "|" << Escaped(upper) << "|" << Escaped(lower);
+  }
+  os_ << "}\"\n";
+}
+
+
+void GraphVisualizer::PrintEdge(Node* from, int index, Node* to) {
+  bool unconstrained = IsLikelyBackEdge(from, index, to);
+  os_ << "  ID" << from->id();
+  if (all_nodes_.count(to) == 0) {
+    os_ << ":I" << index << ":n -> DEAD_INPUT";
+  } else if (NodeProperties::IsBasicBlockBegin(from) ||
+             GetControlCluster(from) == NULL ||
+             (NodeProperties::GetControlInputCount(from) > 0 &&
+              NodeProperties::GetControlInput(from) != to)) {
+    os_ << ":I" << index << ":n -> ID" << to->id() << ":s";
+    if (unconstrained) os_ << " [constraint=false,style=dotted]";
+  } else {
+    os_ << " -> ID" << to->id() << ":s [color=transparent"
+        << (unconstrained ? ", constraint=false" : "") << "]";
+  }
+  os_ << "\n";
+}
+
+
+void GraphVisualizer::Print() {
+  os_ << "digraph D {\n"
+      << "  node [fontsize=8,height=0.25]\n"
+      << "  rankdir=\"BT\"\n"
+      << "  \n";
+
+  // Make sure all nodes have been output before writing out the edges.
+  use_to_def_ = true;
+  // TODO(svenpanne) Remove the need for the const_casts.
+  const_cast<Graph*>(graph_)->VisitNodeInputsFromEnd(this);
+  white_nodes_.insert(const_cast<Graph*>(graph_)->start());
+
+  // Visit all uses of white nodes.
+  use_to_def_ = false;
+  GenericGraphVisit::Visit<GraphVisualizer, NodeUseIterationTraits<Node> >(
+      const_cast<Graph*>(graph_), white_nodes_.begin(), white_nodes_.end(),
+      this);
+
+  os_ << "  DEAD_INPUT [\n"
+      << "    style=\"filled\" \n"
+      << "    fillcolor=\"" DEAD_COLOR "\"\n"
+      << "  ]\n"
+      << "\n";
+
+  // With all the nodes written, add the edges.
+  for (NodeSetIter i = all_nodes_.begin(); i != all_nodes_.end(); ++i) {
+    Node::Inputs inputs = (*i)->inputs();
+    for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+         ++iter) {
+      PrintEdge(iter.edge().from(), iter.edge().index(), iter.edge().to());
+    }
+  }
+  os_ << "}\n";
+}
+
+
+GraphVisualizer::GraphVisualizer(OStream& os, const Graph* graph)  // NOLINT
+    : all_nodes_(NodeSet::key_compare(),
+                 NodeSet::allocator_type(graph->zone())),
+      white_nodes_(NodeSet::key_compare(),
+                   NodeSet::allocator_type(graph->zone())),
+      use_to_def_(true),
+      os_(os),
+      graph_(graph) {}
+
+
+OStream& operator<<(OStream& os, const AsDOT& ad) {
+  GraphVisualizer(os, &ad.graph).Print();
+  return os;
+}
+}
+}
+}  // namespace v8::internal::compiler
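Pieced together from Print() and AnnotateNode() above, the emitted DOT has roughly this shape (node ids and operator labels are illustrative):

    // digraph D {
    //   node [fontsize=8,height=0.25]
    //   rankdir="BT"
    //
    //   ID1 [
    //     shape="record"
    //     style="diagonals"
    //     label="{{#1:Start}}"
    //   ]
    //   ID2 [
    //     shape="record"
    //     label="{{#2:SomeOp|<I0>#1}}"
    //   ]
    //   DEAD_INPUT [ style="filled" fillcolor="#999999" ]
    //
    //   ID2:I0:n -> ID1:s
    // }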
diff --git a/src/compiler/graph-visualizer.h b/src/compiler/graph-visualizer.h
new file mode 100644 (file)
index 0000000..12532ba
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_VISUALIZER_H_
+#define V8_COMPILER_GRAPH_VISUALIZER_H_
+
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class OStream;
+
+namespace compiler {
+
+class Graph;
+
+struct AsDOT {
+  explicit AsDOT(const Graph& g) : graph(g) {}
+  const Graph& graph;
+};
+
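+// Streams the graph in GraphViz DOT format, e.g. (a sketch):
+//   os << AsDOT(*graph);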
+OStream& operator<<(OStream& os, const AsDOT& ad);
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GRAPH_VISUALIZER_H_
diff --git a/src/compiler/graph.cc b/src/compiler/graph.cc
new file mode 100644 (file)
index 0000000..4e69f0f
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/operator-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Graph::Graph(Zone* zone)
+    : GenericGraph(zone), decorators_(DecoratorVector::allocator_type(zone)) {}
+
+
+Node* Graph::NewNode(Operator* op, int input_count, Node** inputs) {
+  ASSERT(op->InputCount() <= input_count);
+  Node* result = Node::New(this, input_count, inputs);
+  result->Initialize(op);
+  for (DecoratorVector::iterator i = decorators_.begin();
+       i != decorators_.end(); ++i) {
+    (*i)->Decorate(result);
+  }
+  return result;
+}
+
+
+void Graph::ChangeOperator(Node* node, Operator* op) { node->set_op(op); }
+
+
+void Graph::DeleteNode(Node* node) {
+#if DEBUG
+  // Nodes can't be deleted if they have uses.
+  Node::Uses::iterator use_iterator(node->uses().begin());
+  ASSERT(use_iterator == node->uses().end());
+#endif
+
+#if DEBUG
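+  // Poison the deleted node's memory so stale uses fail fast in debug mode.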
+  memset(node, 0xDE, sizeof(Node));
+#endif
+}
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/graph.h b/src/compiler/graph.h
new file mode 100644 (file)
index 0000000..bc648d6
--- /dev/null
@@ -0,0 +1,97 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_H_
+#define V8_COMPILER_GRAPH_H_
+
+#include <map>
+#include <set>
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-aux-data.h"
+#include "src/compiler/source-position.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class GraphDecorator;
+
+
+class Graph : public GenericGraph<Node> {
+ public:
+  explicit Graph(Zone* zone);
+
+  // Base implementation used by all factory methods.
+  Node* NewNode(Operator* op, int input_count, Node** inputs);
+
+  // Factories for nodes with static input counts.
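+  // A typical use with a two-input operator (a sketch; 'op' is any
+  // Operator* whose InputCount() is 2):
+  //   Node* sum = graph->NewNode(op, left, right);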
+  Node* NewNode(Operator* op) {
+    return NewNode(op, 0, static_cast<Node**>(NULL));
+  }
+  Node* NewNode(Operator* op, Node* n1) { return NewNode(op, 1, &n1); }
+  Node* NewNode(Operator* op, Node* n1, Node* n2) {
+    Node* nodes[] = {n1, n2};
+    return NewNode(op, ARRAY_SIZE(nodes), nodes);
+  }
+  Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3) {
+    Node* nodes[] = {n1, n2, n3};
+    return NewNode(op, ARRAY_SIZE(nodes), nodes);
+  }
+  Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+    Node* nodes[] = {n1, n2, n3, n4};
+    return NewNode(op, ARRAY_SIZE(nodes), nodes);
+  }
+  Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5) {
+    Node* nodes[] = {n1, n2, n3, n4, n5};
+    return NewNode(op, ARRAY_SIZE(nodes), nodes);
+  }
+  Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4, Node* n5,
+                Node* n6) {
+    Node* nodes[] = {n1, n2, n3, n4, n5, n6};
+    return NewNode(op, ARRAY_SIZE(nodes), nodes);
+  }
+
+  void ChangeOperator(Node* node, Operator* op);
+  void DeleteNode(Node* node);
+
+  template <class Visitor>
+  void VisitNodeUsesFrom(Node* node, Visitor* visitor);
+
+  template <class Visitor>
+  void VisitNodeUsesFromStart(Visitor* visitor);
+
+  template <class Visitor>
+  void VisitNodeInputsFromEnd(Visitor* visitor);
+
+  void AddDecorator(GraphDecorator* decorator) {
+    decorators_.push_back(decorator);
+  }
+
+  void RemoveDecorator(GraphDecorator* decorator) {
+    DecoratorVector::iterator it =
+        std::find(decorators_.begin(), decorators_.end(), decorator);
+    ASSERT(it != decorators_.end());
+    decorators_.erase(it, it + 1);
+  }
+
+ private:
+  typedef std::vector<GraphDecorator*, zone_allocator<GraphDecorator*> >
+      DecoratorVector;
+  DecoratorVector decorators_;
+};
+
+
+class GraphDecorator : public ZoneObject {
+ public:
+  virtual ~GraphDecorator() {}
+  virtual void Decorate(Node* node) = 0;
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GRAPH_H_
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
new file mode 100644 (file)
index 0000000..38f7d4b
--- /dev/null
@@ -0,0 +1,929 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/ia32/assembler-ia32.h"
+#include "src/ia32/macro-assembler-ia32.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// Adds IA-32 specific methods for decoding operands.
+class IA32OperandConverter : public InstructionOperandConverter {
+ public:
+  IA32OperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
+
+  Immediate InputImmediate(int index) {
+    return ToImmediate(instr_->InputAt(index));
+  }
+
+  Operand OutputOperand() { return ToOperand(instr_->Output()); }
+
+  Operand TempOperand(int index) { return ToOperand(instr_->TempAt(index)); }
+
+  Operand ToOperand(InstructionOperand* op, int extra = 0) {
+    if (op->IsRegister()) {
+      ASSERT(extra == 0);
+      return Operand(ToRegister(op));
+    } else if (op->IsDoubleRegister()) {
+      ASSERT(extra == 0);
+      return Operand(ToDoubleRegister(op));
+    }
+    ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+    // The linkage computes where all spill slots are located.
+    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
+    return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+  }
+
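+  // Returns an operand addressing the upper half (the high kPointerSize
+  // bytes) of a double stack slot.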
+  Operand HighOperand(InstructionOperand* op) {
+    ASSERT(op->IsDoubleStackSlot());
+    return ToOperand(op, kPointerSize);
+  }
+
+  Immediate ToImmediate(InstructionOperand* operand) {
+    Constant constant = ToConstant(operand);
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Immediate(constant.ToInt32());
+      case Constant::kFloat64:
+        return Immediate(
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+      case Constant::kExternalReference:
+        return Immediate(constant.ToExternalReference());
+      case Constant::kHeapObject:
+        return Immediate(constant.ToHeapObject());
+      case Constant::kInt64:
+        break;
+    }
+    UNREACHABLE();
+    return Immediate(-1);
+  }
+
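+  // Decodes the memory operand encoded in this instruction's inputs,
+  // starting at *first_input, and advances *first_input past the inputs
+  // consumed by the addressing mode.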
+  Operand MemoryOperand(int* first_input) {
+    const int offset = *first_input;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_MR1I:
+        *first_input += 2;
+        return Operand(InputRegister(offset + 0), InputRegister(offset + 1),
+                       times_1,
+                       0);  // TODO(dcarney): K != 0
+      case kMode_MRI:
+        *first_input += 2;
+        return Operand::ForRegisterPlusImmediate(InputRegister(offset + 0),
+                                                 InputImmediate(offset + 1));
+      case kMode_MI:
+        *first_input += 1;
+        return Operand(InputImmediate(offset + 0));
+      default:
+        UNREACHABLE();
+        return Operand(no_reg);
+    }
+  }
+
+  Operand MemoryOperand() {
+    int first_input = 0;
+    return MemoryOperand(&first_input);
+  }
+};
+
+
+static bool HasImmediateInput(Instruction* instr, int index) {
+  return instr->InputAt(index)->IsImmediate();
+}
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  IA32OperandConverter i(this, instr);
+
+  switch (ArchOpcodeField::decode(instr->opcode())) {
+    case kArchJmp:
+      __ jmp(code()->GetLabel(i.InputBlock(0)));
+      break;
+    case kArchNop:
+      // Don't emit code for nops.
+      break;
+    case kArchRet:
+      AssembleReturn();
+      break;
+    case kArchDeoptimize: {
+      int deoptimization_id = MiscField::decode(instr->opcode());
+      BuildTranslation(instr, deoptimization_id);
+
+      Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+          isolate(), deoptimization_id, Deoptimizer::LAZY);
+      __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+      break;
+    }
+    case kIA32Add:
+      if (HasImmediateInput(instr, 1)) {
+        __ add(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ add(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32And:
+      if (HasImmediateInput(instr, 1)) {
+        __ and_(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ and_(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32Cmp:
+      if (HasImmediateInput(instr, 1)) {
+        __ cmp(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ cmp(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32Test:
+      if (HasImmediateInput(instr, 1)) {
+        __ test(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ test(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32Imul:
+      if (HasImmediateInput(instr, 1)) {
+        __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
+      } else {
+        __ imul(i.OutputRegister(), i.InputOperand(1));
+      }
+      break;
+    case kIA32Idiv:
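+      // cdq sign-extends eax into edx:eax before the signed division.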
+      __ cdq();
+      __ idiv(i.InputOperand(1));
+      break;
+    case kIA32Udiv:
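+      // Clear edx to form the unsigned dividend edx:eax.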
+      __ xor_(edx, edx);
+      __ div(i.InputOperand(1));
+      break;
+    case kIA32Not:
+      __ not_(i.OutputOperand());
+      break;
+    case kIA32Neg:
+      __ neg(i.OutputOperand());
+      break;
+    case kIA32Or:
+      if (HasImmediateInput(instr, 1)) {
+        __ or_(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ or_(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32Xor:
+      if (HasImmediateInput(instr, 1)) {
+        __ xor_(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ xor_(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32Sub:
+      if (HasImmediateInput(instr, 1)) {
+        __ sub(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ sub(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32Shl:
+      if (HasImmediateInput(instr, 1)) {
+        __ shl(i.OutputRegister(), i.InputInt5(1));
+      } else {
+        __ shl_cl(i.OutputRegister());
+      }
+      break;
+    case kIA32Shr:
+      if (HasImmediateInput(instr, 1)) {
+        __ shr(i.OutputRegister(), i.InputInt5(1));
+      } else {
+        __ shr_cl(i.OutputRegister());
+      }
+      break;
+    case kIA32Sar:
+      if (HasImmediateInput(instr, 1)) {
+        __ sar(i.OutputRegister(), i.InputInt5(1));
+      } else {
+        __ sar_cl(i.OutputRegister());
+      }
+      break;
+    case kIA32Push:
+      if (HasImmediateInput(instr, 0)) {
+        __ push(i.InputImmediate(0));
+      } else {
+        __ push(i.InputOperand(0));
+      }
+      break;
+    case kIA32CallCodeObject: {
+      if (HasImmediateInput(instr, 0)) {
+        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+        __ call(code, RelocInfo::CODE_TARGET);
+      } else {
+        Register reg = i.InputRegister(0);
+        int entry = Code::kHeaderSize - kHeapObjectTag;
+        __ call(Operand(reg, entry));
+      }
+      RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+                      Safepoint::kNoLazyDeopt);
+
+      bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
+      if (lazy_deopt) {
+        RecordLazyDeoptimizationEntry(instr);
+      }
+      AddNopForSmiCodeInlining();
+      break;
+    }
+    case kIA32CallAddress:
+      if (HasImmediateInput(instr, 0)) {
+        // TODO(dcarney): wire up EXTERNAL_REFERENCE instead of RUNTIME_ENTRY.
+        __ call(reinterpret_cast<byte*>(i.InputInt32(0)),
+                RelocInfo::RUNTIME_ENTRY);
+      } else {
+        __ call(i.InputRegister(0));
+      }
+      break;
+    case kPopStack: {
+      int words = MiscField::decode(instr->opcode());
+      __ add(esp, Immediate(kPointerSize * words));
+      break;
+    }
+    case kIA32CallJSFunction: {
+      Register func = i.InputRegister(0);
+
+      // TODO(jarin) The load of the context should be separated from the call.
+      __ mov(esi, FieldOperand(func, JSFunction::kContextOffset));
+      __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
+
+      RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+                      Safepoint::kNoLazyDeopt);
+      RecordLazyDeoptimizationEntry(instr);
+      break;
+    }
+    case kSSEFloat64Cmp:
+      __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+      break;
+    case kSSEFloat64Add:
+      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Sub:
+      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Mul:
+      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Div:
+      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Mod: {
+      // TODO(dcarney): alignment is wrong.
+      __ sub(esp, Immediate(kDoubleSize));
+      // Move values to st(0) and st(1).
+      __ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
+      __ fld_d(Operand(esp, 0));
+      __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
+      __ fld_d(Operand(esp, 0));
+      // Loop while fprem isn't done.
+      Label mod_loop;
+      __ bind(&mod_loop);
+      // This instruction traps on all kinds of inputs, but we assume the
+      // floating point control word is set to ignore them all.
+      __ fprem();
+      // The following 2 instructions implicitly use eax.
+      __ fnstsw_ax();
+      __ sahf();
+      __ j(parity_even, &mod_loop);
+      // Move output to stack and clean up.
+      __ fstp(1);
+      __ fstp_d(Operand(esp, 0));
+      __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
+      __ add(esp, Immediate(kDoubleSize));
+      break;
+    }
+    case kSSEFloat64ToInt32:
+      __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
+      break;
+    case kSSEInt32ToFloat64:
+      __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+      break;
+    case kSSELoad:
+      __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+      break;
+    case kSSEStore: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ movsd(operand, i.InputDoubleRegister(index));
+      break;
+    }
+    case kIA32LoadWord8:
+      __ movzx_b(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kIA32StoreWord8: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ mov_b(operand, i.InputRegister(index));
+      break;
+    }
+    case kIA32StoreWord8I: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ mov_b(operand, i.InputInt8(index));
+      break;
+    }
+    case kIA32LoadWord16:
+      __ movzx_w(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kIA32StoreWord16: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ mov_w(operand, i.InputRegister(index));
+      break;
+    }
+    case kIA32StoreWord16I: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ mov_w(operand, i.InputInt16(index));
+      break;
+    }
+    case kIA32LoadWord32:
+      __ mov(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kIA32StoreWord32: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ mov(operand, i.InputRegister(index));
+      break;
+    }
+    case kIA32StoreWord32I: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ mov(operand, i.InputImmediate(index));
+      break;
+    }
+    case kIA32StoreWriteBarrier: {
+      Register object = i.InputRegister(0);
+      Register index = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      __ mov(Operand(object, index, times_1, 0), value);
+      __ lea(index, Operand(object, index, times_1, 0));
+      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
+                                ? kSaveFPRegs
+                                : kDontSaveFPRegs;
+      __ RecordWrite(object, index, value, mode);
+      break;
+    }
+  }
+}
+
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+                                       FlagsCondition condition) {
+  IA32OperandConverter i(this, instr);
+  Label done;
+
+  // Emit a branch. The true and false targets are always the last two inputs
+  // to the instruction.
+  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+  bool fallthru = IsNextInAssemblyOrder(fblock);
+  Label* tlabel = code()->GetLabel(tblock);
+  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
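+  // ucomisd sets the parity flag if either input is NaN, so the kUnordered*
+  // cases dispatch on parity before testing the ordered condition.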
+  switch (condition) {
+    case kUnorderedEqual:
+      __ j(parity_even, flabel, flabel_distance);
+    // Fall through.
+    case kEqual:
+      __ j(equal, tlabel);
+      break;
+    case kUnorderedNotEqual:
+      __ j(parity_even, tlabel);
+    // Fall through.
+    case kNotEqual:
+      __ j(not_equal, tlabel);
+      break;
+    case kSignedLessThan:
+      __ j(less, tlabel);
+      break;
+    case kSignedGreaterThanOrEqual:
+      __ j(greater_equal, tlabel);
+      break;
+    case kSignedLessThanOrEqual:
+      __ j(less_equal, tlabel);
+      break;
+    case kSignedGreaterThan:
+      __ j(greater, tlabel);
+      break;
+    case kUnorderedLessThan:
+      __ j(parity_even, flabel, flabel_distance);
+    // Fall through.
+    case kUnsignedLessThan:
+      __ j(below, tlabel);
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ j(parity_even, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      __ j(above_equal, tlabel);
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ j(parity_even, flabel, flabel_distance);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      __ j(below_equal, tlabel);
+      break;
+    case kUnorderedGreaterThan:
+      __ j(parity_even, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      __ j(above, tlabel);
+      break;
+  }
+  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
+  __ bind(&done);
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  IA32OperandConverter i(this, instr);
+  Label done;
+
+  // Materialize a full 32-bit 1 or 0 value.
+  Label check;
+  Register reg = i.OutputRegister();
+  Condition cc = no_condition;
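+  // As in AssembleArchBranch, the kUnordered* cases first branch on the
+  // parity flag, which ucomisd sets when either input was NaN.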
+  switch (condition) {
+    case kUnorderedEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ mov(reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kEqual:
+      cc = equal;
+      break;
+    case kUnorderedNotEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ mov(reg, Immediate(1));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kNotEqual:
+      cc = not_equal;
+      break;
+    case kSignedLessThan:
+      cc = less;
+      break;
+    case kSignedGreaterThanOrEqual:
+      cc = greater_equal;
+      break;
+    case kSignedLessThanOrEqual:
+      cc = less_equal;
+      break;
+    case kSignedGreaterThan:
+      cc = greater;
+      break;
+    case kUnorderedLessThan:
+      __ j(parity_odd, &check, Label::kNear);
+      __ mov(reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedLessThan:
+      cc = below;
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ mov(reg, Immediate(1));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      cc = above_equal;
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ mov(reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      cc = below_equal;
+      break;
+    case kUnorderedGreaterThan:
+      __ j(parity_odd, &check, Label::kNear);
+      __ mov(reg, Immediate(1));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      cc = above;
+      break;
+  }
+  __ bind(&check);
+  if (reg.is_byte_register()) {
+    // setcc for byte registers (al, bl, cl, dl).
+    __ setcc(cc, reg);
+    __ movzx_b(reg, reg);
+  } else {
+    // Emit a branch to set a register to either 1 or 0.
+    Label set;
+    __ j(cc, &set, Label::kNear);
+    __ mov(reg, Immediate(0));
+    __ jmp(&done, Label::kNear);
+    __ bind(&set);
+    __ mov(reg, Immediate(1));
+  }
+  __ bind(&done);
+}
+
+
+// The calling convention for JSFunctions on IA32 passes arguments on the
+// stack and the JSFunction and context in EDI and ESI, respectively; thus
+// the steps of the call look as follows:
+
+// --{ before the call instruction }--------------------------------------------
+//                                                         |  caller frame |
+//                                                         ^ esp           ^ ebp
+
+// --{ push arguments and setup ESI, EDI }--------------------------------------
+//                                       | args + receiver |  caller frame |
+//                                       ^ esp                             ^ ebp
+//                 [edi = JSFunction, esi = context]
+
+// --{ call [edi + kCodeEntryOffset] }------------------------------------------
+//                                 | RET | args + receiver |  caller frame |
+//                                 ^ esp                                   ^ ebp
+
+// =={ prologue of called function }============================================
+// --{ push ebp }---------------------------------------------------------------
+//                            | FP | RET | args + receiver |  caller frame |
+//                            ^ esp                                        ^ ebp
+
+// --{ mov ebp, esp }-----------------------------------------------------------
+//                            | FP | RET | args + receiver |  caller frame |
+//                            ^ ebp,esp
+
+// --{ push esi }---------------------------------------------------------------
+//                      | CTX | FP | RET | args + receiver |  caller frame |
+//                      ^esp  ^ ebp
+
+// --{ push edi }---------------------------------------------------------------
+//                | FNC | CTX | FP | RET | args + receiver |  caller frame |
+//                ^esp        ^ ebp
+
+// --{ subi esp, #N }-----------------------------------------------------------
+// | callee frame | FNC | CTX | FP | RET | args + receiver |  caller frame |
+// ^esp                       ^ ebp
+
+// =={ body of called function }================================================
+
+// =={ epilogue of called function }============================================
+// --{ mov esp, ebp }-----------------------------------------------------------
+//                            | FP | RET | args + receiver |  caller frame |
+//                            ^ esp,ebp
+
+// --{ pop ebp }----------------------------------------------------------------
+// |                               | RET | args + receiver |  caller frame |
+//                                 ^ esp                                   ^ ebp
+
+// --{ ret #A+1 }---------------------------------------------------------------
+// |                                                       |  caller frame |
+//                                                         ^ esp           ^ ebp
+
+
+// Runtime function calls are accomplished by doing a stub call to the
+// CEntryStub (a real code object). On IA32 it passes arguments on the
+// stack, the number of arguments in EAX, the address of the runtime function
+// in EBX, and the context in ESI.
+
+// --{ before the call instruction }--------------------------------------------
+//                                                         |  caller frame |
+//                                                         ^ esp           ^ ebp
+
+// --{ push arguments and setup EAX, EBX, and ESI }-----------------------------
+//                                       | args + receiver |  caller frame |
+//                                       ^ esp                             ^ ebp
+//              [eax = #args, ebx = runtime function, esi = context]
+
+// --{ call #CEntryStub }-------------------------------------------------------
+//                                 | RET | args + receiver |  caller frame |
+//                                 ^ esp                                   ^ ebp
+
+// =={ body of runtime function }===============================================
+
+// --{ runtime returns }--------------------------------------------------------
+//                                                         |  caller frame |
+//                                                         ^ esp           ^ ebp
+
+// Other custom linkages (e.g. for calling directly into and out of C++) may
+// need to save callee-saved registers on the stack, which is done in the
+// function prologue of generated code.
+
+// --{ before the call instruction }--------------------------------------------
+//                                                         |  caller frame |
+//                                                         ^ esp           ^ ebp
+
+// --{ set up arguments in registers on stack }---------------------------------
+//                                                  | args |  caller frame |
+//                                                  ^ esp                  ^ ebp
+//                  [r0 = arg0, r1 = arg1, ...]
+
+// --{ call code }--------------------------------------------------------------
+//                                            | RET | args |  caller frame |
+//                                            ^ esp                        ^ ebp
+
+// =={ prologue of called function }============================================
+// --{ push ebp }---------------------------------------------------------------
+//                                       | FP | RET | args |  caller frame |
+//                                       ^ esp                             ^ ebp
+
+// --{ mov ebp, esp }-----------------------------------------------------------
+//                                       | FP | RET | args |  caller frame |
+//                                       ^ ebp,esp
+
+// --{ save registers }---------------------------------------------------------
+//                                | regs | FP | RET | args |  caller frame |
+//                                ^ esp  ^ ebp
+
+// --{ subi esp, #N }-----------------------------------------------------------
+//                 | callee frame | regs | FP | RET | args |  caller frame |
+//                 ^esp                  ^ ebp
+
+// =={ body of called function }================================================
+
+// =={ epilogue of called function }============================================
+// --{ restore registers }------------------------------------------------------
+//                                | regs | FP | RET | args |  caller frame |
+//                                ^ esp  ^ ebp
+
+// --{ mov esp, ebp }-----------------------------------------------------------
+//                                       | FP | RET | args |  caller frame |
+//                                       ^ esp,ebp
+
+// --{ pop ebp }----------------------------------------------------------------
+//                                            | RET | args |  caller frame |
+//                                            ^ esp                        ^ ebp
+
+
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  Frame* frame = code_->frame();
+  int stack_slots = frame->GetSpillSlotCount();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    // Assemble a prologue similar to the cdecl calling convention.
+    __ push(ebp);
+    __ mov(ebp, esp);
+    const RegList saves = descriptor->CalleeSavedRegisters();
+    if (saves != 0) {  // Save callee-saved registers.
+      int register_save_area_size = 0;
+      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+        if (!((1 << i) & saves)) continue;
+        __ push(Register::from_code(i));
+        register_save_area_size += kPointerSize;
+      }
+      frame->SetRegisterSaveAreaSize(register_save_area_size);
+    }
+  } else if (descriptor->IsJSFunctionCall()) {
+    CompilationInfo* info = linkage()->info();
+    __ Prologue(info->IsCodePreAgingActive());
+    frame->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+
+    // Sloppy mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
+    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+    if (info->strict_mode() == SLOPPY && !info->is_native()) {
+      Label ok;
+      // +2 for return address and saved frame pointer.
+      int receiver_slot = info->scope()->num_parameters() + 2;
+      __ mov(ecx, Operand(ebp, receiver_slot * kPointerSize));
+      __ cmp(ecx, isolate()->factory()->undefined_value());
+      __ j(not_equal, &ok, Label::kNear);
+      __ mov(ecx, GlobalObjectOperand());
+      __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
+      __ mov(Operand(ebp, receiver_slot * kPointerSize), ecx);
+      __ bind(&ok);
+    }
+
+  } else {
+    __ StubPrologue();
+    frame->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+  }
+  if (stack_slots > 0) {
+    __ sub(esp, Immediate(stack_slots * kPointerSize));
+  }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    const RegList saves = descriptor->CalleeSavedRegisters();
+    if (frame()->GetRegisterSaveAreaSize() > 0) {
+      // Remove this frame's spill slots first.
+      int stack_slots = frame()->GetSpillSlotCount();
+      if (stack_slots > 0) {
+        __ add(esp, Immediate(stack_slots * kPointerSize));
+      }
+      // Restore registers.
+      if (saves != 0) {
+        for (int i = 0; i < Register::kNumRegisters; i++) {
+          if (!((1 << i) & saves)) continue;
+          __ pop(Register::from_code(i));
+        }
+      }
+      __ pop(ebp);  // Pop caller's frame pointer.
+      __ ret(0);
+    } else {
+      // No saved registers.
+      __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
+      __ pop(ebp);       // Pop caller's frame pointer.
+      __ ret(0);
+    }
+  } else {
+    __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
+    __ pop(ebp);       // Pop caller's frame pointer.
+    int pop_count =
+        descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
+    __ ret(pop_count * kPointerSize);
+  }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  IA32OperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    ASSERT(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    Operand dst = g.ToOperand(destination);
+    __ mov(dst, src);
+  } else if (source->IsStackSlot()) {
+    ASSERT(destination->IsRegister() || destination->IsStackSlot());
+    Operand src = g.ToOperand(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ mov(dst, src);
+    } else {
+      Operand dst = g.ToOperand(destination);
+      __ push(src);
+      __ pop(dst);
+    }
+  } else if (source->IsConstant()) {
+    Constant src_constant = g.ToConstant(source);
+    if (src_constant.type() == Constant::kHeapObject) {
+      Handle<HeapObject> src = src_constant.ToHeapObject();
+      if (destination->IsRegister()) {
+        Register dst = g.ToRegister(destination);
+        __ LoadHeapObject(dst, src);
+      } else {
+        ASSERT(destination->IsStackSlot());
+        Operand dst = g.ToOperand(destination);
+        AllowDeferredHandleDereference embedding_raw_address;
+        if (isolate()->heap()->InNewSpace(*src)) {
+          __ PushHeapObject(src);
+          __ pop(dst);
+        } else {
+          __ mov(dst, src);
+        }
+      }
+    } else if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ mov(dst, g.ToImmediate(source));
+    } else if (destination->IsStackSlot()) {
+      Operand dst = g.ToOperand(destination);
+      __ mov(dst, g.ToImmediate(source));
+    } else {
+      double v = g.ToDouble(source);
+      uint64_t int_val = BitCast<uint64_t, double>(v);
+      int32_t lower = static_cast<int32_t>(int_val);
+      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
+      if (destination->IsDoubleRegister()) {
+        XMMRegister dst = g.ToDoubleRegister(destination);
+        __ Move(dst, v);
+      } else {
+        ASSERT(destination->IsDoubleStackSlot());
+        Operand dst0 = g.ToOperand(destination);
+        Operand dst1 = g.HighOperand(destination);
+        __ mov(dst0, Immediate(lower));
+        __ mov(dst1, Immediate(upper));
+      }
+    }
+  } else if (source->IsDoubleRegister()) {
+    XMMRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      XMMRegister dst = g.ToDoubleRegister(destination);
+      __ movaps(dst, src);
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      Operand dst = g.ToOperand(destination);
+      __ movsd(dst, src);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    ASSERT(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    Operand src = g.ToOperand(source);
+    if (destination->IsDoubleRegister()) {
+      XMMRegister dst = g.ToDoubleRegister(destination);
+      __ movsd(dst, src);
+    } else {
+      // We rely on having xmm0 available as a fixed scratch register.
+      Operand dst = g.ToOperand(destination);
+      __ movsd(xmm0, src);
+      __ movsd(dst, xmm0);
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  IA32OperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister() && destination->IsRegister()) {
+    // Register-register.
+    Register src = g.ToRegister(source);
+    Register dst = g.ToRegister(destination);
+    __ xchg(dst, src);
+  } else if (source->IsRegister() && destination->IsStackSlot()) {
+    // Register-memory.
+    __ xchg(g.ToRegister(source), g.ToOperand(destination));
+  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
+    // Memory-memory.
+    Operand src = g.ToOperand(source);
+    Operand dst = g.ToOperand(destination);
+    __ push(dst);
+    __ push(src);
+    __ pop(dst);
+    __ pop(src);
+  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+    // XMM register-register swap. We rely on having xmm0
+    // available as a fixed scratch register.
+    XMMRegister src = g.ToDoubleRegister(source);
+    XMMRegister dst = g.ToDoubleRegister(destination);
+    __ movaps(xmm0, src);
+    __ movaps(src, dst);
+    __ movaps(dst, xmm0);
+  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+    // XMM register-memory swap.  We rely on having xmm0
+    // available as a fixed scratch register.
+    XMMRegister reg = g.ToDoubleRegister(source);
+    Operand other = g.ToOperand(destination);
+    __ movsd(xmm0, other);
+    __ movsd(other, reg);
+    __ movaps(reg, xmm0);
+  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
+    // Double-width memory-to-memory.
+    Operand src0 = g.ToOperand(source);
+    Operand src1 = g.HighOperand(source);
+    Operand dst0 = g.ToOperand(destination);
+    Operand dst1 = g.HighOperand(destination);
+    __ movsd(xmm0, dst0);  // Save destination in xmm0.
+    __ push(src0);         // Then use stack to copy source to destination.
+    __ pop(dst0);
+    __ push(src1);
+    __ pop(dst1);
+    __ movsd(src0, xmm0);
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
+
+#undef __
+
+#ifdef DEBUG
+
+// Checks whether the code between start_pc and end_pc is a single nop.
+bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
+                                            int end_pc) {
+  if (start_pc + 1 != end_pc) {
+    return false;
+  }
+  return *(code->instruction_start() + start_pc) ==
+         v8::internal::Assembler::kNopByte;
+}
+
+#endif  // DEBUG
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/ia32/instruction-codes-ia32.h b/src/compiler/ia32/instruction-codes-ia32.h
new file mode 100644 (file)
index 0000000..82fca55
--- /dev/null
@@ -0,0 +1,86 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
+#define V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// IA32-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(IA32Add)                       \
+  V(IA32And)                       \
+  V(IA32Cmp)                       \
+  V(IA32Test)                      \
+  V(IA32Or)                        \
+  V(IA32Xor)                       \
+  V(IA32Sub)                       \
+  V(IA32Imul)                      \
+  V(IA32Idiv)                      \
+  V(IA32Udiv)                      \
+  V(IA32Not)                       \
+  V(IA32Neg)                       \
+  V(IA32Shl)                       \
+  V(IA32Shr)                       \
+  V(IA32Sar)                       \
+  V(IA32Push)                      \
+  V(IA32CallCodeObject)            \
+  V(IA32CallAddress)               \
+  V(PopStack)                      \
+  V(IA32CallJSFunction)            \
+  V(SSEFloat64Cmp)                 \
+  V(SSEFloat64Add)                 \
+  V(SSEFloat64Sub)                 \
+  V(SSEFloat64Mul)                 \
+  V(SSEFloat64Div)                 \
+  V(SSEFloat64Mod)                 \
+  V(SSEFloat64ToInt32)             \
+  V(SSEInt32ToFloat64)             \
+  V(SSELoad)                       \
+  V(SSEStore)                      \
+  V(IA32LoadWord8)                 \
+  V(IA32StoreWord8)                \
+  V(IA32StoreWord8I)               \
+  V(IA32LoadWord16)                \
+  V(IA32StoreWord16)               \
+  V(IA32StoreWord16I)              \
+  V(IA32LoadWord32)                \
+  V(IA32StoreWord32)               \
+  V(IA32StoreWord32I)              \
+  V(IA32StoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MR = [register]
+// MI = [immediate]
+// MRN = [register + register * N in {1, 2, 4, 8}]
+// MRI = [register + immediate]
+// MRNI = [register + register * N in {1, 2, 4, 8} + immediate]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+  V(MI)   /* [K] */                    \
+  V(MR)   /* [%r0] */                  \
+  V(MRI)  /* [%r0 + K] */              \
+  V(MR1I) /* [%r0 + %r1 * 1 + K] */    \
+  V(MR2I) /* [%r0 + %r1 * 2 + K] */    \
+  V(MR4I) /* [%r0 + %r1 * 4 + K] */    \
+  V(MR8I) /* [%r0 + %r1 * 8 + K] */
+
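+// For example (a sketch), the instruction selector combines an arch opcode
+// with an addressing mode when emitting:
+//   Emit(kIA32LoadWord32 | AddressingModeField::encode(kMode_MRI), output,
+//        g.UseRegister(base), g.UseImmediate(index));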
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
new file mode 100644 (file)
index 0000000..8d6ca1e
--- /dev/null
@@ -0,0 +1,504 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Adds IA32-specific methods for generating operands.
+class IA32OperandGenerator V8_FINAL : public OperandGenerator {
+ public:
+  explicit IA32OperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
+  InstructionOperand* UseByteRegister(Node* node) {
+    // TODO(dcarney): relax constraint.
+    return UseFixed(node, edx);
+  }
+
+  bool CanBeImmediate(Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant:
+      case IrOpcode::kNumberConstant:
+      case IrOpcode::kExternalConstant:
+        return true;
+      case IrOpcode::kHeapConstant: {
+        // Constants in new space cannot be used as immediates in V8 because
+        // the GC does not scan code objects when collecting the new generation.
+        Handle<HeapObject> value = ValueOf<Handle<HeapObject> >(node->op());
+        return !isolate()->heap()->InNewSpace(*value);
+      }
+      default:
+        return false;
+    }
+  }
+};
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+  MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
+  IA32OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  InstructionOperand* output = rep == kMachineFloat64
+                                   ? g.DefineAsDoubleRegister(node)
+                                   : g.DefineAsRegister(node);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kMachineFloat64:
+      opcode = kSSELoad;
+      break;
+    case kMachineWord8:
+      opcode = kIA32LoadWord8;
+      break;
+    case kMachineWord16:
+      opcode = kIA32LoadWord16;
+      break;
+    case kMachineTagged:  // Fall through.
+    case kMachineWord32:
+      opcode = kIA32LoadWord32;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(base)) {
+    if (Int32Matcher(index).Is(0)) {  // load [#base + #0]
+      Emit(opcode | AddressingModeField::encode(kMode_MI), output,
+           g.UseImmediate(base));
+    } else {  // load [#base + %index]
+      Emit(opcode | AddressingModeField::encode(kMode_MRI), output,
+           g.UseRegister(index), g.UseImmediate(base));
+    }
+  } else if (g.CanBeImmediate(index)) {  // load [%base + #index]
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), output,
+         g.UseRegister(base), g.UseImmediate(index));
+  } else {  // load [%base + %index + K]
+    Emit(opcode | AddressingModeField::encode(kMode_MR1I), output,
+         g.UseRegister(base), g.UseRegister(index));
+  }
+  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+  IA32OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+  MachineRepresentation rep = store_rep.rep;
+  if (store_rep.write_barrier_kind == kFullWriteBarrier) {
+    ASSERT_EQ(kMachineTagged, rep);
+    // TODO(dcarney): refactor RecordWrite function to take temp registers
+    //                and pass them here instead of using fixed regs
+    // TODO(dcarney): handle immediate indices.
+    InstructionOperand* temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
+    Emit(kIA32StoreWriteBarrier, NULL, g.UseFixed(base, ebx),
+         g.UseFixed(index, ecx), g.UseFixed(value, edx), ARRAY_SIZE(temps),
+         temps);
+    return;
+  }
+  ASSERT_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
+  bool is_immediate = false;
+  InstructionOperand* val;
+  if (rep == kMachineFloat64) {
+    val = g.UseDoubleRegister(value);
+  } else {
+    is_immediate = g.CanBeImmediate(value);
+    if (is_immediate) {
+      val = g.UseImmediate(value);
+    } else if (rep == kMachineWord8) {
+      val = g.UseByteRegister(value);
+    } else {
+      val = g.UseRegister(value);
+    }
+  }
+  ArchOpcode opcode;
+  switch (rep) {
+    case kMachineFloat64:
+      opcode = kSSEStore;
+      break;
+    case kMachineWord8:
+      opcode = is_immediate ? kIA32StoreWord8I : kIA32StoreWord8;
+      break;
+    case kMachineWord16:
+      opcode = is_immediate ? kIA32StoreWord16I : kIA32StoreWord16;
+      break;
+    case kMachineTagged:  // Fall through.
+    case kMachineWord32:
+      opcode = is_immediate ? kIA32StoreWord32I : kIA32StoreWord32;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(base)) {
+    if (Int32Matcher(index).Is(0)) {  // store [#base], %|#value
+      Emit(opcode | AddressingModeField::encode(kMode_MI), NULL,
+           g.UseImmediate(base), val);
+    } else {  // store [#base + %index], %|#value
+      Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+           g.UseRegister(index), g.UseImmediate(base), val);
+    }
+  } else if (g.CanBeImmediate(index)) {  // store [%base + #index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+         g.UseRegister(base), g.UseImmediate(index), val);
+  } else {  // store [%base + %index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
+         g.UseRegister(base), g.UseRegister(index), val);
+  }
+  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+}
+
+
+// Shared routine for multiple binary operations.
+static inline void VisitBinop(InstructionSelector* selector, Node* node,
+                              ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  // TODO(turbofan): match complex addressing modes.
+  // TODO(turbofan): if commutative, pick the non-live-in operand as the left as
+  // this might be the last use and therefore its register can be reused.
+  if (g.CanBeImmediate(right)) {
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
+                   g.UseImmediate(right));
+  } else if (g.CanBeImmediate(left) &&
+             node->op()->HasProperty(Operator::kCommutative)) {
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(right),
+                   g.UseImmediate(left));
+  } else {
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.Use(right));
+  }
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  VisitBinop(this, node, kIA32And);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  VisitBinop(this, node, kIA32Or);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  IA32OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    Emit(kIA32Not, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+  } else {
+    VisitBinop(this, node, kIA32Xor);
+  }
+}
+
+
+// Shared routine for multiple shift operations.
+static inline void VisitShift(InstructionSelector* selector, Node* node,
+                              ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // TODO(turbofan): assembler only supports some addressing modes for shifts.
+  if (g.CanBeImmediate(right)) {
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseImmediate(right));
+  } else {
+    Int32BinopMatcher m(node);
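+    // IA-32 shifts use only the low 5 bits of the count in %cl, so an
+    // explicit 'count & 0x1F' mask on the shift amount is redundant and can
+    // be stripped.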
+    if (m.right().IsWord32And()) {
+      Int32BinopMatcher mright(right);
+      if (mright.right().Is(0x1F)) {
+        right = mright.left().node();
+      }
+    }
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseFixed(right, ecx));
+  }
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  VisitShift(this, node, kIA32Shl);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  VisitShift(this, node, kIA32Shr);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  VisitShift(this, node, kIA32Sar);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  VisitBinop(this, node, kIA32Add);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  IA32OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().Is(0)) {
+    Emit(kIA32Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
+  } else {
+    VisitBinop(this, node, kIA32Sub);
+  }
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  IA32OperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  if (g.CanBeImmediate(right)) {
+    Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
+         g.UseImmediate(right));
+  } else if (g.CanBeImmediate(left)) {
+    Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(right),
+         g.UseImmediate(left));
+  } else {
+    // TODO(turbofan): select better left operand.
+    Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
+         g.Use(right));
+  }
+}
+
+
+static inline void VisitDiv(InstructionSelector* selector, Node* node,
+                            ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
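+  // idiv/div read the dividend from edx:eax and produce the quotient in eax
+  // and the remainder in edx, hence the fixed register constraints.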
+  InstructionOperand* temps[] = {g.TempRegister(edx)};
+  size_t temp_count = ARRAY_SIZE(temps);
+  selector->Emit(opcode, g.DefineAsFixed(node, eax),
+                 g.UseFixed(node->InputAt(0), eax),
+                 g.UseUnique(node->InputAt(1)), temp_count, temps);
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  VisitDiv(this, node, kIA32Idiv);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+  VisitDiv(this, node, kIA32Udiv);
+}
+
+
+static inline void VisitMod(InstructionSelector* selector, Node* node,
+                            ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
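+  // The remainder ends up in edx and eax is clobbered by the quotient, so
+  // both registers are reserved.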
+  InstructionOperand* temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
+  size_t temp_count = ARRAY_SIZE(temps);
+  selector->Emit(opcode, g.DefineAsFixed(node, edx),
+                 g.UseFixed(node->InputAt(0), eax),
+                 g.UseUnique(node->InputAt(1)), temp_count, temps);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  VisitMod(this, node, kIA32Idiv);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+  VisitMod(this, node, kIA32Udiv);
+}
+
+
+void InstructionSelector::VisitConvertInt32ToFloat64(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEInt32ToFloat64, g.DefineAsDoubleRegister(node),
+       g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitConvertFloat64ToInt32(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
+       g.UseDoubleRegister(node->InputAt(0)),
+       g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
+       g.UseDoubleRegister(node->InputAt(0)),
+       g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
+       g.UseDoubleRegister(node->InputAt(0)),
+       g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
+       g.UseDoubleRegister(node->InputAt(0)),
+       g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  IA32OperandGenerator g(this);
+  InstructionOperand* temps[] = {g.TempRegister(eax)};
+  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
+       g.UseDoubleRegister(node->InputAt(0)),
+       g.UseDoubleRegister(node->InputAt(1)), 1, temps);
+}
+
+
+// Shared routine for multiple compare operations.
+static inline void VisitCompare(InstructionSelector* selector,
+                                InstructionCode opcode,
+                                InstructionOperand* left,
+                                InstructionOperand* right,
+                                FlagsContinuation* cont) {
+  IA32OperandGenerator g(selector);
+  if (cont->IsBranch()) {
+    selector->Emit(cont->Encode(opcode), NULL, left, right,
+                   g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    ASSERT(cont->IsSet());
+    // TODO(titzer): Needs byte register.
+    selector->Emit(cont->Encode(opcode), g.DefineAsRegister(cont->result()),
+                   left, right);
+  }
+}
+
+
+// Shared routine for multiple word compare operations.
+static inline void VisitWordCompare(InstructionSelector* selector, Node* node,
+                                    InstructionCode opcode,
+                                    FlagsContinuation* cont, bool commutative) {
+  IA32OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right)) {
+    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
+  } else if (g.CanBeImmediate(left)) {
+    if (!commutative) cont->Commute();
+    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
+  } else {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
+  }
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt32Sub:
+      return VisitWordCompare(this, node, kIA32Cmp, cont, false);
+    case IrOpcode::kWord32And:
+      return VisitWordCompare(this, node, kIA32Test, cont, true);
+    default:
+      break;
+  }
+
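+  // Fall back to "test x, -1": ANDing the value with all ones sets the flags
+  // according to the value itself.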
+  IA32OperandGenerator g(this);
+  VisitCompare(this, kIA32Test, g.Use(node), g.TempImmediate(-1), cont);
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kIA32Cmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+                                              FlagsContinuation* cont) {
+  IA32OperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(this, kSSEFloat64Cmp, g.UseDoubleRegister(left), g.Use(right),
+               cont);
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+                                    BasicBlock* deoptimization) {
+  IA32OperandGenerator g(this);
+  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+  CallBuffer buffer(zone(), descriptor);
+
+  // Compute InstructionOperands for inputs and outputs.
+  InitializeCallBuffer(call, &buffer, true, true, continuation, deoptimization);
+
+  // Push any stack arguments.
+  for (int i = buffer.pushed_count - 1; i >= 0; --i) {
+    Node* input = buffer.pushed_nodes[i];
+    // TODO(titzer): handle pushing double parameters.
+    Emit(kIA32Push, NULL,
+         g.CanBeImmediate(input) ? g.UseImmediate(input) : g.Use(input));
+  }
+
+  // Select the appropriate opcode based on the call type.
+  InstructionCode opcode;
+  switch (descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject: {
+      bool lazy_deopt = descriptor->CanLazilyDeoptimize();
+      opcode = kIA32CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+      break;
+    }
+    case CallDescriptor::kCallAddress:
+      opcode = kIA32CallAddress;
+      break;
+    case CallDescriptor::kCallJSFunction:
+      opcode = kIA32CallJSFunction;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  // Emit the call instruction.
+  Instruction* call_instr =
+      Emit(opcode, buffer.output_count, buffer.outputs,
+           buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+
+  call_instr->MarkAsCall();
+  if (deoptimization != NULL) {
+    ASSERT(continuation != NULL);
+    call_instr->MarkAsControl();
+  }
+
+  // The caller cleans up the stack for C-style calls.
+  if (descriptor->kind() == CallDescriptor::kCallAddress &&
+      buffer.pushed_count > 0) {
+    ASSERT(deoptimization == NULL && continuation == NULL);
+    Emit(kPopStack | MiscField::encode(buffer.pushed_count), NULL);
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/ia32/linkage-ia32.cc b/src/compiler/ia32/linkage-ia32.cc
new file mode 100644 (file)
index 0000000..803d3f6
--- /dev/null
@@ -0,0 +1,62 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
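+// IA32-specific register conventions plugged into the generic linkage helper.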
+struct LinkageHelperTraits {
+  static Register ReturnValueReg() { return eax; }
+  static Register ReturnValue2Reg() { return edx; }
+  static Register JSCallFunctionReg() { return edi; }
+  static Register ContextReg() { return esi; }
+  static Register RuntimeCallFunctionReg() { return ebx; }
+  static Register RuntimeCallArgCountReg() { return eax; }
+  static RegList CCalleeSaveRegisters() {
+    return esi.bit() | edi.bit() | ebx.bit();
+  }
+  static Register CRegisterParameter(int i) { return no_reg; }
+  static int CRegisterParametersLength() { return 0; }
+};
+
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+  return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>(
+      zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Property properties,
+    CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+  return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>(
+      zone, function, parameter_count, properties, can_deoptimize);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count) {
+  return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
+      this->info_->zone(), descriptor, stack_parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(
+    Zone* zone, int num_params, MachineRepresentation return_type,
+    const MachineRepresentation* param_types) {
+  return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
+      zone, num_params, return_type, param_types);
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
new file mode 100644 (file)
index 0000000..6e59f9c
--- /dev/null
@@ -0,0 +1,114 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_CODES_H_
+#define V8_COMPILER_INSTRUCTION_CODES_H_
+
+#if V8_TARGET_ARCH_ARM
+#include "src/compiler/arm/instruction-codes-arm.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/compiler/arm64/instruction-codes-arm64.h"
+#elif V8_TARGET_ARCH_IA32
+#include "src/compiler/ia32/instruction-codes-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/compiler/x64/instruction-codes-x64.h"
+#else
+#error "Unsupported target architecture."
+#endif
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class OStream;
+
+namespace compiler {
+
+// Target-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define ARCH_OPCODE_LIST(V) \
+  V(ArchDeoptimize)         \
+  V(ArchJmp)                \
+  V(ArchNop)                \
+  V(ArchRet)                \
+  TARGET_ARCH_OPCODE_LIST(V)
+
+enum ArchOpcode {
+#define DECLARE_ARCH_OPCODE(Name) k##Name,
+  ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE)
+#undef DECLARE_ARCH_OPCODE
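+// COUNT_ARCH_OPCODE expands to "+1" once per opcode, so kLastArchOpcode
+// evaluates to -1 plus the number of opcodes, i.e. the index of the last one.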
+#define COUNT_ARCH_OPCODE(Name) +1
+  kLastArchOpcode = -1 ARCH_OPCODE_LIST(COUNT_ARCH_OPCODE)
+#undef COUNT_ARCH_OPCODE
+};
+
+OStream& operator<<(OStream& os, const ArchOpcode& ao);
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+#define ADDRESSING_MODE_LIST(V) \
+  V(None)                       \
+  TARGET_ADDRESSING_MODE_LIST(V)
+
+enum AddressingMode {
+#define DECLARE_ADDRESSING_MODE(Name) kMode_##Name,
+  ADDRESSING_MODE_LIST(DECLARE_ADDRESSING_MODE)
+#undef DECLARE_ADDRESSING_MODE
+#define COUNT_ADDRESSING_MODE(Name) +1
+  kLastAddressingMode = -1 ADDRESSING_MODE_LIST(COUNT_ADDRESSING_MODE)
+#undef COUNT_ADDRESSING_MODE
+};
+
+OStream& operator<<(OStream& os, const AddressingMode& am);
+
+// The mode of the flags continuation (see below).
+enum FlagsMode { kFlags_none = 0, kFlags_branch = 1, kFlags_set = 2 };
+
+OStream& operator<<(OStream& os, const FlagsMode& fm);
+
+// The condition of the flags continuation (see below).
+enum FlagsCondition {
+  kEqual,
+  kNotEqual,
+  kSignedLessThan,
+  kSignedGreaterThanOrEqual,
+  kSignedLessThanOrEqual,
+  kSignedGreaterThan,
+  kUnsignedLessThan,
+  kUnsignedGreaterThanOrEqual,
+  kUnsignedLessThanOrEqual,
+  kUnsignedGreaterThan,
+  kUnorderedEqual,
+  kUnorderedNotEqual,
+  kUnorderedLessThan,
+  kUnorderedGreaterThanOrEqual,
+  kUnorderedLessThanOrEqual,
+  kUnorderedGreaterThan
+};
+
+OStream& operator<<(OStream& os, const FlagsCondition& fc);
+
+// The InstructionCode is an opaque, target-specific integer that encodes
+// what code to emit for an instruction in the code generator. It is not
+// interesting to the register allocator, as the inputs and flags on the
+// instructions specify everything of interest.
+typedef int32_t InstructionCode;
+
+// Helpers for encoding / decoding InstructionCode into the fields needed
+// for code generation. We encode the instruction, addressing mode, and flags
+// continuation into a single InstructionCode which is stored as part of
+// the instruction.
+typedef BitField<ArchOpcode, 0, 7> ArchOpcodeField;
+typedef BitField<AddressingMode, 7, 4> AddressingModeField;
+typedef BitField<FlagsMode, 11, 2> FlagsModeField;
+typedef BitField<FlagsCondition, 13, 4> FlagsConditionField;
+typedef BitField<int, 13, 19> MiscField;
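+// Note that MiscField overlaps FlagsConditionField. Within this patch the two
+// are never encoded on the same instruction: flags-setting instructions carry
+// a condition, while e.g. calls and deoptimizations carry misc data, as in
+// kArchDeoptimize | MiscField::encode(deoptimization_id).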
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_INSTRUCTION_CODES_H_
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
new file mode 100644 (file)
index 0000000..d4f1eee
--- /dev/null
@@ -0,0 +1,352 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
+#define V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
+
+#include "src/compiler/instruction.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/linkage.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A helper class for the instruction selector that simplifies construction of
+// Operands. This class implements a base for architecture-specific helpers.
+class OperandGenerator {
+ public:
+  explicit OperandGenerator(InstructionSelector* selector)
+      : selector_(selector) {}
+
+  InstructionOperand* DefineAsRegister(Node* node) {
+    return Define(node, new (zone())
+                  UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+  }
+
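+  // Identical to DefineAsRegister: "double-ness" is tracked per virtual
+  // register (see InstructionSequence::MarkAsDouble) rather than in the
+  // operand policy. The same holds for the Use*DoubleRegister variants below.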
+  InstructionOperand* DefineAsDoubleRegister(Node* node) {
+    return Define(node, new (zone())
+                  UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+  }
+
+  InstructionOperand* DefineSameAsFirst(Node* result) {
+    return Define(result, new (zone())
+                  UnallocatedOperand(UnallocatedOperand::SAME_AS_FIRST_INPUT));
+  }
+
+  InstructionOperand* DefineAsFixed(Node* node, Register reg) {
+    return Define(node, new (zone())
+                  UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+                                     Register::ToAllocationIndex(reg)));
+  }
+
+  InstructionOperand* DefineAsFixedDouble(Node* node, DoubleRegister reg) {
+    return Define(node, new (zone())
+                  UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
+                                     DoubleRegister::ToAllocationIndex(reg)));
+  }
+
+  InstructionOperand* DefineAsConstant(Node* node) {
+    sequence()->AddConstant(node->id(), ToConstant(node));
+    return ConstantOperand::Create(node->id(), zone());
+  }
+
+  InstructionOperand* DefineAsLocation(Node* node, LinkageLocation location) {
+    return Define(node, ToUnallocatedOperand(location));
+  }
+
+  InstructionOperand* Use(Node* node) {
+    return Use(node,
+               new (zone()) UnallocatedOperand(
+                   UnallocatedOperand::ANY, UnallocatedOperand::USED_AT_START));
+  }
+
+  InstructionOperand* UseRegister(Node* node) {
+    return Use(node, new (zone())
+               UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+                                  UnallocatedOperand::USED_AT_START));
+  }
+
+  InstructionOperand* UseDoubleRegister(Node* node) {
+    return Use(node, new (zone())
+               UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+                                  UnallocatedOperand::USED_AT_START));
+  }
+
+  // Use a register or operand for the node. If a register is chosen, it won't
+  // alias any temporary or output registers.
+  InstructionOperand* UseUnique(Node* node) {
+    return Use(node, new (zone()) UnallocatedOperand(UnallocatedOperand::ANY));
+  }
+
+  // Use a unique register for the node that does not alias any temporary or
+  // output registers.
+  InstructionOperand* UseUniqueRegister(Node* node) {
+    return Use(node, new (zone())
+               UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+  }
+
+  // Use a unique double register for the node that does not alias any temporary
+  // or output double registers.
+  InstructionOperand* UseUniqueDoubleRegister(Node* node) {
+    return Use(node, new (zone())
+               UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+  }
+
+  InstructionOperand* UseFixed(Node* node, Register reg) {
+    return Use(node, new (zone())
+               UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+                                  Register::ToAllocationIndex(reg)));
+  }
+
+  InstructionOperand* UseFixedDouble(Node* node, DoubleRegister reg) {
+    return Use(node, new (zone())
+               UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
+                                  DoubleRegister::ToAllocationIndex(reg)));
+  }
+
+  InstructionOperand* UseImmediate(Node* node) {
+    int index = sequence()->AddImmediate(ToConstant(node));
+    return ImmediateOperand::Create(index, zone());
+  }
+
+  InstructionOperand* UseLocation(Node* node, LinkageLocation location) {
+    return Use(node, ToUnallocatedOperand(location));
+  }
+
+  InstructionOperand* TempRegister() {
+    UnallocatedOperand* op =
+        new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+                                        UnallocatedOperand::USED_AT_START);
+    op->set_virtual_register(sequence()->NextVirtualRegister());
+    return op;
+  }
+
+  InstructionOperand* TempDoubleRegister() {
+    UnallocatedOperand* op =
+        new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+                                        UnallocatedOperand::USED_AT_START);
+    op->set_virtual_register(sequence()->NextVirtualRegister());
+    sequence()->MarkAsDouble(op->virtual_register());
+    return op;
+  }
+
+  InstructionOperand* TempRegister(Register reg) {
+    return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+                                           Register::ToAllocationIndex(reg));
+  }
+
+  InstructionOperand* TempImmediate(int32_t imm) {
+    int index = sequence()->AddImmediate(Constant(imm));
+    return ImmediateOperand::Create(index, zone());
+  }
+
+  InstructionOperand* Label(BasicBlock* block) {
+    // TODO(bmeurer): We misuse ImmediateOperand here.
+    return ImmediateOperand::Create(block->id(), zone());
+  }
+
+ protected:
+  Graph* graph() const { return selector()->graph(); }
+  InstructionSelector* selector() const { return selector_; }
+  InstructionSequence* sequence() const { return selector()->sequence(); }
+  Isolate* isolate() const { return zone()->isolate(); }
+  Zone* zone() const { return selector()->instruction_zone(); }
+
+ private:
+  static Constant ToConstant(const Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant:
+        return Constant(ValueOf<int32_t>(node->op()));
+      case IrOpcode::kInt64Constant:
+        return Constant(ValueOf<int64_t>(node->op()));
+      case IrOpcode::kNumberConstant:
+      case IrOpcode::kFloat64Constant:
+        return Constant(ValueOf<double>(node->op()));
+      case IrOpcode::kExternalConstant:
+        return Constant(ValueOf<ExternalReference>(node->op()));
+      case IrOpcode::kHeapConstant:
+        return Constant(ValueOf<Handle<HeapObject> >(node->op()));
+      default:
+        break;
+    }
+    UNREACHABLE();
+    return Constant(static_cast<int32_t>(0));
+  }
+
+  UnallocatedOperand* Define(Node* node, UnallocatedOperand* operand) {
+    ASSERT_NOT_NULL(node);
+    ASSERT_NOT_NULL(operand);
+    operand->set_virtual_register(node->id());
+    return operand;
+  }
+
+  UnallocatedOperand* Use(Node* node, UnallocatedOperand* operand) {
+    selector_->MarkAsUsed(node);
+    return Define(node, operand);
+  }
+
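+  // Translates a LinkageLocation into an unallocated operand: ANY_REGISTER
+  // means any register, negative locations are fixed stack slots, and
+  // everything else is a fixed (double) register.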
+  UnallocatedOperand* ToUnallocatedOperand(LinkageLocation location) {
+    if (location.location_ == LinkageLocation::ANY_REGISTER) {
+      return new (zone())
+          UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER);
+    }
+    if (location.location_ < 0) {
+      return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_SLOT,
+                                             location.location_);
+    }
+    if (location.rep_ == kMachineFloat64) {
+      return new (zone()) UnallocatedOperand(
+          UnallocatedOperand::FIXED_DOUBLE_REGISTER, location.location_);
+    }
+    return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+                                           location.location_);
+  }
+
+  InstructionSelector* selector_;
+};
+
+
+// The flags continuation is a way to combine a branch or a materialization
+// of a boolean value with an instruction that sets the flags register.
+// The whole instruction is treated as a unit by the register allocator, and
+// thus no spills or moves can be introduced between the flags-setting
+// instruction and the branch or set it should be combined with.
+class FlagsContinuation V8_FINAL {
+ public:
+  // Creates a new flags continuation from the given condition and true/false
+  // blocks.
+  FlagsContinuation(FlagsCondition condition, BasicBlock* true_block,
+                    BasicBlock* false_block)
+      : mode_(kFlags_branch),
+        condition_(condition),
+        true_block_(true_block),
+        false_block_(false_block) {
+    ASSERT_NOT_NULL(true_block);
+    ASSERT_NOT_NULL(false_block);
+  }
+
+  // Creates a new flags continuation from the given condition and result node.
+  FlagsContinuation(FlagsCondition condition, Node* result)
+      : mode_(kFlags_set), condition_(condition), result_(result) {
+    ASSERT_NOT_NULL(result);
+  }
+
+  bool IsNone() const { return mode_ == kFlags_none; }
+  bool IsBranch() const { return mode_ == kFlags_branch; }
+  bool IsSet() const { return mode_ == kFlags_set; }
+  FlagsCondition condition() const { return condition_; }
+  Node* result() const {
+    ASSERT(IsSet());
+    return result_;
+  }
+  BasicBlock* true_block() const {
+    ASSERT(IsBranch());
+    return true_block_;
+  }
+  BasicBlock* false_block() const {
+    ASSERT(IsBranch());
+    return false_block_;
+  }
+
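+  // The FlagsCondition enum lists conditions in negation pairs (kEqual /
+  // kNotEqual, kSignedLessThan / kSignedGreaterThanOrEqual, ...), so flipping
+  // the lowest bit negates the condition.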
+  void Negate() { condition_ = static_cast<FlagsCondition>(condition_ ^ 1); }
+
+  void Commute() {
+    switch (condition_) {
+      case kEqual:
+      case kNotEqual:
+        return;
+      case kSignedLessThan:
+        condition_ = kSignedGreaterThan;
+        return;
+      case kSignedGreaterThanOrEqual:
+        condition_ = kSignedLessThanOrEqual;
+        return;
+      case kSignedLessThanOrEqual:
+        condition_ = kSignedGreaterThanOrEqual;
+        return;
+      case kSignedGreaterThan:
+        condition_ = kSignedLessThan;
+        return;
+      case kUnsignedLessThan:
+        condition_ = kUnsignedGreaterThan;
+        return;
+      case kUnsignedGreaterThanOrEqual:
+        condition_ = kUnsignedLessThanOrEqual;
+        return;
+      case kUnsignedLessThanOrEqual:
+        condition_ = kUnsignedGreaterThanOrEqual;
+        return;
+      case kUnsignedGreaterThan:
+        condition_ = kUnsignedLessThan;
+        return;
+      case kUnorderedEqual:
+      case kUnorderedNotEqual:
+        return;
+      case kUnorderedLessThan:
+        condition_ = kUnorderedGreaterThan;
+        return;
+      case kUnorderedGreaterThanOrEqual:
+        condition_ = kUnorderedLessThanOrEqual;
+        return;
+      case kUnorderedLessThanOrEqual:
+        condition_ = kUnorderedGreaterThanOrEqual;
+        return;
+      case kUnorderedGreaterThan:
+        condition_ = kUnorderedLessThan;
+        return;
+    }
+    UNREACHABLE();
+  }
+
+  void OverwriteAndNegateIfEqual(FlagsCondition condition) {
+    bool negate = condition_ == kEqual;
+    condition_ = condition;
+    if (negate) Negate();
+  }
+
+  void SwapBlocks() { std::swap(true_block_, false_block_); }
+
+  // Encodes this flags continuation into the given opcode.
+  InstructionCode Encode(InstructionCode opcode) {
+    return opcode | FlagsModeField::encode(mode_) |
+           FlagsConditionField::encode(condition_);
+  }
+
+ private:
+  FlagsMode mode_;
+  FlagsCondition condition_;
+  Node* result_;             // Only valid if mode_ == kFlags_set.
+  BasicBlock* true_block_;   // Only valid if mode_ == kFlags_branch.
+  BasicBlock* false_block_;  // Only valid if mode_ == kFlags_branch.
+};
+
+
+// An internal helper class for generating the operands to calls.
+// TODO(bmeurer): Get rid of the CallBuffer business and make
+// InstructionSelector::VisitCall platform independent instead.
+struct CallBuffer {
+  CallBuffer(Zone* zone, CallDescriptor* descriptor);
+
+  int output_count;
+  CallDescriptor* descriptor;
+  Node** output_nodes;
+  InstructionOperand** outputs;
+  InstructionOperand** fixed_and_control_args;
+  int fixed_count;
+  Node** pushed_nodes;
+  int pushed_count;
+
+  int input_count() { return descriptor->InputCount(); }
+
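+  // A lazily-deoptimizing call carries two extra control inputs: the labels
+  // of the continuation and deoptimization blocks.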
+  int control_count() { return descriptor->CanLazilyDeoptimize() ? 2 : 0; }
+
+  int fixed_and_control_count() { return fixed_count + control_count(); }
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
new file mode 100644 (file)
index 0000000..dce3c1c
--- /dev/null
@@ -0,0 +1,873 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector.h"
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+InstructionSelector::InstructionSelector(InstructionSequence* sequence,
+                                         SourcePositionTable* source_positions)
+    : zone_(sequence->isolate()),
+      sequence_(sequence),
+      source_positions_(source_positions),
+      current_block_(NULL),
+      instructions_(InstructionDeque::allocator_type(zone())),
+      used_(graph()->NodeCount(), false, BoolVector::allocator_type(zone())) {}
+
+
+void InstructionSelector::SelectInstructions() {
+  // Mark the inputs of all phis in loop headers as used.
+  BasicBlockVector* blocks = schedule()->rpo_order();
+  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
+    BasicBlock* block = *i;
+    if (!block->IsLoopHeader()) continue;
+    ASSERT_NE(0, block->PredecessorCount());
+    ASSERT_NE(1, block->PredecessorCount());
+    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+         ++j) {
+      Node* phi = *j;
+      if (phi->opcode() != IrOpcode::kPhi) continue;
+
+      // Mark all inputs as used.
+      Node::Inputs inputs = phi->inputs();
+      for (InputIter k = inputs.begin(); k != inputs.end(); ++k) {
+        MarkAsUsed(*k);
+      }
+    }
+  }
+
+  // Visit each basic block in post order.
+  for (BasicBlockVectorRIter i = blocks->rbegin(); i != blocks->rend(); ++i) {
+    VisitBlock(*i);
+  }
+
+  // Schedule the selected instructions.
+  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
+    BasicBlock* block = *i;
+    size_t end = block->code_end_;
+    size_t start = block->code_start_;
+    sequence()->StartBlock(block);
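+    // The range [code_end_, code_start_) holds this block's instructions in
+    // reverse order, so walking start-1 down to end adds them in forward
+    // order.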
+    while (start-- > end) {
+      sequence()->AddInstruction(instructions_[start], block);
+    }
+    sequence()->EndBlock(block);
+  }
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+                                       InstructionOperand* output,
+                                       size_t temp_count,
+                                       InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+                                       InstructionOperand* output,
+                                       InstructionOperand* a, size_t temp_count,
+                                       InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+                                       InstructionOperand* output,
+                                       InstructionOperand* a,
+                                       InstructionOperand* b, size_t temp_count,
+                                       InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  InstructionOperand* inputs[] = {a, b};
+  size_t input_count = ARRAY_SIZE(inputs);
+  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+              temps);
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+                                       InstructionOperand* output,
+                                       InstructionOperand* a,
+                                       InstructionOperand* b,
+                                       InstructionOperand* c, size_t temp_count,
+                                       InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  InstructionOperand* inputs[] = {a, b, c};
+  size_t input_count = ARRAY_SIZE(inputs);
+  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+              temps);
+}
+
+
+Instruction* InstructionSelector::Emit(
+    InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
+    InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
+    size_t temp_count, InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  InstructionOperand* inputs[] = {a, b, c, d};
+  size_t input_count = ARRAY_SIZE(inputs);
+  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+              temps);
+}
+
+
+Instruction* InstructionSelector::Emit(
+    InstructionCode opcode, size_t output_count, InstructionOperand** outputs,
+    size_t input_count, InstructionOperand** inputs, size_t temp_count,
+    InstructionOperand** temps) {
+  Instruction* instr =
+      Instruction::New(instruction_zone(), opcode, output_count, outputs,
+                       input_count, inputs, temp_count, temps);
+  return Emit(instr);
+}
+
+
+Instruction* InstructionSelector::Emit(Instruction* instr) {
+  instructions_.push_back(instr);
+  return instr;
+}
+
+
+bool InstructionSelector::IsNextInAssemblyOrder(const BasicBlock* block) const {
+  return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
+         block->deferred_ == current_block_->deferred_;
+}
+
+
+bool InstructionSelector::CanCover(Node* user, Node* node) const {
+  return node->OwnedBy(user) &&
+         schedule()->block(node) == schedule()->block(user);
+}
+
+
+bool InstructionSelector::IsUsed(Node* node) const {
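+  // Operators without the kEliminatable property must not be dropped, so they
+  // always count as used.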
+  if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
+  NodeId id = node->id();
+  ASSERT(id >= 0);
+  ASSERT(id < static_cast<NodeId>(used_.size()));
+  return used_[id];
+}
+
+
+void InstructionSelector::MarkAsUsed(Node* node) {
+  ASSERT_NOT_NULL(node);
+  NodeId id = node->id();
+  ASSERT(id >= 0);
+  ASSERT(id < static_cast<NodeId>(used_.size()));
+  used_[id] = true;
+}
+
+
+bool InstructionSelector::IsDouble(const Node* node) const {
+  ASSERT_NOT_NULL(node);
+  return sequence()->IsDouble(node->id());
+}
+
+
+void InstructionSelector::MarkAsDouble(Node* node) {
+  ASSERT_NOT_NULL(node);
+  ASSERT(!IsReference(node));
+  sequence()->MarkAsDouble(node->id());
+
+  // Propagate "doubleness" throughout phis.
+  for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
+    Node* user = *i;
+    if (user->opcode() != IrOpcode::kPhi) continue;
+    if (IsDouble(user)) continue;
+    MarkAsDouble(user);
+  }
+}
+
+
+bool InstructionSelector::IsReference(const Node* node) const {
+  ASSERT_NOT_NULL(node);
+  return sequence()->IsReference(node->id());
+}
+
+
+void InstructionSelector::MarkAsReference(Node* node) {
+  ASSERT_NOT_NULL(node);
+  ASSERT(!IsDouble(node));
+  sequence()->MarkAsReference(node->id());
+
+  // Propagate "referenceness" throughout phis.
+  for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
+    Node* user = *i;
+    if (user->opcode() != IrOpcode::kPhi) continue;
+    if (IsReference(user)) continue;
+    MarkAsReference(user);
+  }
+}
+
+
+void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
+                                               Node* node) {
+  ASSERT_NOT_NULL(node);
+  if (rep == kMachineFloat64) MarkAsDouble(node);
+  if (rep == kMachineTagged) MarkAsReference(node);
+}
+
+
+// TODO(bmeurer): Get rid of the CallBuffer business and make
+// InstructionSelector::VisitCall platform independent instead.
+CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d)
+    : output_count(0),
+      descriptor(d),
+      output_nodes(zone->NewArray<Node*>(d->ReturnCount())),
+      outputs(zone->NewArray<InstructionOperand*>(d->ReturnCount())),
+      fixed_and_control_args(
+          zone->NewArray<InstructionOperand*>(input_count() + control_count())),
+      fixed_count(0),
+      pushed_nodes(zone->NewArray<Node*>(input_count())),
+      pushed_count(0) {
+  if (d->ReturnCount() > 1) {
+    memset(output_nodes, 0, sizeof(Node*) * d->ReturnCount());  // NOLINT
+  }
+  memset(pushed_nodes, 0, sizeof(Node*) * input_count());  // NOLINT
+}
+
+
+// TODO(bmeurer): Get rid of the CallBuffer business and make
+// InstructionSelector::VisitCall platform independent instead.
+void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
+                                               bool call_code_immediate,
+                                               bool call_address_immediate,
+                                               BasicBlock* cont_node,
+                                               BasicBlock* deopt_node) {
+  OperandGenerator g(this);
+  ASSERT_EQ(call->op()->OutputCount(), buffer->descriptor->ReturnCount());
+  ASSERT_EQ(NodeProperties::GetValueInputCount(call), buffer->input_count());
+
+  if (buffer->descriptor->ReturnCount() > 0) {
+    // Collect the projections that represent multiple outputs from this call.
+    if (buffer->descriptor->ReturnCount() == 1) {
+      buffer->output_nodes[0] = call;
+    } else {
+      // Iterate over all uses of {call} and collect the projections into the
+      // {result} buffer.
+      for (UseIter i = call->uses().begin(); i != call->uses().end(); ++i) {
+        if ((*i)->opcode() == IrOpcode::kProjection) {
+          int index = OpParameter<int32_t>(*i);
+          ASSERT_GE(index, 0);
+          ASSERT_LT(index, buffer->descriptor->ReturnCount());
+          ASSERT_EQ(NULL, buffer->output_nodes[index]);
+          buffer->output_nodes[index] = *i;
+        }
+      }
+    }
+
+    // Filter out the outputs that aren't live because no projection uses them.
+    for (int i = 0; i < buffer->descriptor->ReturnCount(); i++) {
+      if (buffer->output_nodes[i] != NULL) {
+        Node* output = buffer->output_nodes[i];
+        LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
+        MarkAsRepresentation(location.representation(), output);
+        buffer->outputs[buffer->output_count++] =
+            g.DefineAsLocation(output, location);
+      }
+    }
+  }
+
+  buffer->fixed_count = 1;  // First argument is always the callee.
+  Node* callee = call->InputAt(0);
+  switch (buffer->descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject:
+      buffer->fixed_and_control_args[0] =
+          (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
+              ? g.UseImmediate(callee)
+              : g.UseRegister(callee);
+      break;
+    case CallDescriptor::kCallAddress:
+      buffer->fixed_and_control_args[0] =
+          (call_address_immediate &&
+           (callee->opcode() == IrOpcode::kInt32Constant ||
+            callee->opcode() == IrOpcode::kInt64Constant))
+              ? g.UseImmediate(callee)
+              : g.UseRegister(callee);
+      break;
+    case CallDescriptor::kCallJSFunction:
+      buffer->fixed_and_control_args[0] =
+          g.UseLocation(callee, buffer->descriptor->GetInputLocation(0));
+      break;
+  }
+
+  int input_count = buffer->input_count();
+
+  // Split the arguments into pushed_nodes and fixed_and_control_args. Pushed
+  // arguments require an explicit push instruction before the call and do not
+  // appear as arguments to the call. Everything else ends up as an
+  // InstructionOperand argument to the call.
+  InputIter iter(call->inputs().begin());
+  for (int index = 0; index < input_count; ++iter, ++index) {
+    ASSERT(iter != call->inputs().end());
+    ASSERT(index == iter.index());
+    if (index == 0) continue;  // The first argument (callee) is already done.
+    InstructionOperand* op =
+        g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index));
+    if (UnallocatedOperand::cast(op)->HasFixedSlotPolicy()) {
+      int stack_index = -UnallocatedOperand::cast(op)->fixed_slot_index() - 1;
+      ASSERT(buffer->pushed_nodes[stack_index] == NULL);
+      buffer->pushed_nodes[stack_index] = *iter;
+      buffer->pushed_count++;
+    } else {
+      buffer->fixed_and_control_args[buffer->fixed_count] = op;
+      buffer->fixed_count++;
+    }
+  }
+
+  // If the call can deoptimize, we add the continuation and deoptimization
+  // block labels.
+  if (buffer->descriptor->CanLazilyDeoptimize()) {
+    ASSERT(cont_node != NULL);
+    ASSERT(deopt_node != NULL);
+    buffer->fixed_and_control_args[buffer->fixed_count] = g.Label(cont_node);
+    buffer->fixed_and_control_args[buffer->fixed_count + 1] =
+        g.Label(deopt_node);
+  } else {
+    ASSERT(cont_node == NULL);
+    ASSERT(deopt_node == NULL);
+  }
+
+  ASSERT(input_count == (buffer->fixed_count + buffer->pushed_count));
+}
+
+
+void InstructionSelector::VisitBlock(BasicBlock* block) {
+  ASSERT_EQ(NULL, current_block_);
+  current_block_ = block;
+  size_t current_block_end = instructions_.size();
+
+  // Generate code for the block control "top down", but schedule the code
+  // "bottom up".
+  VisitControl(block);
+  std::reverse(instructions_.begin() + current_block_end, instructions_.end());
+
+  // Visit code in reverse control flow order, because architecture-specific
+  // matching may cover more than one node at a time.
+  for (BasicBlock::reverse_iterator i = block->rbegin(); i != block->rend();
+       ++i) {
+    Node* node = *i;
+    if (!IsUsed(node)) continue;
+    // Generate code for this node "top down", but schedule the code "bottom
+    // up".
+    size_t current_node_end = instructions_.size();
+    VisitNode(node);
+    std::reverse(instructions_.begin() + current_node_end, instructions_.end());
+  }
+
+  // We're done with the block.
+  // TODO(bmeurer): We should not mutate the schedule.
+  block->code_end_ = current_block_end;
+  block->code_start_ = instructions_.size();
+
+  current_block_ = NULL;
+}
+
+
+static inline void CheckNoPhis(const BasicBlock* block) {
+#ifdef DEBUG
+  // Branch targets should not have phis.
+  for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
+    const Node* node = *i;
+    CHECK_NE(IrOpcode::kPhi, node->opcode());
+  }
+#endif
+}
+
+
+void InstructionSelector::VisitControl(BasicBlock* block) {
+  Node* input = block->control_input_;
+  switch (block->control_) {
+    case BasicBlockData::kGoto:
+      return VisitGoto(block->SuccessorAt(0));
+    case BasicBlockData::kBranch: {
+      ASSERT_EQ(IrOpcode::kBranch, input->opcode());
+      BasicBlock* tbranch = block->SuccessorAt(0);
+      BasicBlock* fbranch = block->SuccessorAt(1);
+      // SSA deconstruction requires targets of branches not to have phis.
+      // Edge split form guarantees this property, but is more strict.
+      CheckNoPhis(tbranch);
+      CheckNoPhis(fbranch);
+      if (tbranch == fbranch) return VisitGoto(tbranch);
+      return VisitBranch(input, tbranch, fbranch);
+    }
+    case BasicBlockData::kReturn: {
+      // If the control input is itself a Return node, visit its value input.
+      Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
+                        ? input->InputAt(0)
+                        : input;
+      return VisitReturn(value);
+    }
+    case BasicBlockData::kThrow:
+      return VisitThrow(input);
+    case BasicBlockData::kDeoptimize:
+      return VisitDeoptimization(input);
+    case BasicBlockData::kCall: {
+      BasicBlock* deoptimization = block->SuccessorAt(0);
+      BasicBlock* continuation = block->SuccessorAt(1);
+      VisitCall(input, continuation, deoptimization);
+      break;
+    }
+    case BasicBlockData::kNone: {
+      // TODO(titzer): exit block doesn't have control.
+      ASSERT(input == NULL);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void InstructionSelector::VisitNode(Node* node) {
+  ASSERT_NOT_NULL(schedule()->block(node));  // should only use scheduled nodes.
+  SourcePosition source_position = source_positions_->GetSourcePosition(node);
+  if (!source_position.IsUnknown()) {
+    ASSERT(!source_position.IsInvalid());
+    if (FLAG_turbo_source_positions || node->opcode() == IrOpcode::kCall) {
+      Emit(SourcePositionInstruction::New(instruction_zone(), source_position));
+    }
+  }
+  switch (node->opcode()) {
+    case IrOpcode::kStart:
+    case IrOpcode::kLoop:
+    case IrOpcode::kEnd:
+    case IrOpcode::kBranch:
+    case IrOpcode::kIfTrue:
+    case IrOpcode::kIfFalse:
+    case IrOpcode::kEffectPhi:
+    case IrOpcode::kMerge:
+    case IrOpcode::kProjection:
+    case IrOpcode::kLazyDeoptimization:
+    case IrOpcode::kContinuation:
+      // No code needed for these graph artifacts.
+      return;
+    case IrOpcode::kPhi:
+      return VisitPhi(node);
+    case IrOpcode::kParameter: {
+      int index = OpParameter<int>(node);
+      MachineRepresentation rep = linkage()
+                                      ->GetIncomingDescriptor()
+                                      ->GetInputLocation(index)
+                                      .representation();
+      MarkAsRepresentation(rep, node);
+      return VisitParameter(node);
+    }
+    case IrOpcode::kInt32Constant:
+    case IrOpcode::kInt64Constant:
+    case IrOpcode::kExternalConstant:
+      return VisitConstant(node);
+    case IrOpcode::kFloat64Constant:
+      return MarkAsDouble(node), VisitConstant(node);
+    case IrOpcode::kHeapConstant:
+    case IrOpcode::kNumberConstant:
+      // TODO(turbofan): only mark non-smis as references.
+      return MarkAsReference(node), VisitConstant(node);
+    case IrOpcode::kCall:
+      return VisitCall(node, NULL, NULL);
+    case IrOpcode::kFrameState:
+      // TODO(titzer): state nodes should be combined into their users.
+      return;
+    case IrOpcode::kLoad: {
+      MachineRepresentation load_rep = OpParameter<MachineRepresentation>(node);
+      MarkAsRepresentation(load_rep, node);
+      return VisitLoad(node);
+    }
+    case IrOpcode::kStore:
+      return VisitStore(node);
+    case IrOpcode::kWord32And:
+      return VisitWord32And(node);
+    case IrOpcode::kWord32Or:
+      return VisitWord32Or(node);
+    case IrOpcode::kWord32Xor:
+      return VisitWord32Xor(node);
+    case IrOpcode::kWord32Shl:
+      return VisitWord32Shl(node);
+    case IrOpcode::kWord32Shr:
+      return VisitWord32Shr(node);
+    case IrOpcode::kWord32Sar:
+      return VisitWord32Sar(node);
+    case IrOpcode::kWord32Equal:
+      return VisitWord32Equal(node);
+    case IrOpcode::kWord64And:
+      return VisitWord64And(node);
+    case IrOpcode::kWord64Or:
+      return VisitWord64Or(node);
+    case IrOpcode::kWord64Xor:
+      return VisitWord64Xor(node);
+    case IrOpcode::kWord64Shl:
+      return VisitWord64Shl(node);
+    case IrOpcode::kWord64Shr:
+      return VisitWord64Shr(node);
+    case IrOpcode::kWord64Sar:
+      return VisitWord64Sar(node);
+    case IrOpcode::kWord64Equal:
+      return VisitWord64Equal(node);
+    case IrOpcode::kInt32Add:
+      return VisitInt32Add(node);
+    case IrOpcode::kInt32Sub:
+      return VisitInt32Sub(node);
+    case IrOpcode::kInt32Mul:
+      return VisitInt32Mul(node);
+    case IrOpcode::kInt32Div:
+      return VisitInt32Div(node);
+    case IrOpcode::kInt32UDiv:
+      return VisitInt32UDiv(node);
+    case IrOpcode::kInt32Mod:
+      return VisitInt32Mod(node);
+    case IrOpcode::kInt32UMod:
+      return VisitInt32UMod(node);
+    case IrOpcode::kInt32LessThan:
+      return VisitInt32LessThan(node);
+    case IrOpcode::kInt32LessThanOrEqual:
+      return VisitInt32LessThanOrEqual(node);
+    case IrOpcode::kUint32LessThan:
+      return VisitUint32LessThan(node);
+    case IrOpcode::kUint32LessThanOrEqual:
+      return VisitUint32LessThanOrEqual(node);
+    case IrOpcode::kInt64Add:
+      return VisitInt64Add(node);
+    case IrOpcode::kInt64Sub:
+      return VisitInt64Sub(node);
+    case IrOpcode::kInt64Mul:
+      return VisitInt64Mul(node);
+    case IrOpcode::kInt64Div:
+      return VisitInt64Div(node);
+    case IrOpcode::kInt64UDiv:
+      return VisitInt64UDiv(node);
+    case IrOpcode::kInt64Mod:
+      return VisitInt64Mod(node);
+    case IrOpcode::kInt64UMod:
+      return VisitInt64UMod(node);
+    case IrOpcode::kInt64LessThan:
+      return VisitInt64LessThan(node);
+    case IrOpcode::kInt64LessThanOrEqual:
+      return VisitInt64LessThanOrEqual(node);
+    case IrOpcode::kConvertInt32ToInt64:
+      return VisitConvertInt32ToInt64(node);
+    case IrOpcode::kConvertInt64ToInt32:
+      return VisitConvertInt64ToInt32(node);
+    case IrOpcode::kConvertInt32ToFloat64:
+      return MarkAsDouble(node), VisitConvertInt32ToFloat64(node);
+    case IrOpcode::kConvertFloat64ToInt32:
+      return VisitConvertFloat64ToInt32(node);
+    case IrOpcode::kFloat64Add:
+      return MarkAsDouble(node), VisitFloat64Add(node);
+    case IrOpcode::kFloat64Sub:
+      return MarkAsDouble(node), VisitFloat64Sub(node);
+    case IrOpcode::kFloat64Mul:
+      return MarkAsDouble(node), VisitFloat64Mul(node);
+    case IrOpcode::kFloat64Div:
+      return MarkAsDouble(node), VisitFloat64Div(node);
+    case IrOpcode::kFloat64Mod:
+      return MarkAsDouble(node), VisitFloat64Mod(node);
+    case IrOpcode::kFloat64Equal:
+      return VisitFloat64Equal(node);
+    case IrOpcode::kFloat64LessThan:
+      return VisitFloat64LessThan(node);
+    case IrOpcode::kFloat64LessThanOrEqual:
+      return VisitFloat64LessThanOrEqual(node);
+    default:
+      V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
+               node->opcode(), node->op()->mnemonic(), node->id());
+  }
+}
+
+
+void InstructionSelector::VisitWord32Equal(Node* node) {
+  FlagsContinuation cont(kEqual, node);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWord32Test(m.left().node(), &cont);
+  }
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitWord64Equal(Node* node) {
+  FlagsContinuation cont(kEqual, node);
+  Int64BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWord64Test(m.left().node(), &cont);
+  }
+  VisitWord64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWord64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWord64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+  FlagsContinuation cont(kUnorderedEqual, node);
+  VisitFloat64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThan, node);
+  VisitFloat64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+  VisitFloat64Compare(node, &cont);
+}
+
+
+// 32-bit targets do not implement the following instructions.
+#if V8_TARGET_ARCH_32_BIT
+
+void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64UDiv(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64UMod(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitWord64Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  UNIMPLEMENTED();
+}
+
+#endif  // V8_TARGET_ARCH_32_BIT
+
+
+void InstructionSelector::VisitPhi(Node* node) {
+  // TODO(bmeurer): Emit a PhiInstruction here.
+  for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
+    MarkAsUsed(*i);
+  }
+}
+
+
+void InstructionSelector::VisitParameter(Node* node) {
+  OperandGenerator g(this);
+  Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetParameterLocation(
+                                              OpParameter<int>(node))));
+}
+
+
+void InstructionSelector::VisitConstant(Node* node) {
+  // We must emit a NOP here because every live range needs a defining
+  // instruction in the register allocator.
+  OperandGenerator g(this);
+  Emit(kArchNop, g.DefineAsConstant(node));
+}
+
+
+void InstructionSelector::VisitGoto(BasicBlock* target) {
+  if (IsNextInAssemblyOrder(target)) {
+    // Fall through to the next block.
+    Emit(kArchNop, NULL)->MarkAsControl();
+  } else {
+    // Jump to the target block.
+    OperandGenerator g(this);
+    Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
+  }
+}
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  OperandGenerator g(this);
+  Node* user = branch;
+  Node* value = branch->InputAt(0);
+
+  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+
+  // If we can fall through to the true block, invert the branch.
+  if (IsNextInAssemblyOrder(tbranch)) {
+    cont.Negate();
+    cont.SwapBlocks();
+  }
+
+  // Try to combine with comparisons against 0 by simply inverting the branch.
+  while (CanCover(user, value)) {
+    if (value->opcode() == IrOpcode::kWord32Equal) {
+      Int32BinopMatcher m(value);
+      if (m.right().Is(0)) {
+        user = value;
+        value = m.left().node();
+        cont.Negate();
+      } else {
+        break;
+      }
+    } else if (value->opcode() == IrOpcode::kWord64Equal) {
+      Int64BinopMatcher m(value);
+      if (m.right().Is(0)) {
+        user = value;
+        value = m.left().node();
+        cont.Negate();
+      } else {
+        break;
+      }
+    } else {
+      break;
+    }
+  }
+
+  // Try to combine the branch with a comparison.
+  if (CanCover(user, value)) {
+    switch (value->opcode()) {
+      case IrOpcode::kWord32Equal:
+        cont.OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kInt32LessThan:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kInt32LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kUint32LessThan:
+        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kUint32LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kWord64Equal:
+        cont.OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord64Compare(value, &cont);
+      case IrOpcode::kInt64LessThan:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord64Compare(value, &cont);
+      case IrOpcode::kInt64LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord64Compare(value, &cont);
+      case IrOpcode::kFloat64Equal:
+        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
+        return VisitFloat64Compare(value, &cont);
+      case IrOpcode::kFloat64LessThan:
+        cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
+        return VisitFloat64Compare(value, &cont);
+      case IrOpcode::kFloat64LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+        return VisitFloat64Compare(value, &cont);
+      default:
+        break;
+    }
+  }
+
+  // Branch could not be combined with a compare; emit a compare against 0.
+  VisitWord32Test(value, &cont);
+}
+
+
+void InstructionSelector::VisitReturn(Node* value) {
+  OperandGenerator g(this);
+  if (value != NULL) {
+    Emit(kArchRet, NULL, g.UseLocation(value, linkage()->GetReturnLocation()));
+  } else {
+    Emit(kArchRet, NULL);
+  }
+}
+
+
+void InstructionSelector::VisitThrow(Node* value) {
+  UNIMPLEMENTED();  // TODO(titzer)
+}
+
+
+void InstructionSelector::VisitDeoptimization(Node* deopt) {
+  ASSERT(deopt->op()->opcode() == IrOpcode::kDeoptimize);
+  Node* state = deopt->InputAt(0);
+  ASSERT(state->op()->opcode() == IrOpcode::kFrameState);
+  FrameStateDescriptor descriptor = OpParameter<FrameStateDescriptor>(state);
+  // TODO(jarin) We should also add an instruction input for every input to
+  // the framestate node (and recurse for the inlined framestates).
+  int deoptimization_id = sequence()->AddDeoptimizationEntry(descriptor);
+  Emit(kArchDeoptimize | MiscField::encode(deoptimization_id), NULL);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
new file mode 100644 (file)
index 0000000..8be6e95
--- /dev/null
@@ -0,0 +1,169 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_H_
+#define V8_COMPILER_INSTRUCTION_SELECTOR_H_
+
+#include <deque>
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/machine-operator.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+struct CallBuffer;  // TODO(bmeurer): Remove this.
+class FlagsContinuation;
+
+class InstructionSelector V8_FINAL {
+ public:
+  explicit InstructionSelector(InstructionSequence* sequence,
+                               SourcePositionTable* source_positions);
+
+  // Visit code for the entire graph with the included schedule.
+  void SelectInstructions();
+
+  // ===========================================================================
+  // ============= Architecture-independent code emission methods. =============
+  // ===========================================================================
+
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    size_t temp_count = 0, InstructionOperand** temps = NULL);
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    InstructionOperand* a, size_t temp_count = 0,
+                    InstructionOperand** temps = NULL);
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    InstructionOperand* a, InstructionOperand* b,
+                    size_t temp_count = 0, InstructionOperand** temps = NULL);
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    InstructionOperand* a, InstructionOperand* b,
+                    InstructionOperand* c, size_t temp_count = 0,
+                    InstructionOperand** temps = NULL);
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    InstructionOperand* a, InstructionOperand* b,
+                    InstructionOperand* c, InstructionOperand* d,
+                    size_t temp_count = 0, InstructionOperand** temps = NULL);
+  Instruction* Emit(InstructionCode opcode, size_t output_count,
+                    InstructionOperand** outputs, size_t input_count,
+                    InstructionOperand** inputs, size_t temp_count = 0,
+                    InstructionOperand** temps = NULL);
+  Instruction* Emit(Instruction* instr);
+
+ private:
+  friend class OperandGenerator;
+
+  // ===========================================================================
+  // ============ Architecture-independent graph covering methods. =============
+  // ===========================================================================
+
+  // Checks if {block} will appear directly after {current_block_} when
+  // assembling code, in which case a fall-through can be used.
+  bool IsNextInAssemblyOrder(const BasicBlock* block) const;
+
+  // Used in pattern matching during code generation.
+  // Check if {node} can be covered while generating code for the current
+  // instruction. A node can be covered if {user} is its only use and the two
+  // are in the same basic block.
+  bool CanCover(Node* user, Node* node) const;
+
+  // Checks if {node} has any uses, and therefore code has to be generated for
+  // it.
+  bool IsUsed(Node* node) const;
+
+  // Inform the instruction selection that {node} has at least one use and we
+  // will need to generate code for it.
+  void MarkAsUsed(Node* node);
+
+  // Checks if {node} is marked as double.
+  bool IsDouble(const Node* node) const;
+
+  // Inform the register allocator of a double result.
+  void MarkAsDouble(Node* node);
+
+  // Checks if {node} is marked as reference.
+  bool IsReference(const Node* node) const;
+
+  // Inform the register allocator of a reference result.
+  void MarkAsReference(Node* node);
+
+  // Inform the register allocator of the representation of the value produced
+  // by {node}.
+  void MarkAsRepresentation(MachineRepresentation rep, Node* node);
+
+  // Initialize the call buffer with the InstructionOperands, nodes, etc.
+  // corresponding to the inputs and outputs of the call.
+  // {call_code_immediate} requests an immediate operand when calling a code
+  // object; {call_address_immediate} requests one when calling an address.
+  void InitializeCallBuffer(Node* call, CallBuffer* buffer,
+                            bool call_code_immediate,
+                            bool call_address_immediate, BasicBlock* cont_node,
+                            BasicBlock* deopt_node);
+
+  // ===========================================================================
+  // ============= Architecture-specific graph covering methods. ===============
+  // ===========================================================================
+
+  // Visit nodes in the given block and generate code.
+  void VisitBlock(BasicBlock* block);
+
+  // Visit the node for the control flow at the end of the block, generating
+  // code if necessary.
+  void VisitControl(BasicBlock* block);
+
+  // Visit the node and generate code, if any.
+  void VisitNode(Node* node);
+
+#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
+  MACHINE_OP_LIST(DECLARE_GENERATOR)
+#undef DECLARE_GENERATOR
+
+  void VisitWord32Test(Node* node, FlagsContinuation* cont);
+  void VisitWord64Test(Node* node, FlagsContinuation* cont);
+  void VisitWord32Compare(Node* node, FlagsContinuation* cont);
+  void VisitWord64Compare(Node* node, FlagsContinuation* cont);
+  void VisitFloat64Compare(Node* node, FlagsContinuation* cont);
+
+  void VisitPhi(Node* node);
+  void VisitParameter(Node* node);
+  void VisitConstant(Node* node);
+  void VisitCall(Node* call, BasicBlock* continuation,
+                 BasicBlock* deoptimization);
+  void VisitGoto(BasicBlock* target);
+  void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
+  void VisitReturn(Node* value);
+  void VisitThrow(Node* value);
+  void VisitDeoptimization(Node* deopt);
+
+  // ===========================================================================
+
+  Graph* graph() const { return sequence()->graph(); }
+  Linkage* linkage() const { return sequence()->linkage(); }
+  Schedule* schedule() const { return sequence()->schedule(); }
+  InstructionSequence* sequence() const { return sequence_; }
+  Zone* instruction_zone() const { return sequence()->zone(); }
+  Zone* zone() { return &zone_; }
+
+  // ===========================================================================
+
+  typedef zone_allocator<Instruction*> InstructionPtrZoneAllocator;
+  typedef std::deque<Instruction*, InstructionPtrZoneAllocator> Instructions;
+
+  Zone zone_;
+  InstructionSequence* sequence_;
+  SourcePositionTable* source_positions_;
+  BasicBlock* current_block_;
+  Instructions instructions_;
+  BoolVector used_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_INSTRUCTION_SELECTOR_H_
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
new file mode 100644 (file)
index 0000000..39852bf
--- /dev/null
@@ -0,0 +1,479 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction.h"
+
+#include "src/compiler/common-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OStream& operator<<(OStream& os, const InstructionOperand& op) {
+  switch (op.kind()) {
+    case InstructionOperand::INVALID:
+      return os << "(0)";
+    case InstructionOperand::UNALLOCATED: {
+      const UnallocatedOperand* unalloc = UnallocatedOperand::cast(&op);
+      os << "v" << unalloc->virtual_register();
+      if (unalloc->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
+        return os << "(=" << unalloc->fixed_slot_index() << "S)";
+      }
+      switch (unalloc->extended_policy()) {
+        case UnallocatedOperand::NONE:
+          return os;
+        case UnallocatedOperand::FIXED_REGISTER:
+          return os << "(=" << Register::AllocationIndexToString(
+                                   unalloc->fixed_register_index()) << ")";
+        case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
+          return os << "(=" << DoubleRegister::AllocationIndexToString(
+                                   unalloc->fixed_register_index()) << ")";
+        case UnallocatedOperand::MUST_HAVE_REGISTER:
+          return os << "(R)";
+        case UnallocatedOperand::SAME_AS_FIRST_INPUT:
+          return os << "(1)";
+        case UnallocatedOperand::ANY:
+          return os << "(-)";
+      }
+    }
+    case InstructionOperand::CONSTANT:
+      return os << "[constant:" << op.index() << "]";
+    case InstructionOperand::IMMEDIATE:
+      return os << "[immediate:" << op.index() << "]";
+    case InstructionOperand::STACK_SLOT:
+      return os << "[stack:" << op.index() << "]";
+    case InstructionOperand::DOUBLE_STACK_SLOT:
+      return os << "[double_stack:" << op.index() << "]";
+    case InstructionOperand::REGISTER:
+      return os << "[" << Register::AllocationIndexToString(op.index())
+                << "|R]";
+    case InstructionOperand::DOUBLE_REGISTER:
+      return os << "[" << DoubleRegister::AllocationIndexToString(op.index())
+                << "|R]";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+SubKindOperand<kOperandKind, kNumCachedOperands>*
+    SubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
+
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+void SubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
+  if (cache) return;
+  cache = new SubKindOperand[kNumCachedOperands];
+  for (int i = 0; i < kNumCachedOperands; i++) {
+    cache[i].ConvertTo(kOperandKind, i);
+  }
+}
+
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+void SubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
+  delete[] cache;
+}
+
+
+void InstructionOperand::SetUpCaches() {
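+  // Each INSTRUCTION_OPERAND_LIST entry expands to a per-subkind call here,
+  // e.g. the (Constant, CONSTANT, 128) entry becomes
+  // ConstantOperand::SetUpCache().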
+#define INSTRUCTION_OPERAND_SETUP(name, type, number) \
+  name##Operand::SetUpCache();
+  INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_SETUP)
+#undef INSTRUCTION_OPERAND_SETUP
+}
+
+
+void InstructionOperand::TearDownCaches() {
+#define INSTRUCTION_OPERAND_TEARDOWN(name, type, number) \
+  name##Operand::TearDownCache();
+  INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_TEARDOWN)
+#undef INSTRUCTION_OPERAND_TEARDOWN
+}
+
+
+OStream& operator<<(OStream& os, const MoveOperands& mo) {
+  os << *mo.destination();
+  if (!mo.source()->Equals(mo.destination())) os << " = " << *mo.source();
+  return os << ";";
+}
+
+
+bool ParallelMove::IsRedundant() const {
+  for (int i = 0; i < move_operands_.length(); ++i) {
+    if (!move_operands_[i].IsRedundant()) return false;
+  }
+  return true;
+}
+
+
+OStream& operator<<(OStream& os, const ParallelMove& pm) {
+  bool first = true;
+  for (ZoneList<MoveOperands>::iterator move = pm.move_operands()->begin();
+       move != pm.move_operands()->end(); ++move) {
+    if (move->IsEliminated()) continue;
+    if (!first) os << " ";
+    first = false;
+    os << *move;
+  }
+  return os;
+}
+
+
+void PointerMap::RecordPointer(InstructionOperand* op, Zone* zone) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  pointer_operands_.Add(op, zone);
+}
+
+
+void PointerMap::RemovePointer(InstructionOperand* op) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  for (int i = 0; i < pointer_operands_.length(); ++i) {
+    if (pointer_operands_[i]->Equals(op)) {
+      pointer_operands_.Remove(i);
+      --i;
+    }
+  }
+}
+
+
+void PointerMap::RecordUntagged(InstructionOperand* op, Zone* zone) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  untagged_operands_.Add(op, zone);
+}
+
+
+OStream& operator<<(OStream& os, const PointerMap& pm) {
+  os << "{";
+  for (ZoneList<InstructionOperand*>::iterator op =
+           pm.pointer_operands_.begin();
+       op != pm.pointer_operands_.end(); ++op) {
+    if (op != pm.pointer_operands_.begin()) os << ";";
+    os << *op;
+  }
+  return os << "}";
+}
+
+
+OStream& operator<<(OStream& os, const ArchOpcode& ao) {
+  switch (ao) {
+#define CASE(Name) \
+  case k##Name:    \
+    return os << #Name;
+    ARCH_OPCODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const AddressingMode& am) {
+  switch (am) {
+    case kMode_None:
+      return os;
+#define CASE(Name)   \
+  case kMode_##Name: \
+    return os << #Name;
+      TARGET_ADDRESSING_MODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const FlagsMode& fm) {
+  switch (fm) {
+    case kFlags_none:
+      return os;
+    case kFlags_branch:
+      return os << "branch";
+    case kFlags_set:
+      return os << "set";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const FlagsCondition& fc) {
+  switch (fc) {
+    case kEqual:
+      return os << "equal";
+    case kNotEqual:
+      return os << "not equal";
+    case kSignedLessThan:
+      return os << "signed less than";
+    case kSignedGreaterThanOrEqual:
+      return os << "signed greater than or equal";
+    case kSignedLessThanOrEqual:
+      return os << "signed less than or equal";
+    case kSignedGreaterThan:
+      return os << "signed greater than";
+    case kUnsignedLessThan:
+      return os << "unsigned less than";
+    case kUnsignedGreaterThanOrEqual:
+      return os << "unsigned greater than or equal";
+    case kUnsignedLessThanOrEqual:
+      return os << "unsigned less than or equal";
+    case kUnsignedGreaterThan:
+      return os << "unsigned greater than";
+    case kUnorderedEqual:
+      return os << "unordered equal";
+    case kUnorderedNotEqual:
+      return os << "unordered not equal";
+    case kUnorderedLessThan:
+      return os << "unordered less than";
+    case kUnorderedGreaterThanOrEqual:
+      return os << "unordered greater than or equal";
+    case kUnorderedLessThanOrEqual:
+      return os << "unordered less than or equal";
+    case kUnorderedGreaterThan:
+      return os << "unordered greater than";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const Instruction& instr) {
+  if (instr.OutputCount() > 1) os << "(";
+  for (size_t i = 0; i < instr.OutputCount(); i++) {
+    if (i > 0) os << ", ";
+    os << *instr.OutputAt(i);
+  }
+
+  if (instr.OutputCount() > 1) os << ") = ";
+  if (instr.OutputCount() == 1) os << " = ";
+
+  if (instr.IsGapMoves()) {
+    const GapInstruction* gap = GapInstruction::cast(&instr);
+    os << (instr.IsBlockStart() ? "block-start " : "gap ");
+    for (int i = GapInstruction::FIRST_INNER_POSITION;
+         i <= GapInstruction::LAST_INNER_POSITION; i++) {
+      os << "(";
+      if (gap->parallel_moves_[i] != NULL) os << *gap->parallel_moves_[i];
+      os << ") ";
+    }
+  } else if (instr.IsSourcePosition()) {
+    const SourcePositionInstruction* pos =
+        SourcePositionInstruction::cast(&instr);
+    os << "position (" << pos->source_position().raw() << ")";
+  } else {
+    os << ArchOpcodeField::decode(instr.opcode());
+    AddressingMode am = AddressingModeField::decode(instr.opcode());
+    if (am != kMode_None) {
+      os << " : " << AddressingModeField::decode(instr.opcode());
+    }
+    FlagsMode fm = FlagsModeField::decode(instr.opcode());
+    if (fm != kFlags_none) {
+      os << " && " << fm << " if "
+         << FlagsConditionField::decode(instr.opcode());
+    }
+  }
+  if (instr.InputCount() > 0) {
+    for (size_t i = 0; i < instr.InputCount(); i++) {
+      os << " " << *instr.InputAt(i);
+    }
+  }
+  return os << "\n";
+}
+
+
+OStream& operator<<(OStream& os, const Constant& constant) {
+  switch (constant.type()) {
+    case Constant::kInt32:
+      return os << constant.ToInt32();
+    case Constant::kInt64:
+      return os << constant.ToInt64() << "l";
+    case Constant::kFloat64:
+      return os << constant.ToFloat64();
+    case Constant::kExternalReference:
+      return os << constant.ToExternalReference().address();
+    case Constant::kHeapObject:
+      return os << Brief(*constant.ToHeapObject());
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+Label* InstructionSequence::GetLabel(BasicBlock* block) {
+  return GetBlockStart(block)->label();
+}
+
+
+BlockStartInstruction* InstructionSequence::GetBlockStart(BasicBlock* block) {
+  return BlockStartInstruction::cast(InstructionAt(block->code_start_));
+}
+
+
+void InstructionSequence::StartBlock(BasicBlock* block) {
+  block->code_start_ = static_cast<int>(instructions_.size());
+  BlockStartInstruction* block_start =
+      BlockStartInstruction::New(zone(), block);
+  AddInstruction(block_start, block);
+}
+
+
+void InstructionSequence::EndBlock(BasicBlock* block) {
+  int end = static_cast<int>(instructions_.size());
+  ASSERT(block->code_start_ >= 0 && block->code_start_ < end);
+  block->code_end_ = end;
+}
+
+
+int InstructionSequence::AddInstruction(Instruction* instr, BasicBlock* block) {
+  // TODO(titzer): the order of these gaps is a holdover from Lithium.
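+  // A control instruction is preceded by its gap while any other instruction
+  // is followed by it, i.e. [..., gap, branch] vs. [..., add, gap].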
+  GapInstruction* gap = GapInstruction::New(zone());
+  if (instr->IsControl()) instructions_.push_back(gap);
+  int index = static_cast<int>(instructions_.size());
+  instructions_.push_back(instr);
+  if (!instr->IsControl()) instructions_.push_back(gap);
+  if (instr->NeedsPointerMap()) {
+    ASSERT(instr->pointer_map() == NULL);
+    PointerMap* pointer_map = new (zone()) PointerMap(zone());
+    pointer_map->set_instruction_position(index);
+    instr->set_pointer_map(pointer_map);
+    pointer_maps_.push_back(pointer_map);
+  }
+  return index;
+}
+
+
+BasicBlock* InstructionSequence::GetBasicBlock(int instruction_index) {
+  // TODO(turbofan): Optimize this.
+  for (;;) {
+    ASSERT_LE(0, instruction_index);
+    Instruction* instruction = InstructionAt(instruction_index--);
+    if (instruction->IsBlockStart()) {
+      return BlockStartInstruction::cast(instruction)->block();
+    }
+  }
+}
+
+
+bool InstructionSequence::IsReference(int virtual_register) const {
+  return references_.find(virtual_register) != references_.end();
+}
+
+
+bool InstructionSequence::IsDouble(int virtual_register) const {
+  return doubles_.find(virtual_register) != doubles_.end();
+}
+
+
+void InstructionSequence::MarkAsReference(int virtual_register) {
+  references_.insert(virtual_register);
+}
+
+
+void InstructionSequence::MarkAsDouble(int virtual_register) {
+  doubles_.insert(virtual_register);
+}
+
+
+void InstructionSequence::AddGapMove(int index, InstructionOperand* from,
+                                     InstructionOperand* to) {
+  GapAt(index)->GetOrCreateParallelMove(GapInstruction::START, zone())->AddMove(
+      from, to, zone());
+}
+
+
+int InstructionSequence::AddDeoptimizationEntry(
+    const FrameStateDescriptor& descriptor) {
+  int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
+  deoptimization_entries_.push_back(descriptor);
+  return deoptimization_id;
+}
+
+
+FrameStateDescriptor InstructionSequence::GetDeoptimizationEntry(
+    int deoptimization_id) {
+  return deoptimization_entries_[deoptimization_id];
+}
+
+
+int InstructionSequence::GetDeoptimizationEntryCount() {
+  return static_cast<int>(deoptimization_entries_.size());
+}
+
+
+OStream& operator<<(OStream& os, const InstructionSequence& code) {
+  for (size_t i = 0; i < code.immediates_.size(); ++i) {
+    Constant constant = code.immediates_[i];
+    os << "IMM#" << i << ": " << constant << "\n";
+  }
+  int i = 0;
+  for (ConstantMap::const_iterator it = code.constants_.begin();
+       it != code.constants_.end(); ++i, ++it) {
+    os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n";
+  }
+  for (int i = 0; i < code.BasicBlockCount(); i++) {
+    BasicBlock* block = code.BlockAt(i);
+
+    int bid = block->id();
+    os << "RPO#" << block->rpo_number_ << ": B" << bid;
+    CHECK(block->rpo_number_ == i);
+    if (block->IsLoopHeader()) {
+      os << " loop blocks: [" << block->rpo_number_ << ", " << block->loop_end_
+         << ")";
+    }
+    os << "  instructions: [" << block->code_start_ << ", " << block->code_end_
+       << ")\n  predecessors:";
+
+    BasicBlock::Predecessors predecessors = block->predecessors();
+    for (BasicBlock::Predecessors::iterator iter = predecessors.begin();
+         iter != predecessors.end(); ++iter) {
+      os << " B" << (*iter)->id();
+    }
+    os << "\n";
+
+    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+         ++j) {
+      Node* phi = *j;
+      if (phi->opcode() != IrOpcode::kPhi) continue;
+      os << "     phi: v" << phi->id() << " =";
+      Node::Inputs inputs = phi->inputs();
+      for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+           ++iter) {
+        os << " v" << (*iter)->id();
+      }
+      os << "\n";
+    }
+
+    Vector<char> buf = Vector<char>::New(32);
+    for (int j = block->first_instruction_index();
+         j <= block->last_instruction_index(); j++) {
+      // TODO(svenpanne) Add some basic formatting to our streams.
+      SNPrintF(buf, "%5d", j);
+      os << "   " << buf.start() << ": " << *code.InstructionAt(j);
+    }
+
+    os << "  " << block->control_;
+
+    if (block->control_input_ != NULL) {
+      os << " v" << block->control_input_->id();
+    }
+
+    BasicBlock::Successors successors = block->successors();
+    for (BasicBlock::Successors::iterator iter = successors.begin();
+         iter != successors.end(); ++iter) {
+      os << " B" << (*iter)->id();
+    }
+    os << "\n";
+  }
+  return os;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
new file mode 100644 (file)
index 0000000..c461955
--- /dev/null
@@ -0,0 +1,843 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_H_
+#define V8_COMPILER_INSTRUCTION_H_
+
+#include <deque>
+#include <map>
+#include <set>
+
+// TODO(titzer): don't include the assembler?
+#include "src/assembler.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/frame.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction-codes.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/schedule.h"
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class OStream;
+
+namespace compiler {
+
+// Forward declarations.
+class Linkage;
+
+// A couple of opcodes are reserved for internal use.
+const InstructionCode kGapInstruction = -1;
+const InstructionCode kBlockStartInstruction = -2;
+const InstructionCode kSourcePositionInstruction = -3;
+
+
+#define INSTRUCTION_OPERAND_LIST(V)              \
+  V(Constant, CONSTANT, 128)                     \
+  V(Immediate, IMMEDIATE, 128)                   \
+  V(StackSlot, STACK_SLOT, 128)                  \
+  V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128)     \
+  V(Register, REGISTER, Register::kNumRegisters) \
+  V(DoubleRegister, DOUBLE_REGISTER, DoubleRegister::kMaxNumRegisters)
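+//
+// Each V(name, type, number) entry above describes one operand subkind:
+// {name} becomes the name##Operand typedef declared further below, {type} is
+// the corresponding Kind enum value, and {number} is how many instances are
+// cached statically (e.g. 128 ConstantOperands).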
+
+class InstructionOperand : public ZoneObject {
+ public:
+  enum Kind {
+    INVALID,
+    UNALLOCATED,
+    CONSTANT,
+    IMMEDIATE,
+    STACK_SLOT,
+    DOUBLE_STACK_SLOT,
+    REGISTER,
+    DOUBLE_REGISTER
+  };
+
+  InstructionOperand() : value_(KindField::encode(INVALID)) {}
+  InstructionOperand(Kind kind, int index) { ConvertTo(kind, index); }
+
+  Kind kind() const { return KindField::decode(value_); }
+  int index() const { return static_cast<int>(value_) >> KindField::kSize; }
+#define INSTRUCTION_OPERAND_PREDICATE(name, type, number) \
+  bool Is##name() const { return kind() == type; }
+  INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_PREDICATE)
+  INSTRUCTION_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
+  INSTRUCTION_OPERAND_PREDICATE(Ignored, INVALID, 0)
+#undef INSTRUCTION_OPERAND_PREDICATE
+  bool Equals(InstructionOperand* other) const {
+    return value_ == other->value_;
+  }
+
+  void ConvertTo(Kind kind, int index) {
+    if (kind == REGISTER || kind == DOUBLE_REGISTER) ASSERT(index >= 0);
+    value_ = KindField::encode(kind);
+    value_ |= index << KindField::kSize;
+    ASSERT(this->index() == index);
+  }
+
+  // Calls SetUpCache()/TearDownCache() for each subclass.
+  static void SetUpCaches();
+  static void TearDownCaches();
+
+ protected:
+  typedef BitField<Kind, 0, 3> KindField;
+
+  unsigned value_;
+};
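+
+// Example of the packing in ConvertTo() above: a REGISTER operand (Kind
+// value 6) with index 3 stores value_ = 6 | (3 << 3) = 30; kind() decodes
+// the low three bits and index() shifts them away.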
+
+OStream& operator<<(OStream& os, const InstructionOperand& op);
+
+class UnallocatedOperand : public InstructionOperand {
+ public:
+  enum BasicPolicy { FIXED_SLOT, EXTENDED_POLICY };
+
+  enum ExtendedPolicy {
+    NONE,
+    ANY,
+    FIXED_REGISTER,
+    FIXED_DOUBLE_REGISTER,
+    MUST_HAVE_REGISTER,
+    SAME_AS_FIRST_INPUT
+  };
+
+  // Lifetime of an operand inside the instruction.
+  enum Lifetime {
+    // A USED_AT_START operand is guaranteed to be live only at the
+    // instruction start. The register allocator is free to assign the same
+    // register to some other operand used inside the instruction (i.e. a
+    // temporary or an output).
+    USED_AT_START,
+
+    // A USED_AT_END operand is treated as live until the end of the
+    // instruction. This means that the register allocator will not reuse its
+    // register for any other operand inside the instruction.
+    USED_AT_END
+  };
+
+  explicit UnallocatedOperand(ExtendedPolicy policy)
+      : InstructionOperand(UNALLOCATED, 0) {
+    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+    value_ |= ExtendedPolicyField::encode(policy);
+    value_ |= LifetimeField::encode(USED_AT_END);
+  }
+
+  UnallocatedOperand(BasicPolicy policy, int index)
+      : InstructionOperand(UNALLOCATED, 0) {
+    ASSERT(policy == FIXED_SLOT);
+    value_ |= BasicPolicyField::encode(policy);
+    value_ |= index << FixedSlotIndexField::kShift;
+    ASSERT(this->fixed_slot_index() == index);
+  }
+
+  UnallocatedOperand(ExtendedPolicy policy, int index)
+      : InstructionOperand(UNALLOCATED, 0) {
+    ASSERT(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
+    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+    value_ |= ExtendedPolicyField::encode(policy);
+    value_ |= LifetimeField::encode(USED_AT_END);
+    value_ |= FixedRegisterField::encode(index);
+  }
+
+  UnallocatedOperand(ExtendedPolicy policy, Lifetime lifetime)
+      : InstructionOperand(UNALLOCATED, 0) {
+    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+    value_ |= ExtendedPolicyField::encode(policy);
+    value_ |= LifetimeField::encode(lifetime);
+  }
+
+  UnallocatedOperand* CopyUnconstrained(Zone* zone) {
+    UnallocatedOperand* result = new (zone) UnallocatedOperand(ANY);
+    result->set_virtual_register(virtual_register());
+    return result;
+  }
+
+  static const UnallocatedOperand* cast(const InstructionOperand* op) {
+    ASSERT(op->IsUnallocated());
+    return static_cast<const UnallocatedOperand*>(op);
+  }
+
+  static UnallocatedOperand* cast(InstructionOperand* op) {
+    ASSERT(op->IsUnallocated());
+    return static_cast<UnallocatedOperand*>(op);
+  }
+
+  // The encoding used for UnallocatedOperand operands depends on the policy
+  // that is stored within the operand. The FIXED_SLOT policy uses a more
+  // compact encoding because it has to accommodate a larger payload (the
+  // signed slot index).
+  //
+  // For FIXED_SLOT policy:
+  //     +------------------------------------------+
+  //     |       slot_index      |  vreg  | 0 | 001 |
+  //     +------------------------------------------+
+  //
+  // For all other (extended) policies:
+  //     +------------------------------------------+
+  //     |  reg_index  | L | PPP |  vreg  | 1 | 001 |    L ... Lifetime
+  //     +------------------------------------------+    P ... Policy
+  //
+  // The slot index is a signed value which requires us to decode it manually
+  // instead of using the BitField utility class.
+
+  // The superclass has a KindField.
+  STATIC_ASSERT(KindField::kSize == 3);
+
+  // BitFields for all unallocated operands.
+  class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {};
+  class VirtualRegisterField : public BitField<unsigned, 4, 18> {};
+
+  // BitFields specific to BasicPolicy::FIXED_SLOT.
+  class FixedSlotIndexField : public BitField<int, 22, 10> {};
+
+  // BitFields specific to BasicPolicy::EXTENDED_POLICY.
+  class ExtendedPolicyField : public BitField<ExtendedPolicy, 22, 3> {};
+  class LifetimeField : public BitField<Lifetime, 25, 1> {};
+  class FixedRegisterField : public BitField<int, 26, 6> {};
+
+  static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1;
+  static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
+  static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
+  static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));
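+
+  // A worked example of the layout above: UnallocatedOperand(FIXED_SLOT, -2)
+  // keeps the signed slot index -2 in the topmost bits, which is why
+  // fixed_slot_index() decodes with a plain arithmetic shift rather than
+  // FixedSlotIndexField::decode(); UnallocatedOperand(FIXED_REGISTER, 5)
+  // instead sets the extended-policy bit and stores 5 in FixedRegisterField.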
+
+  // Predicates for the operand policy.
+  bool HasAnyPolicy() const {
+    return basic_policy() == EXTENDED_POLICY && extended_policy() == ANY;
+  }
+  bool HasFixedPolicy() const {
+    return basic_policy() == FIXED_SLOT ||
+           extended_policy() == FIXED_REGISTER ||
+           extended_policy() == FIXED_DOUBLE_REGISTER;
+  }
+  bool HasRegisterPolicy() const {
+    return basic_policy() == EXTENDED_POLICY &&
+           extended_policy() == MUST_HAVE_REGISTER;
+  }
+  bool HasSameAsInputPolicy() const {
+    return basic_policy() == EXTENDED_POLICY &&
+           extended_policy() == SAME_AS_FIRST_INPUT;
+  }
+  bool HasFixedSlotPolicy() const { return basic_policy() == FIXED_SLOT; }
+  bool HasFixedRegisterPolicy() const {
+    return basic_policy() == EXTENDED_POLICY &&
+           extended_policy() == FIXED_REGISTER;
+  }
+  bool HasFixedDoubleRegisterPolicy() const {
+    return basic_policy() == EXTENDED_POLICY &&
+           extended_policy() == FIXED_DOUBLE_REGISTER;
+  }
+
+  // [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
+  BasicPolicy basic_policy() const { return BasicPolicyField::decode(value_); }
+
+  // [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
+  ExtendedPolicy extended_policy() const {
+    ASSERT(basic_policy() == EXTENDED_POLICY);
+    return ExtendedPolicyField::decode(value_);
+  }
+
+  // [fixed_slot_index]: Only for FIXED_SLOT.
+  int fixed_slot_index() const {
+    ASSERT(HasFixedSlotPolicy());
+    return static_cast<int>(value_) >> FixedSlotIndexField::kShift;
+  }
+
+  // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
+  int fixed_register_index() const {
+    ASSERT(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy());
+    return FixedRegisterField::decode(value_);
+  }
+
+  // [virtual_register]: The virtual register ID for this operand.
+  int virtual_register() const { return VirtualRegisterField::decode(value_); }
+  void set_virtual_register(unsigned id) {
+    value_ = VirtualRegisterField::update(value_, id);
+  }
+
+  // [lifetime]: Only for non-FIXED_SLOT.
+  bool IsUsedAtStart() {
+    ASSERT(basic_policy() == EXTENDED_POLICY);
+    return LifetimeField::decode(value_) == USED_AT_START;
+  }
+};
+
+
+class MoveOperands V8_FINAL BASE_EMBEDDED {
+ public:
+  MoveOperands(InstructionOperand* source, InstructionOperand* destination)
+      : source_(source), destination_(destination) {}
+
+  InstructionOperand* source() const { return source_; }
+  void set_source(InstructionOperand* operand) { source_ = operand; }
+
+  InstructionOperand* destination() const { return destination_; }
+  void set_destination(InstructionOperand* operand) { destination_ = operand; }
+
+  // The gap resolver marks moves as "in-progress" by clearing the
+  // destination (but not the source).
+  bool IsPending() const { return destination_ == NULL && source_ != NULL; }
+
+  // True if this move blocks a move into the given destination operand,
+  // i.e. this move still reads {operand} as its source.
+  bool Blocks(InstructionOperand* operand) const {
+    return !IsEliminated() && source()->Equals(operand);
+  }
+
+  // A move is redundant if it's been eliminated, if its source and
+  // destination are the same, or if its destination is unneeded or constant.
+  bool IsRedundant() const {
+    return IsEliminated() || source_->Equals(destination_) || IsIgnored() ||
+           (destination_ != NULL && destination_->IsConstant());
+  }
+
+  bool IsIgnored() const {
+    return destination_ != NULL && destination_->IsIgnored();
+  }
+
+  // We clear both operands to indicate a move that's been eliminated.
+  void Eliminate() { source_ = destination_ = NULL; }
+  bool IsEliminated() const {
+    ASSERT(source_ != NULL || destination_ == NULL);
+    return source_ == NULL;
+  }
+
+ private:
+  InstructionOperand* source_;
+  InstructionOperand* destination_;
+};
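+
+// For example, a move whose source Equals() its destination is redundant,
+// and a ParallelMove consisting solely of such moves reports IsRedundant()
+// below.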
+
+OStream& operator<<(OStream& os, const MoveOperands& mo);
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+class SubKindOperand V8_FINAL : public InstructionOperand {
+ public:
+  static SubKindOperand* Create(int index, Zone* zone) {
+    ASSERT(index >= 0);
+    if (index < kNumCachedOperands) return &cache[index];
+    return new (zone) SubKindOperand(index);
+  }
+
+  static SubKindOperand* cast(InstructionOperand* op) {
+    ASSERT(op->kind() == kOperandKind);
+    return reinterpret_cast<SubKindOperand*>(op);
+  }
+
+  static void SetUpCache();
+  static void TearDownCache();
+
+ private:
+  static SubKindOperand* cache;
+
+  SubKindOperand() : InstructionOperand() {}
+  explicit SubKindOperand(int index)
+      : InstructionOperand(kOperandKind, index) {}
+};
+
+
+#define INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS(name, type, number) \
+  typedef SubKindOperand<InstructionOperand::type, number> name##Operand;
+INSTRUCTION_OPERAND_LIST(INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS)
+#undef INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS
+
+
+class ParallelMove V8_FINAL : public ZoneObject {
+ public:
+  explicit ParallelMove(Zone* zone) : move_operands_(4, zone) {}
+
+  void AddMove(InstructionOperand* from, InstructionOperand* to, Zone* zone) {
+    move_operands_.Add(MoveOperands(from, to), zone);
+  }
+
+  bool IsRedundant() const;
+
+  ZoneList<MoveOperands>* move_operands() { return &move_operands_; }
+  const ZoneList<MoveOperands>* move_operands() const {
+    return &move_operands_;
+  }
+
+ private:
+  ZoneList<MoveOperands> move_operands_;
+};
+
+OStream& operator<<(OStream& os, const ParallelMove& pm);
+
+class PointerMap V8_FINAL : public ZoneObject {
+ public:
+  explicit PointerMap(Zone* zone)
+      : pointer_operands_(8, zone),
+        untagged_operands_(0, zone),
+        instruction_position_(-1) {}
+
+  const ZoneList<InstructionOperand*>* GetNormalizedOperands() {
+    for (int i = 0; i < untagged_operands_.length(); ++i) {
+      RemovePointer(untagged_operands_[i]);
+    }
+    untagged_operands_.Clear();
+    return &pointer_operands_;
+  }
+  int instruction_position() const { return instruction_position_; }
+
+  void set_instruction_position(int pos) {
+    ASSERT(instruction_position_ == -1);
+    instruction_position_ = pos;
+  }
+
+  void RecordPointer(InstructionOperand* op, Zone* zone);
+  void RemovePointer(InstructionOperand* op);
+  void RecordUntagged(InstructionOperand* op, Zone* zone);
+
+ private:
+  friend OStream& operator<<(OStream& os, const PointerMap& pm);
+
+  ZoneList<InstructionOperand*> pointer_operands_;
+  ZoneList<InstructionOperand*> untagged_operands_;
+  int instruction_position_;
+};
+
+OStream& operator<<(OStream& os, const PointerMap& pm);
+
+// TODO(titzer): s/PointerMap/ReferenceMap/
+class Instruction : public ZoneObject {
+ public:
+  size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
+  InstructionOperand* Output() const { return OutputAt(0); }
+  InstructionOperand* OutputAt(size_t i) const {
+    ASSERT(i < OutputCount());
+    return operands_[i];
+  }
+
+  size_t InputCount() const { return InputCountField::decode(bit_field_); }
+  InstructionOperand* InputAt(size_t i) const {
+    ASSERT(i < InputCount());
+    return operands_[OutputCount() + i];
+  }
+
+  size_t TempCount() const { return TempCountField::decode(bit_field_); }
+  InstructionOperand* TempAt(size_t i) const {
+    ASSERT(i < TempCount());
+    return operands_[OutputCount() + InputCount() + i];
+  }
+
+  InstructionCode opcode() const { return opcode_; }
+  ArchOpcode arch_opcode() const { return ArchOpcodeField::decode(opcode()); }
+  AddressingMode addressing_mode() const {
+    return AddressingModeField::decode(opcode());
+  }
+  FlagsMode flags_mode() const { return FlagsModeField::decode(opcode()); }
+  FlagsCondition flags_condition() const {
+    return FlagsConditionField::decode(opcode());
+  }
+
+  // TODO(titzer): make control and call into flags.
+  static Instruction* New(Zone* zone, InstructionCode opcode) {
+    return New(zone, opcode, 0, NULL, 0, NULL, 0, NULL);
+  }
+
+  static Instruction* New(Zone* zone, InstructionCode opcode,
+                          size_t output_count, InstructionOperand** outputs,
+                          size_t input_count, InstructionOperand** inputs,
+                          size_t temp_count, InstructionOperand** temps) {
+    ASSERT(opcode >= 0);
+    ASSERT(output_count == 0 || outputs != NULL);
+    ASSERT(input_count == 0 || inputs != NULL);
+    ASSERT(temp_count == 0 || temps != NULL);
+    InstructionOperand* none = NULL;
+    USE(none);
+    size_t operand_count = output_count + input_count + temp_count;
+    // One operand slot is already embedded in the class (operands_[1]);
+    // guard against size_t underflow when there are no operands at all.
+    size_t size = RoundUp(sizeof(Instruction), kPointerSize) +
+                  (operand_count == 0 ? 0 : operand_count - 1) * sizeof(none);
+    return new (zone->New(size)) Instruction(
+        opcode, output_count, outputs, input_count, inputs, temp_count, temps);
+  }
+
+  // TODO(titzer): another holdover from lithium days; register allocator
+  // should not need to know about control instructions.
+  Instruction* MarkAsControl() {
+    bit_field_ = IsControlField::update(bit_field_, true);
+    return this;
+  }
+  Instruction* MarkAsCall() {
+    bit_field_ = IsCallField::update(bit_field_, true);
+    return this;
+  }
+  bool IsControl() const { return IsControlField::decode(bit_field_); }
+  bool IsCall() const { return IsCallField::decode(bit_field_); }
+  bool NeedsPointerMap() const { return IsCall(); }
+  bool HasPointerMap() const { return pointer_map_ != NULL; }
+
+  bool IsGapMoves() const {
+    return opcode() == kGapInstruction || opcode() == kBlockStartInstruction;
+  }
+  bool IsBlockStart() const { return opcode() == kBlockStartInstruction; }
+  bool IsSourcePosition() const {
+    return opcode() == kSourcePositionInstruction;
+  }
+
+  bool ClobbersRegisters() const { return IsCall(); }
+  bool ClobbersTemps() const { return IsCall(); }
+  bool ClobbersDoubleRegisters() const { return IsCall(); }
+  PointerMap* pointer_map() const { return pointer_map_; }
+
+  void set_pointer_map(PointerMap* map) {
+    ASSERT(NeedsPointerMap());
+    ASSERT_EQ(NULL, pointer_map_);
+    pointer_map_ = map;
+  }
+
+  // Placement new operator so that we can smash instructions into
+  // zone-allocated memory.
+  void* operator new(size_t, void* location) { return location; }
+
+ protected:
+  explicit Instruction(InstructionCode opcode)
+      : opcode_(opcode),
+        bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
+                   TempCountField::encode(0) | IsCallField::encode(false) |
+                   IsControlField::encode(false)),
+        pointer_map_(NULL) {}
+
+  Instruction(InstructionCode opcode, size_t output_count,
+              InstructionOperand** outputs, size_t input_count,
+              InstructionOperand** inputs, size_t temp_count,
+              InstructionOperand** temps)
+      : opcode_(opcode),
+        bit_field_(OutputCountField::encode(output_count) |
+                   InputCountField::encode(input_count) |
+                   TempCountField::encode(temp_count) |
+                   IsCallField::encode(false) | IsControlField::encode(false)),
+        pointer_map_(NULL) {
+    for (size_t i = 0; i < output_count; ++i) {
+      operands_[i] = outputs[i];
+    }
+    for (size_t i = 0; i < input_count; ++i) {
+      operands_[output_count + i] = inputs[i];
+    }
+    for (size_t i = 0; i < temp_count; ++i) {
+      operands_[output_count + input_count + i] = temps[i];
+    }
+  }
+
+ protected:
+  typedef BitField<size_t, 0, 8> OutputCountField;
+  typedef BitField<size_t, 8, 16> InputCountField;
+  typedef BitField<size_t, 24, 6> TempCountField;
+  typedef BitField<bool, 30, 1> IsCallField;
+  typedef BitField<bool, 31, 1> IsControlField;
+
+  InstructionCode opcode_;
+  uint32_t bit_field_;
+  PointerMap* pointer_map_;
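+  // operands_[1] is a trailing-array idiom: Instruction::New() allocates the
+  // extra slots right behind the object, so outputs, inputs and temps are
+  // laid out contiguously (outputs first, then inputs, then temps; see
+  // OutputAt/InputAt/TempAt above).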
+  InstructionOperand* operands_[1];
+};
+
+OStream& operator<<(OStream& os, const Instruction& instr);
+
+// Represents moves inserted before an instruction due to register allocation.
+// TODO(titzer): squash GapInstruction back into Instruction, since essentially
+// every instruction can possibly have moves inserted before it.
+class GapInstruction : public Instruction {
+ public:
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  ParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+    if (parallel_moves_[pos] == NULL) {
+      parallel_moves_[pos] = new (zone) ParallelMove(zone);
+    }
+    return parallel_moves_[pos];
+  }
+
+  ParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+  static GapInstruction* New(Zone* zone) {
+    void* buffer = zone->New(sizeof(GapInstruction));
+    return new (buffer) GapInstruction(kGapInstruction);
+  }
+
+  static GapInstruction* cast(Instruction* instr) {
+    ASSERT(instr->IsGapMoves());
+    return static_cast<GapInstruction*>(instr);
+  }
+
+  static const GapInstruction* cast(const Instruction* instr) {
+    ASSERT(instr->IsGapMoves());
+    return static_cast<const GapInstruction*>(instr);
+  }
+
+ protected:
+  explicit GapInstruction(InstructionCode opcode) : Instruction(opcode) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+ private:
+  friend OStream& operator<<(OStream& os, const Instruction& instr);
+  ParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+};
+
+
+// This special kind of gap move instruction represents the beginning of a
+// block of code.
+// TODO(titzer): move code_start and code_end from BasicBlock to here.
+class BlockStartInstruction V8_FINAL : public GapInstruction {
+ public:
+  BasicBlock* block() const { return block_; }
+  Label* label() { return &label_; }
+
+  static BlockStartInstruction* New(Zone* zone, BasicBlock* block) {
+    void* buffer = zone->New(sizeof(BlockStartInstruction));
+    return new (buffer) BlockStartInstruction(block);
+  }
+
+  static BlockStartInstruction* cast(Instruction* instr) {
+    ASSERT(instr->IsBlockStart());
+    return static_cast<BlockStartInstruction*>(instr);
+  }
+
+ private:
+  explicit BlockStartInstruction(BasicBlock* block)
+      : GapInstruction(kBlockStartInstruction), block_(block) {}
+
+  BasicBlock* block_;
+  Label label_;
+};
+
+
+class SourcePositionInstruction V8_FINAL : public Instruction {
+ public:
+  static SourcePositionInstruction* New(Zone* zone, SourcePosition position) {
+    void* buffer = zone->New(sizeof(SourcePositionInstruction));
+    return new (buffer) SourcePositionInstruction(position);
+  }
+
+  SourcePosition source_position() const { return source_position_; }
+
+  static SourcePositionInstruction* cast(Instruction* instr) {
+    ASSERT(instr->IsSourcePosition());
+    return static_cast<SourcePositionInstruction*>(instr);
+  }
+
+  static const SourcePositionInstruction* cast(const Instruction* instr) {
+    ASSERT(instr->IsSourcePosition());
+    return static_cast<const SourcePositionInstruction*>(instr);
+  }
+
+ private:
+  explicit SourcePositionInstruction(SourcePosition source_position)
+      : Instruction(kSourcePositionInstruction),
+        source_position_(source_position) {
+    ASSERT(!source_position_.IsInvalid());
+    ASSERT(!source_position_.IsUnknown());
+  }
+
+  SourcePosition source_position_;
+};
+
+
+class Constant V8_FINAL {
+ public:
+  enum Type { kInt32, kInt64, kFloat64, kExternalReference, kHeapObject };
+
+  explicit Constant(int32_t v) : type_(kInt32), value_(v) {}
+  explicit Constant(int64_t v) : type_(kInt64), value_(v) {}
+  explicit Constant(double v) : type_(kFloat64), value_(BitCast<int64_t>(v)) {}
+  explicit Constant(ExternalReference ref)
+      : type_(kExternalReference), value_(BitCast<intptr_t>(ref)) {}
+  explicit Constant(Handle<HeapObject> obj)
+      : type_(kHeapObject), value_(BitCast<intptr_t>(obj)) {}
+
+  Type type() const { return type_; }
+
+  int32_t ToInt32() const {
+    ASSERT_EQ(kInt32, type());
+    return static_cast<int32_t>(value_);
+  }
+
+  int64_t ToInt64() const {
+    if (type() == kInt32) return ToInt32();
+    ASSERT_EQ(kInt64, type());
+    return value_;
+  }
+
+  double ToFloat64() const {
+    if (type() == kInt32) return ToInt32();
+    ASSERT_EQ(kFloat64, type());
+    return BitCast<double>(value_);
+  }
+
+  ExternalReference ToExternalReference() const {
+    ASSERT_EQ(kExternalReference, type());
+    return BitCast<ExternalReference>(static_cast<intptr_t>(value_));
+  }
+
+  Handle<HeapObject> ToHeapObject() const {
+    ASSERT_EQ(kHeapObject, type());
+    return BitCast<Handle<HeapObject> >(static_cast<intptr_t>(value_));
+  }
+
+ private:
+  Type type_;
+  int64_t value_;
+};
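+
+// Note the widening accessors above: a kInt32 constant can also be read back
+// via ToInt64() or ToFloat64(), e.g. Constant(42).ToFloat64() == 42.0.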
+
+OStream& operator<<(OStream& os, const Constant& constant);
+
+typedef std::deque<Constant, zone_allocator<Constant> > ConstantDeque;
+typedef std::map<int, Constant, std::less<int>,
+                 zone_allocator<std::pair<int, Constant> > > ConstantMap;
+
+
+typedef std::deque<Instruction*, zone_allocator<Instruction*> >
+    InstructionDeque;
+typedef std::deque<PointerMap*, zone_allocator<PointerMap*> > PointerMapDeque;
+typedef std::vector<FrameStateDescriptor, zone_allocator<FrameStateDescriptor> >
+    DeoptimizationVector;
+
+
+// Represents architecture-specific generated code before, during, and after
+// register allocation.
+// TODO(titzer): s/IsDouble/IsFloat64/
+class InstructionSequence V8_FINAL {
+ public:
+  InstructionSequence(Linkage* linkage, Graph* graph, Schedule* schedule)
+      : graph_(graph),
+        linkage_(linkage),
+        schedule_(schedule),
+        constants_(ConstantMap::key_compare(),
+                   ConstantMap::allocator_type(zone())),
+        immediates_(ConstantDeque::allocator_type(zone())),
+        instructions_(InstructionDeque::allocator_type(zone())),
+        next_virtual_register_(graph->NodeCount()),
+        pointer_maps_(PointerMapDeque::allocator_type(zone())),
+        doubles_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
+        references_(std::less<int>(),
+                    VirtualRegisterSet::allocator_type(zone())),
+        deoptimization_entries_(DeoptimizationVector::allocator_type(zone())) {}
+
+  int NextVirtualRegister() { return next_virtual_register_++; }
+  int VirtualRegisterCount() const { return next_virtual_register_; }
+
+  int ValueCount() const { return graph_->NodeCount(); }
+
+  int BasicBlockCount() const {
+    return static_cast<int>(schedule_->rpo_order()->size());
+  }
+
+  BasicBlock* BlockAt(int rpo_number) const {
+    return (*schedule_->rpo_order())[rpo_number];
+  }
+
+  BasicBlock* GetContainingLoop(BasicBlock* block) {
+    return block->loop_header_;
+  }
+
+  int GetLoopEnd(BasicBlock* block) const { return block->loop_end_; }
+
+  BasicBlock* GetBasicBlock(int instruction_index);
+
+  int GetVirtualRegister(Node* node) const { return node->id(); }
+
+  bool IsReference(int virtual_register) const;
+  bool IsDouble(int virtual_register) const;
+
+  void MarkAsReference(int virtual_register);
+  void MarkAsDouble(int virtual_register);
+
+  void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to);
+
+  Label* GetLabel(BasicBlock* block);
+  BlockStartInstruction* GetBlockStart(BasicBlock* block);
+
+  typedef InstructionDeque::const_iterator const_iterator;
+  const_iterator begin() const { return instructions_.begin(); }
+  const_iterator end() const { return instructions_.end(); }
+
+  GapInstruction* GapAt(int index) const {
+    return GapInstruction::cast(InstructionAt(index));
+  }
+  bool IsGapAt(int index) const { return InstructionAt(index)->IsGapMoves(); }
+  Instruction* InstructionAt(int index) const {
+    ASSERT(index >= 0);
+    ASSERT(index < static_cast<int>(instructions_.size()));
+    return instructions_[index];
+  }
+
+  Frame* frame() { return &frame_; }
+  Graph* graph() const { return graph_; }
+  Isolate* isolate() const { return zone()->isolate(); }
+  Linkage* linkage() const { return linkage_; }
+  Schedule* schedule() const { return schedule_; }
+  const PointerMapDeque* pointer_maps() const { return &pointer_maps_; }
+  Zone* zone() const { return graph_->zone(); }
+
+  // Used by the code generator while adding instructions.
+  int AddInstruction(Instruction* instr, BasicBlock* block);
+  void StartBlock(BasicBlock* block);
+  void EndBlock(BasicBlock* block);
+
+  void AddConstant(int virtual_register, Constant constant) {
+    ASSERT(constants_.find(virtual_register) == constants_.end());
+    constants_.insert(std::make_pair(virtual_register, constant));
+  }
+  Constant GetConstant(int virtual_register) const {
+    ConstantMap::const_iterator it = constants_.find(virtual_register);
+    ASSERT(it != constants_.end());
+    ASSERT_EQ(virtual_register, it->first);
+    return it->second;
+  }
+
+  typedef ConstantDeque Immediates;
+  const Immediates& immediates() const { return immediates_; }
+
+  int AddImmediate(Constant constant) {
+    int index = static_cast<int>(immediates_.size());
+    immediates_.push_back(constant);
+    return index;
+  }
+  Constant GetImmediate(int index) const {
+    ASSERT(index >= 0);
+    ASSERT(index < static_cast<int>(immediates_.size()));
+    return immediates_[index];
+  }
+
+  int AddDeoptimizationEntry(const FrameStateDescriptor& descriptor);
+  FrameStateDescriptor GetDeoptimizationEntry(int deoptimization_id);
+  int GetDeoptimizationEntryCount();
+
+ private:
+  friend OStream& operator<<(OStream& os, const InstructionSequence& code);
+
+  typedef std::set<int, std::less<int>, ZoneIntAllocator> VirtualRegisterSet;
+
+  Graph* graph_;
+  Linkage* linkage_;
+  Schedule* schedule_;
+  ConstantMap constants_;
+  ConstantDeque immediates_;
+  InstructionDeque instructions_;
+  int next_virtual_register_;
+  PointerMapDeque pointer_maps_;
+  VirtualRegisterSet doubles_;
+  VirtualRegisterSet references_;
+  Frame frame_;
+  DeoptimizationVector deoptimization_entries_;
+};
+
+OStream& operator<<(OStream& os, const InstructionSequence& code);
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_INSTRUCTION_H_
diff --git a/src/compiler/ir-operations.txt b/src/compiler/ir-operations.txt
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/src/compiler/js-context-specialization.cc b/src/compiler/js-context-specialization.cc
new file mode 100644 (file)
index 0000000..658c99b
--- /dev/null
@@ -0,0 +1,93 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/js-context-specialization.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(titzer): factor this out to a common routine with js-typed-lowering.
+static void ReplaceEffectfulWithValue(Node* node, Node* value) {
+  Node* effect = NodeProperties::GetEffectInput(node);
+
+  // Requires distinguishing between value and effect edges.
+  UseIter iter = node->uses().begin();
+  while (iter != node->uses().end()) {
+    if (NodeProperties::IsEffectEdge(iter.edge())) {
+      iter = iter.UpdateToAndIncrement(effect);
+    } else {
+      iter = iter.UpdateToAndIncrement(value);
+    }
+  }
+}
+
+
+void JSContextSpecializer::SpecializeToContext() {
+  ValueMatcher<Handle<Context> > match(context_);
+
+  // Iterate over all uses of the context and try to replace {LoadContext}
+  // nodes with their values from the constant context.
+  UseIter iter = match.node()->uses().begin();
+  while (iter != match.node()->uses().end()) {
+    Node* use = *iter;
+    if (use->opcode() == IrOpcode::kJSLoadContext) {
+      Reduction r = ReduceJSLoadContext(use);
+      if (r.Changed() && r.replacement() != use) {
+        ReplaceEffectfulWithValue(use, r.replacement());
+      }
+    }
+    ++iter;
+  }
+}
+
+
+Reduction JSContextSpecializer::ReduceJSLoadContext(Node* node) {
+  ASSERT_EQ(IrOpcode::kJSLoadContext, node->opcode());
+
+  ContextAccess access =
+      static_cast<Operator1<ContextAccess>*>(node->op())->parameter();
+
+  // Find the right parent context.
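+  // (E.g. an access at depth 2 walks context->previous()->previous().)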
+  Context* context = *info_->context();
+  for (int i = access.depth(); i > 0; --i) {
+    context = context->previous();
+  }
+
+  // If the access itself is mutable, only fold in the parent context.
+  if (!access.immutable()) {
+    // The access does not have to look up a parent; there is nothing to fold.
+    if (access.depth() == 0) {
+      return Reducer::NoChange();
+    }
+    Operator* op = jsgraph_->javascript()->LoadContext(0, access.index(),
+                                                       access.immutable());
+    node->set_op(op);
+    Handle<Object> context_handle = Handle<Object>(context, info_->isolate());
+    node->ReplaceInput(0, jsgraph_->Constant(context_handle));
+    return Reducer::Changed(node);
+  }
+  Handle<Object> value =
+      Handle<Object>(context->get(access.index()), info_->isolate());
+
+  // Even though the context slot is immutable, the context might have escaped
+  // before the function to which it belongs has initialized the slot.
+  // We must be conservative and check if the value in the slot is currently the
+  // hole or undefined. If it is neither of these, then it must be initialized.
+  if (value->IsUndefined() || value->IsTheHole()) return Reducer::NoChange();
+
+  // Success. The context load can be replaced with the constant.
+  // TODO(titzer): record the specialization for sharing code across multiple
+  // contexts that have the same value in the corresponding context slot.
+  return Reducer::Replace(jsgraph_->Constant(value));
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-context-specialization.h b/src/compiler/js-context-specialization.h
new file mode 100644 (file)
index 0000000..dc0b50e
--- /dev/null
@@ -0,0 +1,36 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
+#define V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/contexts.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Specializes a given JSGraph to a given context, potentially constant folding
+// some {LoadContext} nodes.
+class JSContextSpecializer {
+ public:
+  JSContextSpecializer(CompilationInfo* info, JSGraph* jsgraph, Node* context)
+      : info_(info), jsgraph_(jsgraph), context_(context) {}
+
+  void SpecializeToContext();
+  Reduction ReduceJSLoadContext(Node* node);
+
+ private:
+  CompilationInfo* info_;
+  JSGraph* jsgraph_;
+  Node* context_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
new file mode 100644 (file)
index 0000000..5085e9f
--- /dev/null
@@ -0,0 +1,425 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-stubs.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-generic-lowering.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// TODO(mstarzinger): This is a temporary workaround for non-hydrogen stubs for
+// which we don't have an interface descriptor yet. Use ReplaceWithICStubCall
+// once these stubs have been made into HydrogenCodeStubs.
+template <typename T>
+static CodeStubInterfaceDescriptor* GetInterfaceDescriptor(Isolate* isolate,
+                                                           T* stub) {
+  CodeStub::Major key = static_cast<CodeStub*>(stub)->MajorKey();
+  CodeStubInterfaceDescriptor* d = isolate->code_stub_interface_descriptor(key);
+  stub->InitializeInterfaceDescriptor(isolate, d);
+  return d;
+}
+
+
+JSGenericLowering::JSGenericLowering(CompilationInfo* info, JSGraph* jsgraph,
+                                     MachineOperatorBuilder* machine,
+                                     SourcePositionTable* source_positions)
+    : LoweringBuilder(jsgraph->graph(), source_positions),
+      info_(info),
+      jsgraph_(jsgraph),
+      linkage_(new (jsgraph->zone()) Linkage(info)),
+      machine_(machine) {}
+
+
+void JSGenericLowering::PatchOperator(Node* node, Operator* op) {
+  node->set_op(op);
+}
+
+
+void JSGenericLowering::PatchInsertInput(Node* node, int index, Node* input) {
+  node->InsertInput(zone(), index, input);
+}
+
+
+Node* JSGenericLowering::SmiConstant(int32_t immediate) {
+  return jsgraph()->SmiConstant(immediate);
+}
+
+
+Node* JSGenericLowering::Int32Constant(int immediate) {
+  return jsgraph()->Int32Constant(immediate);
+}
+
+
+Node* JSGenericLowering::CodeConstant(Handle<Code> code) {
+  return jsgraph()->HeapConstant(code);
+}
+
+
+Node* JSGenericLowering::FunctionConstant(Handle<JSFunction> function) {
+  return jsgraph()->HeapConstant(function);
+}
+
+
+Node* JSGenericLowering::ExternalConstant(ExternalReference ref) {
+  return jsgraph()->ExternalConstant(ref);
+}
+
+
+void JSGenericLowering::Lower(Node* node) {
+  Node* replacement = NULL;
+  // Dispatch according to the opcode.
+  switch (node->opcode()) {
+#define DECLARE_CASE(x)           \
+  case IrOpcode::k##x:            \
+    replacement = Lower##x(node); \
+    break;
+    DECLARE_CASE(Branch)
+    JS_OP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+    default:
+      // Nothing to see.
+      return;
+  }
+
+  // Nothing to do if lowering was done by patching the existing node.
+  if (replacement == node) return;
+
+  // Iterate through uses of the original node and replace uses accordingly.
+  UNIMPLEMENTED();
+}
+
+
+#define REPLACE_IC_STUB_CALL(op, StubDeclaration)  \
+  Node* JSGenericLowering::Lower##op(Node* node) { \
+    StubDeclaration;                               \
+    ReplaceWithICStubCall(node, &stub);            \
+    return node;                                   \
+  }
+REPLACE_IC_STUB_CALL(JSBitwiseOr, BinaryOpICStub stub(isolate(), Token::BIT_OR))
+REPLACE_IC_STUB_CALL(JSBitwiseXor,
+                     BinaryOpICStub stub(isolate(), Token::BIT_XOR))
+REPLACE_IC_STUB_CALL(JSBitwiseAnd,
+                     BinaryOpICStub stub(isolate(), Token::BIT_AND))
+REPLACE_IC_STUB_CALL(JSShiftLeft, BinaryOpICStub stub(isolate(), Token::SHL))
+REPLACE_IC_STUB_CALL(JSShiftRight, BinaryOpICStub stub(isolate(), Token::SAR))
+REPLACE_IC_STUB_CALL(JSShiftRightLogical,
+                     BinaryOpICStub stub(isolate(), Token::SHR))
+REPLACE_IC_STUB_CALL(JSAdd, BinaryOpICStub stub(isolate(), Token::ADD))
+REPLACE_IC_STUB_CALL(JSSubtract, BinaryOpICStub stub(isolate(), Token::SUB))
+REPLACE_IC_STUB_CALL(JSMultiply, BinaryOpICStub stub(isolate(), Token::MUL))
+REPLACE_IC_STUB_CALL(JSDivide, BinaryOpICStub stub(isolate(), Token::DIV))
+REPLACE_IC_STUB_CALL(JSModulus, BinaryOpICStub stub(isolate(), Token::MOD))
+REPLACE_IC_STUB_CALL(JSToNumber, ToNumberStub stub(isolate()))
+#undef REPLACE_IC_STUB_CALL
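+
+// For illustration, the JSAdd entry above expands to:
+//
+//   Node* JSGenericLowering::LowerJSAdd(Node* node) {
+//     BinaryOpICStub stub(isolate(), Token::ADD);
+//     ReplaceWithICStubCall(node, &stub);
+//     return node;
+//   }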
+
+
+#define REPLACE_COMPARE_IC_CALL(op, token, pure)   \
+  Node* JSGenericLowering::Lower##op(Node* node) { \
+    ReplaceWithCompareIC(node, token, pure);       \
+    return node;                                   \
+  }
+REPLACE_COMPARE_IC_CALL(JSEqual, Token::EQ, false)
+REPLACE_COMPARE_IC_CALL(JSNotEqual, Token::NE, false)
+REPLACE_COMPARE_IC_CALL(JSStrictEqual, Token::EQ_STRICT, true)
+REPLACE_COMPARE_IC_CALL(JSStrictNotEqual, Token::NE_STRICT, true)
+REPLACE_COMPARE_IC_CALL(JSLessThan, Token::LT, false)
+REPLACE_COMPARE_IC_CALL(JSGreaterThan, Token::GT, false)
+REPLACE_COMPARE_IC_CALL(JSLessThanOrEqual, Token::LTE, false)
+REPLACE_COMPARE_IC_CALL(JSGreaterThanOrEqual, Token::GTE, false)
+#undef REPLACE_COMPARE_IC_CALL
+
+
+#define REPLACE_RUNTIME_CALL(op, fun)              \
+  Node* JSGenericLowering::Lower##op(Node* node) { \
+    ReplaceWithRuntimeCall(node, fun);             \
+    return node;                                   \
+  }
+REPLACE_RUNTIME_CALL(JSTypeOf, Runtime::kTypeof)
+REPLACE_RUNTIME_CALL(JSCreate, Runtime::kAbort)
+REPLACE_RUNTIME_CALL(JSCreateFunctionContext, Runtime::kNewFunctionContext)
+REPLACE_RUNTIME_CALL(JSCreateCatchContext, Runtime::kPushCatchContext)
+REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
+REPLACE_RUNTIME_CALL(JSCreateBlockContext, Runtime::kPushBlockContext)
+REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
+REPLACE_RUNTIME_CALL(JSCreateGlobalContext, Runtime::kAbort)
+#undef REPLACE_RUNTIME_CALL
+
+
+#define REPLACE_UNIMPLEMENTED(op)                  \
+  Node* JSGenericLowering::Lower##op(Node* node) { \
+    UNIMPLEMENTED();                               \
+    return node;                                   \
+  }
+REPLACE_UNIMPLEMENTED(JSToString)
+REPLACE_UNIMPLEMENTED(JSToName)
+REPLACE_UNIMPLEMENTED(JSYield)
+REPLACE_UNIMPLEMENTED(JSDebugger)
+#undef REPLACE_UNIMPLEMENTED
+
+
+void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
+                                             bool pure) {
+  BinaryOpICStub stub(isolate(), Token::ADD);  // TODO(mstarzinger): Hack.
+  CodeStubInterfaceDescriptor* d = stub.GetInterfaceDescriptor();
+  CallDescriptor* desc_compare = linkage()->GetStubCallDescriptor(d);
+  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), token);
+  Node* compare;
+  if (pure) {
+    // A pure (strict) comparison has no effect or control dependencies of
+    // its own, but the call node still needs these inputs, so the graph
+    // start is used for both.
+    compare = graph()->NewNode(common()->Call(desc_compare), CodeConstant(ic),
+                               NodeProperties::GetValueInput(node, 0),
+                               NodeProperties::GetValueInput(node, 1),
+                               NodeProperties::GetContextInput(node),
+                               graph()->start(), graph()->start());
+  } else {
+    compare = graph()->NewNode(common()->Call(desc_compare), CodeConstant(ic),
+                               NodeProperties::GetValueInput(node, 0),
+                               NodeProperties::GetValueInput(node, 1),
+                               NodeProperties::GetContextInput(node),
+                               NodeProperties::GetEffectInput(node),
+                               NodeProperties::GetControlInput(node));
+  }
+  node->ReplaceInput(0, compare);
+  node->ReplaceInput(1, SmiConstant(token));
+  ReplaceWithRuntimeCall(node, Runtime::kBooleanize);
+}
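+
+
+// Editorial sketch (not part of the original change) of the shape produced
+// by ReplaceWithCompareIC() above:
+//
+//   compare = Call[CompareIC](lhs, rhs, context, effect, control)
+//   node    = Call[Runtime::kBooleanize](compare, Smi(token), ...)
+//
+// The raw CompareIC result encodes the comparison outcome as an integer, so
+// a second call to the kBooleanize runtime function, parameterized by the
+// token, is needed to obtain an actual true/false value.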
+
+
+void JSGenericLowering::ReplaceWithICStubCall(Node* node,
+                                              HydrogenCodeStub* stub) {
+  CodeStubInterfaceDescriptor* d = stub->GetInterfaceDescriptor();
+  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d);
+  Node* stub_code = CodeConstant(stub->GetCode());
+  PatchInsertInput(node, 0, stub_code);
+  PatchOperator(node, common()->Call(desc));
+}
+
+
+void JSGenericLowering::ReplaceWithBuiltinCall(Node* node,
+                                               Builtins::JavaScript id,
+                                               int nargs) {
+  CallFunctionStub stub(isolate(), nargs - 1, NO_CALL_FUNCTION_FLAGS);
+  CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub);
+  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, nargs);
+  // TODO(mstarzinger): Accessing the builtins object this way prevents
+  // sharing of code across native contexts. Fix this by loading from the
+  // given context.
+  Handle<JSFunction> function(
+      JSFunction::cast(info()->context()->builtins()->javascript_builtin(id)));
+  Node* stub_code = CodeConstant(stub.GetCode());
+  Node* function_node = FunctionConstant(function);
+  PatchInsertInput(node, 0, stub_code);
+  PatchInsertInput(node, 1, function_node);
+  PatchOperator(node, common()->Call(desc));
+}
+
+
+void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
+                                               Runtime::FunctionId f,
+                                               int nargs_override) {
+  Operator::Property props = node->op()->properties();
+  const Runtime::Function* fun = Runtime::FunctionForId(f);
+  int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
+  CallDescriptor::DeoptimizationSupport deopt =
+      NodeProperties::CanLazilyDeoptimize(node)
+          ? CallDescriptor::kCanDeoptimize
+          : CallDescriptor::kCannotDeoptimize;
+  CallDescriptor* desc =
+      linkage()->GetRuntimeCallDescriptor(f, nargs, props, deopt);
+  Node* ref = ExternalConstant(ExternalReference(f, isolate()));
+  Node* arity = Int32Constant(nargs);
+  if (!centrystub_constant_.is_set()) {
+    centrystub_constant_.set(CodeConstant(CEntryStub(isolate(), 1).GetCode()));
+  }
+  PatchInsertInput(node, 0, centrystub_constant_.get());
+  PatchInsertInput(node, nargs + 1, ref);
+  PatchInsertInput(node, nargs + 2, arity);
+  PatchOperator(node, common()->Call(desc));
+}
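+
+
+// Editorial note (not part of the original change): after the patching in
+// ReplaceWithRuntimeCall() above, the inputs of {node} are laid out as
+//
+//   0            CEntryStub code object
+//   1 .. nargs   the original value inputs (the runtime call arguments)
+//   nargs + 1    ExternalReference to the runtime function
+//   nargs + 2    arity as an Int32 constant
+//
+// followed by the original context, effect and control inputs.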
+
+
+Node* JSGenericLowering::LowerBranch(Node* node) {
+  Node* test = graph()->NewNode(machine()->WordEqual(), node->InputAt(0),
+                                jsgraph()->TrueConstant());
+  node->ReplaceInput(0, test);
+  return node;
+}
+
+
+Node* JSGenericLowering::LowerJSUnaryNot(Node* node) {
+  ToBooleanStub stub(isolate());
+  CodeStubInterfaceDescriptor* d = stub.GetInterfaceDescriptor();
+  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d);
+  Node* to_bool =
+      graph()->NewNode(common()->Call(desc), CodeConstant(stub.GetCode()),
+                       NodeProperties::GetValueInput(node, 0),
+                       NodeProperties::GetContextInput(node),
+                       NodeProperties::GetEffectInput(node),
+                       NodeProperties::GetControlInput(node));
+  node->ReplaceInput(0, to_bool);
+  PatchInsertInput(node, 1, SmiConstant(Token::EQ));
+  ReplaceWithRuntimeCall(node, Runtime::kBooleanize);
+  return node;
+}
+
+
+Node* JSGenericLowering::LowerJSToBoolean(Node* node) {
+  ToBooleanStub stub(isolate());
+  CodeStubInterfaceDescriptor* d = stub.GetInterfaceDescriptor();
+  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d);
+  Node* to_bool =
+      graph()->NewNode(common()->Call(desc), CodeConstant(stub.GetCode()),
+                       NodeProperties::GetValueInput(node, 0),
+                       NodeProperties::GetContextInput(node),
+                       NodeProperties::GetEffectInput(node),
+                       NodeProperties::GetControlInput(node));
+  node->ReplaceInput(0, to_bool);
+  PatchInsertInput(node, 1, SmiConstant(Token::NE));
+  ReplaceWithRuntimeCall(node, Runtime::kBooleanize);
+  return node;
+}
+
+
+Node* JSGenericLowering::LowerJSToObject(Node* node) {
+  ReplaceWithBuiltinCall(node, Builtins::TO_OBJECT, 1);
+  return node;
+}
+
+
+Node* JSGenericLowering::LowerJSLoadProperty(Node* node) {
+  if (FLAG_compiled_keyed_generic_loads) {
+    KeyedLoadGenericStub stub(isolate());
+    ReplaceWithICStubCall(node, &stub);
+  } else {
+    ReplaceWithRuntimeCall(node, Runtime::kKeyedGetProperty);
+  }
+  return node;
+}
+
+
+Node* JSGenericLowering::LowerJSLoadNamed(Node* node) {
+  Node* key =
+      jsgraph()->HeapConstant(OpParameter<PrintableUnique<Name> >(node));
+  PatchInsertInput(node, 1, key);
+  // TODO(mstarzinger): We cannot use KeyedLoadGenericStub here yet, because
+  // named interceptors would not fire correctly.
+  ReplaceWithRuntimeCall(node, Runtime::kGetProperty);
+  return node;
+}
+
+
+Node* JSGenericLowering::LowerJSStoreProperty(Node* node) {
+  // TODO(mstarzinger): The strict_mode needs to be carried along in the
+  // operator so that graphs are fully compositional for inlining.
+  StrictMode strict_mode = info()->strict_mode();
+  PatchInsertInput(node, 3, SmiConstant(strict_mode));
+  ReplaceWithRuntimeCall(node, Runtime::kSetProperty, 4);
+  return node;
+}
+
+
+Node* JSGenericLowering::LowerJSStoreNamed(Node* node) {
+  // TODO(mstarzinger): The strict_mode needs to be carried along in the
+  // operator so that graphs are fully compositional for inlining.
+  StrictMode strict_mode = info()->strict_mode();
+  Node* key =
+      jsgraph()->HeapConstant(OpParameter<PrintableUnique<Name> >(node));
+  PatchInsertInput(node, 1, key);
+  PatchInsertInput(node, 3, SmiConstant(strict_mode));
+  ReplaceWithRuntimeCall(node, Runtime::kSetProperty, 4);
+  return node;
+}
+
+
+Node* JSGenericLowering::LowerJSDeleteProperty(Node* node) {
+  StrictMode strict_mode = OpParameter<StrictMode>(node);
+  PatchInsertInput(node, 2, SmiConstant(strict_mode));
+  ReplaceWithBuiltinCall(node, Builtins::DELETE, 3);
+  return node;
+}
+
+
+Node* JSGenericLowering::LowerJSHasProperty(Node* node) {
+  ReplaceWithBuiltinCall(node, Builtins::IN, 2);
+  return node;
+}
+
+
+Node* JSGenericLowering::LowerJSInstanceOf(Node* node) {
+  InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
+      InstanceofStub::kReturnTrueFalseObject |
+      InstanceofStub::kArgsInRegisters);
+  InstanceofStub stub(isolate(), flags);
+  CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub);
+  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, 0);
+  Node* stub_code = CodeConstant(stub.GetCode());
+  PatchInsertInput(node, 0, stub_code);
+  PatchOperator(node, common()->Call(desc));
+  return node;
+}
+
+
+Node* JSGenericLowering::LowerJSLoadContext(Node* node) {
+  ContextAccess access = OpParameter<ContextAccess>(node);
+  PatchInsertInput(node, 1, SmiConstant(access.depth()));
+  PatchInsertInput(node, 2, SmiConstant(access.index()));
+  ReplaceWithRuntimeCall(node, Runtime::kLoadContextRelative, 3);
+  return node;
+}
+
+
+Node* JSGenericLowering::LowerJSStoreContext(Node* node) {
+  ContextAccess access = OpParameter<ContextAccess>(node);
+  PatchInsertInput(node, 1, SmiConstant(access.depth()));
+  PatchInsertInput(node, 2, SmiConstant(access.index()));
+  ReplaceWithRuntimeCall(node, Runtime::kStoreContextRelative, 4);
+  return node;
+}
+
+
+Node* JSGenericLowering::LowerJSCallConstruct(Node* node) {
+  int arity = OpParameter<int>(node);
+  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+  CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub);
+  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, arity);
+  Node* stub_code = CodeConstant(stub.GetCode());
+  Node* construct = NodeProperties::GetValueInput(node, 0);
+  PatchInsertInput(node, 0, stub_code);
+  PatchInsertInput(node, 1, Int32Constant(arity - 1));
+  PatchInsertInput(node, 2, construct);
+  PatchInsertInput(node, 3, jsgraph()->UndefinedConstant());
+  PatchOperator(node, common()->Call(desc));
+  return node;
+}
+
+
+Node* JSGenericLowering::LowerJSCallFunction(Node* node) {
+  CallParameters p = OpParameter<CallParameters>(node);
+  CallFunctionStub stub(isolate(), p.arity - 2, p.flags);
+  CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub);
+  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, p.arity - 1);
+  Node* stub_code = CodeConstant(stub.GetCode());
+  PatchInsertInput(node, 0, stub_code);
+  PatchOperator(node, common()->Call(desc));
+  return node;
+}
+
+
+Node* JSGenericLowering::LowerJSCallRuntime(Node* node) {
+  Runtime::FunctionId function = OpParameter<Runtime::FunctionId>(node);
+  int arity = NodeProperties::GetValueInputCount(node);
+  ReplaceWithRuntimeCall(node, function, arity);
+  return node;
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-generic-lowering.h b/src/compiler/js-generic-lowering.h
new file mode 100644 (file)
index 0000000..e3113e5
--- /dev/null
@@ -0,0 +1,83 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_GENERIC_LOWERING_H_
+#define V8_COMPILER_JS_GENERIC_LOWERING_H_
+
+#include "src/v8.h"
+
+#include "src/allocation.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/lowering-builder.h"
+#include "src/compiler/opcodes.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class HydrogenCodeStub;
+
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class MachineOperatorBuilder;
+class Linkage;
+
+// Lowers JS-level operators to runtime and IC calls in the "generic" case.
+class JSGenericLowering : public LoweringBuilder {
+ public:
+  JSGenericLowering(CompilationInfo* info, JSGraph* graph,
+                    MachineOperatorBuilder* machine,
+                    SourcePositionTable* source_positions);
+  virtual ~JSGenericLowering() {}
+
+  virtual void Lower(Node* node);
+
+ protected:
+// Dispatched depending on opcode.
+#define DECLARE_LOWER(x) Node* Lower##x(Node* node);
+  ALL_OP_LIST(DECLARE_LOWER)
+#undef DECLARE_LOWER
+
+  // Helpers to create new constant nodes.
+  Node* SmiConstant(int32_t immediate);
+  Node* Int32Constant(int immediate);
+  Node* CodeConstant(Handle<Code> code);
+  Node* FunctionConstant(Handle<JSFunction> function);
+  Node* ExternalConstant(ExternalReference ref);
+
+  // Helpers to patch existing nodes in the graph.
+  void PatchOperator(Node* node, Operator* new_op);
+  void PatchInsertInput(Node* node, int index, Node* input);
+
+  // Helpers to replace existing nodes with a generic call.
+  void ReplaceWithCompareIC(Node* node, Token::Value token, bool pure);
+  void ReplaceWithICStubCall(Node* node, HydrogenCodeStub* stub);
+  void ReplaceWithBuiltinCall(Node* node, Builtins::JavaScript id, int args);
+  void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
+
+  Zone* zone() const { return graph()->zone(); }
+  Isolate* isolate() const { return zone()->isolate(); }
+  JSGraph* jsgraph() const { return jsgraph_; }
+  Graph* graph() const { return jsgraph()->graph(); }
+  Linkage* linkage() const { return linkage_; }
+  CompilationInfo* info() const { return info_; }
+  CommonOperatorBuilder* common() const { return jsgraph()->common(); }
+  MachineOperatorBuilder* machine() const { return machine_; }
+
+ private:
+  CompilationInfo* info_;
+  JSGraph* jsgraph_;
+  Linkage* linkage_;
+  MachineOperatorBuilder* machine_;
+  SetOncePointer<Node> centrystub_constant_;
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_GENERIC_LOWERING_H_
diff --git a/src/compiler/js-graph.cc b/src/compiler/js-graph.cc
new file mode 100644 (file)
index 0000000..2cebbc7
--- /dev/null
@@ -0,0 +1,174 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/typer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Node* JSGraph::ImmovableHeapConstant(Handle<Object> object) {
+  PrintableUnique<Object> unique =
+      PrintableUnique<Object>::CreateImmovable(zone(), object);
+  return NewNode(common()->HeapConstant(unique));
+}
+
+
+Node* JSGraph::NewNode(Operator* op) {
+  Node* node = graph()->NewNode(op);
+  typer_->Init(node);
+  return node;
+}
+
+
+Node* JSGraph::UndefinedConstant() {
+  if (!undefined_constant_.is_set()) {
+    undefined_constant_.set(
+        ImmovableHeapConstant(factory()->undefined_value()));
+  }
+  return undefined_constant_.get();
+}
+
+
+Node* JSGraph::TheHoleConstant() {
+  if (!the_hole_constant_.is_set()) {
+    the_hole_constant_.set(ImmovableHeapConstant(factory()->the_hole_value()));
+  }
+  return the_hole_constant_.get();
+}
+
+
+Node* JSGraph::TrueConstant() {
+  if (!true_constant_.is_set()) {
+    true_constant_.set(ImmovableHeapConstant(factory()->true_value()));
+  }
+  return true_constant_.get();
+}
+
+
+Node* JSGraph::FalseConstant() {
+  if (!false_constant_.is_set()) {
+    false_constant_.set(ImmovableHeapConstant(factory()->false_value()));
+  }
+  return false_constant_.get();
+}
+
+
+Node* JSGraph::NullConstant() {
+  if (!null_constant_.is_set()) {
+    null_constant_.set(ImmovableHeapConstant(factory()->null_value()));
+  }
+  return null_constant_.get();
+}
+
+
+Node* JSGraph::ZeroConstant() {
+  if (!zero_constant_.is_set()) zero_constant_.set(NumberConstant(0.0));
+  return zero_constant_.get();
+}
+
+
+Node* JSGraph::OneConstant() {
+  if (!one_constant_.is_set()) one_constant_.set(NumberConstant(1.0));
+  return one_constant_.get();
+}
+
+
+Node* JSGraph::NaNConstant() {
+  if (!nan_constant_.is_set()) {
+    nan_constant_.set(NumberConstant(base::OS::nan_value()));
+  }
+  return nan_constant_.get();
+}
+
+
+Node* JSGraph::HeapConstant(PrintableUnique<Object> value) {
+  // TODO(turbofan): canonicalize heap constants using Unique<T>
+  return NewNode(common()->HeapConstant(value));
+}
+
+
+Node* JSGraph::HeapConstant(Handle<Object> value) {
+  // TODO(titzer): We could also match against the addresses of immortal
+  // immovables here, even without access to the heap, thus always
+  // canonicalizing references to them.
+  return HeapConstant(
+      PrintableUnique<Object>::CreateUninitialized(zone(), value));
+}
+
+
+Node* JSGraph::Constant(Handle<Object> value) {
+  // Dereference the handle to determine if a number constant or other
+  // canonicalized node can be used.
+  if (value->IsNumber()) {
+    return Constant(value->Number());
+  } else if (value->IsUndefined()) {
+    return UndefinedConstant();
+  } else if (value->IsTrue()) {
+    return TrueConstant();
+  } else if (value->IsFalse()) {
+    return FalseConstant();
+  } else if (value->IsNull()) {
+    return NullConstant();
+  } else if (value->IsTheHole()) {
+    return TheHoleConstant();
+  } else {
+    return HeapConstant(value);
+  }
+}
+
+
+Node* JSGraph::Constant(double value) {
+  if (BitCast<int64_t>(value) == BitCast<int64_t>(0.0)) return ZeroConstant();
+  if (BitCast<int64_t>(value) == BitCast<int64_t>(1.0)) return OneConstant();
+  return NumberConstant(value);
+}
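+
+
+// Editorial note (not part of the original change): the bit-pattern
+// comparison in Constant(double) above is presumably deliberate, since
+// -0.0 == 0.0 under floating-point equality but -0.0 must not be
+// canonicalized to the ZeroConstant() node.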
+
+
+Node* JSGraph::Constant(int32_t value) {
+  if (value == 0) return ZeroConstant();
+  if (value == 1) return OneConstant();
+  return NumberConstant(value);
+}
+
+
+Node* JSGraph::Int32Constant(int32_t value) {
+  Node** loc = cache_.FindInt32Constant(value);
+  if (*loc == NULL) {
+    *loc = NewNode(common()->Int32Constant(value));
+  }
+  return *loc;
+}
+
+
+Node* JSGraph::NumberConstant(double value) {
+  Node** loc = cache_.FindNumberConstant(value);
+  if (*loc == NULL) {
+    *loc = NewNode(common()->NumberConstant(value));
+  }
+  return *loc;
+}
+
+
+Node* JSGraph::Float64Constant(double value) {
+  Node** loc = cache_.FindFloat64Constant(value);
+  if (*loc == NULL) {
+    *loc = NewNode(common()->Float64Constant(value));
+  }
+  return *loc;
+}
+
+
+Node* JSGraph::ExternalConstant(ExternalReference reference) {
+  Node** loc = cache_.FindExternalConstant(reference);
+  if (*loc == NULL) {
+    *loc = NewNode(common()->ExternalConstant(reference));
+  }
+  return *loc;
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h
new file mode 100644 (file)
index 0000000..3a5e687
--- /dev/null
@@ -0,0 +1,107 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_GRAPH_H_
+#define V8_COMPILER_JS_GRAPH_H_
+
+#include "src/compiler/common-node-cache.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Typer;
+
+// Implements a facade on a Graph, enhancing the graph with JS-specific
+// notions, including a builder for JS* operators, canonicalized global
+// constants, and various helper methods.
+class JSGraph : public ZoneObject {
+ public:
+  JSGraph(Graph* graph, CommonOperatorBuilder* common, Typer* typer)
+      : graph_(graph),
+        common_(common),
+        javascript_(zone()),
+        typer_(typer),
+        cache_(zone()) {}
+
+  // Canonicalized global constants.
+  Node* UndefinedConstant();
+  Node* TheHoleConstant();
+  Node* TrueConstant();
+  Node* FalseConstant();
+  Node* NullConstant();
+  Node* ZeroConstant();
+  Node* OneConstant();
+  Node* NaNConstant();
+
+  // Creates a HeapConstant node, possibly canonicalized, without inspecting the
+  // object.
+  Node* HeapConstant(PrintableUnique<Object> value);
+
+  // Creates a HeapConstant node, possibly canonicalized, and may access the
+  // heap to inspect the object.
+  Node* HeapConstant(Handle<Object> value);
+
+  // Creates a Constant node of the appropriate type for the given object.
+  // Accesses the heap to inspect the object and determine whether one of the
+  // canonicalized globals or a number constant should be returned.
+  Node* Constant(Handle<Object> value);
+
+  // Creates a NumberConstant node, usually canonicalized.
+  Node* Constant(double value);
+
+  // Creates a NumberConstant node, usually canonicalized.
+  Node* Constant(int32_t value);
+
+  // Creates an Int32Constant node, usually canonicalized.
+  Node* Int32Constant(int32_t value);
+
+  // Creates a Float64Constant node, usually canonicalized.
+  Node* Float64Constant(double value);
+
+  // Creates an ExternalConstant node, usually canonicalized.
+  Node* ExternalConstant(ExternalReference ref);
+
+  Node* SmiConstant(int32_t immediate) {
+    ASSERT(Smi::IsValid(immediate));
+    return Constant(immediate);
+  }
+
+  JSOperatorBuilder* javascript() { return &javascript_; }
+  CommonOperatorBuilder* common() { return common_; }
+  Graph* graph() { return graph_; }
+  Zone* zone() { return graph()->zone(); }
+
+ private:
+  Graph* graph_;
+  CommonOperatorBuilder* common_;
+  JSOperatorBuilder javascript_;
+  Typer* typer_;
+
+  SetOncePointer<Node> undefined_constant_;
+  SetOncePointer<Node> the_hole_constant_;
+  SetOncePointer<Node> true_constant_;
+  SetOncePointer<Node> false_constant_;
+  SetOncePointer<Node> null_constant_;
+  SetOncePointer<Node> zero_constant_;
+  SetOncePointer<Node> one_constant_;
+  SetOncePointer<Node> nan_constant_;
+
+  CommonNodeCache cache_;
+
+  Node* ImmovableHeapConstant(Handle<Object> value);
+  Node* NumberConstant(double value);
+  Node* NewNode(Operator* op);
+
+  Factory* factory() { return zone()->isolate()->factory(); }
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_GRAPH_H_
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
new file mode 100644 (file)
index 0000000..9f4b83a
--- /dev/null
@@ -0,0 +1,204 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_OPERATOR_H_
+#define V8_COMPILER_JS_OPERATOR_H_
+
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/unique.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Defines the location of a context slot relative to a specific scope. This is
+// used as a parameter by JSLoadContext and JSStoreContext operators and allows
+// accessing a context-allocated variable without keeping track of the scope.
+class ContextAccess {
+ public:
+  ContextAccess(int depth, int index, bool immutable)
+      : immutable_(immutable), depth_(depth), index_(index) {
+    ASSERT(0 <= depth && depth <= kMaxUInt16);
+    ASSERT(0 <= index && static_cast<uint32_t>(index) <= kMaxUInt32);
+  }
+  int depth() const { return depth_; }
+  int index() const { return index_; }
+  bool immutable() const { return immutable_; }
+
+ private:
+  // For space reasons, we keep this tightly packed, otherwise we could just use
+  // a simple int/int/bool POD.
+  const bool immutable_;
+  const uint16_t depth_;
+  const uint32_t index_;
+};
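+
+// For illustration (editorial note, not part of the original change): in
+//
+//   function outer() { var x; function inner() { return x; } }
+//
+// a JSLoadContext of {x} inside {inner} would carry a ContextAccess whose
+// depth is the number of context hops up to {outer}'s context (1 if {inner}
+// allocates a context of its own, 0 otherwise) and whose index is {x}'s slot
+// within that context.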
+
+// Defines the arity and the call flags for a JavaScript function call. This is
+// used as a parameter by JSCall operators.
+struct CallParameters {
+  int arity;
+  CallFunctionFlags flags;
+};
+
+// Interface for building JavaScript-level operators, e.g. directly from the
+// AST. Most operators have no parameters, thus can be globally shared for all
+// graphs.
+class JSOperatorBuilder {
+ public:
+  explicit JSOperatorBuilder(Zone* zone) : zone_(zone) {}
+
+#define SIMPLE(name, properties, inputs, outputs) \
+  return new (zone_)                              \
+      SimpleOperator(IrOpcode::k##name, properties, inputs, outputs, #name);
+
+#define NOPROPS(name, inputs, outputs) \
+  SIMPLE(name, Operator::kNoProperties, inputs, outputs)
+
+#define OP1(name, ptype, pname, properties, inputs, outputs)                 \
+  return new (zone_) Operator1<ptype>(IrOpcode::k##name, properties, inputs, \
+                                      outputs, #name, pname)
+
+#define BINOP(name) NOPROPS(name, 2, 1)
+#define UNOP(name) NOPROPS(name, 1, 1)
+
+#define PURE_BINOP(name) SIMPLE(name, Operator::kPure, 2, 1)
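+
+// For illustration (editorial note, not part of the original change),
+// Equal() below expands via BINOP -> NOPROPS -> SIMPLE to:
+//
+//   Operator* Equal() {
+//     return new (zone_) SimpleOperator(IrOpcode::kJSEqual,
+//                                       Operator::kNoProperties, 2, 1,
+//                                       "JSEqual");
+//   }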
+
+  Operator* Equal() { BINOP(JSEqual); }
+  Operator* NotEqual() { BINOP(JSNotEqual); }
+  Operator* StrictEqual() { PURE_BINOP(JSStrictEqual); }
+  Operator* StrictNotEqual() { PURE_BINOP(JSStrictNotEqual); }
+  Operator* LessThan() { BINOP(JSLessThan); }
+  Operator* GreaterThan() { BINOP(JSGreaterThan); }
+  Operator* LessThanOrEqual() { BINOP(JSLessThanOrEqual); }
+  Operator* GreaterThanOrEqual() { BINOP(JSGreaterThanOrEqual); }
+  Operator* BitwiseOr() { BINOP(JSBitwiseOr); }
+  Operator* BitwiseXor() { BINOP(JSBitwiseXor); }
+  Operator* BitwiseAnd() { BINOP(JSBitwiseAnd); }
+  Operator* ShiftLeft() { BINOP(JSShiftLeft); }
+  Operator* ShiftRight() { BINOP(JSShiftRight); }
+  Operator* ShiftRightLogical() { BINOP(JSShiftRightLogical); }
+  Operator* Add() { BINOP(JSAdd); }
+  Operator* Subtract() { BINOP(JSSubtract); }
+  Operator* Multiply() { BINOP(JSMultiply); }
+  Operator* Divide() { BINOP(JSDivide); }
+  Operator* Modulus() { BINOP(JSModulus); }
+
+  Operator* UnaryNot() { UNOP(JSUnaryNot); }
+  Operator* ToBoolean() { UNOP(JSToBoolean); }
+  Operator* ToNumber() { UNOP(JSToNumber); }
+  Operator* ToString() { UNOP(JSToString); }
+  Operator* ToName() { UNOP(JSToName); }
+  Operator* ToObject() { UNOP(JSToObject); }
+  Operator* Yield() { UNOP(JSYield); }
+
+  Operator* Create() { SIMPLE(JSCreate, Operator::kEliminatable, 0, 1); }
+
+  Operator* Call(int arguments, CallFunctionFlags flags) {
+    CallParameters parameters = {arguments, flags};
+    OP1(JSCallFunction, CallParameters, parameters, Operator::kNoProperties,
+        arguments, 1);
+  }
+
+  Operator* CallNew(int arguments) {
+    return new (zone_)
+        Operator1<int>(IrOpcode::kJSCallConstruct, Operator::kNoProperties,
+                       arguments, 1, "JSCallConstruct", arguments);
+  }
+
+  Operator* LoadProperty() { BINOP(JSLoadProperty); }
+  Operator* LoadNamed(PrintableUnique<Name> name) {
+    OP1(JSLoadNamed, PrintableUnique<Name>, name, Operator::kNoProperties, 1,
+        1);
+  }
+
+  Operator* StoreProperty() { NOPROPS(JSStoreProperty, 3, 0); }
+  Operator* StoreNamed(PrintableUnique<Name> name) {
+    OP1(JSStoreNamed, PrintableUnique<Name>, name, Operator::kNoProperties, 2,
+        0);
+  }
+
+  Operator* DeleteProperty(StrictMode strict_mode) {
+    OP1(JSDeleteProperty, StrictMode, strict_mode, Operator::kNoProperties, 2,
+        1);
+  }
+
+  Operator* HasProperty() { NOPROPS(JSHasProperty, 2, 1); }
+
+  Operator* LoadContext(uint16_t depth, uint32_t index, bool immutable) {
+    ContextAccess access(depth, index, immutable);
+    OP1(JSLoadContext, ContextAccess, access,
+        Operator::kEliminatable | Operator::kNoWrite, 1, 1);
+  }
+  Operator* StoreContext(uint16_t depth, uint32_t index) {
+    ContextAccess access(depth, index, false);
+    OP1(JSStoreContext, ContextAccess, access, Operator::kNoProperties, 2, 1);
+  }
+
+  Operator* TypeOf() { SIMPLE(JSTypeOf, Operator::kPure, 1, 1); }
+  Operator* InstanceOf() { NOPROPS(JSInstanceOf, 2, 1); }
+  Operator* Debugger() { NOPROPS(JSDebugger, 0, 0); }
+
+  // TODO(titzer): nail down the static parts of each of these context flavors.
+  Operator* CreateFunctionContext() { NOPROPS(JSCreateFunctionContext, 1, 1); }
+  Operator* CreateCatchContext(PrintableUnique<String> name) {
+    OP1(JSCreateCatchContext, PrintableUnique<String>, name,
+        Operator::kNoProperties, 1, 1);
+  }
+  Operator* CreateWithContext() { NOPROPS(JSCreateWithContext, 2, 1); }
+  Operator* CreateBlockContext() { NOPROPS(JSCreateBlockContext, 2, 1); }
+  Operator* CreateModuleContext() { NOPROPS(JSCreateModuleContext, 2, 1); }
+  Operator* CreateGlobalContext() { NOPROPS(JSCreateGlobalContext, 2, 1); }
+
+  Operator* Runtime(Runtime::FunctionId function, int arguments) {
+    const Runtime::Function* f = Runtime::FunctionForId(function);
+    ASSERT(f->nargs == -1 || f->nargs == arguments);
+    OP1(JSCallRuntime, Runtime::FunctionId, function, Operator::kNoProperties,
+        arguments, f->result_size);
+  }
+
+#undef SIMPLE
+#undef NOPROPS
+#undef OP1
+#undef BINOP
+#undef UNOP
+
+ private:
+  Zone* zone_;
+};
+
+// Specialization for static parameters of type {ContextAccess}.
+template <>
+struct StaticParameterTraits<ContextAccess> {
+  static OStream& PrintTo(OStream& os, ContextAccess val) {  // NOLINT
+    return os << val.depth() << "," << val.index()
+              << (val.immutable() ? ",imm" : "");
+  }
+  static int HashCode(ContextAccess val) {
+    return (val.depth() << 16) | (val.index() & 0xffff);
+  }
+  static bool Equals(ContextAccess a, ContextAccess b) {
+    return a.immutable() == b.immutable() && a.depth() == b.depth() &&
+           a.index() == b.index();
+  }
+};
+
+// Specialization for static parameters of type {Runtime::FunctionId}.
+template <>
+struct StaticParameterTraits<Runtime::FunctionId> {
+  static OStream& PrintTo(OStream& os, Runtime::FunctionId val) {  // NOLINT
+    const Runtime::Function* f = Runtime::FunctionForId(val);
+    return os << (f->name ? f->name : "?Runtime?");
+  }
+  static int HashCode(Runtime::FunctionId val) { return static_cast<int>(val); }
+  static bool Equals(Runtime::FunctionId a, Runtime::FunctionId b) {
+    return a == b;
+  }
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_OPERATOR_H_
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
new file mode 100644 (file)
index 0000000..18e1f2c
--- /dev/null
@@ -0,0 +1,604 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-typed-lowering.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(turbofan): js-typed-lowering improvements possible
+// - immediately put in type bounds for all new nodes
+// - relax effects from generic but not-side-effecting operations
+// - relax effects for ToNumber(mixed)
+
+// Replace value uses of {node} with {value} and effect uses of {node} with
+// {effect}. If {effect == NULL}, then use the effect input to {node}.
+// TODO(titzer): move into a GraphEditor?
+static void ReplaceUses(Node* node, Node* value, Node* effect) {
+  if (value == effect) {
+    // Effect and value updates are the same; no special iteration needed.
+    if (value != node) node->ReplaceUses(value);
+    return;
+  }
+
+  if (effect == NULL) effect = NodeProperties::GetEffectInput(node);
+
+  // The iteration requires distinguishing between value and effect edges.
+  UseIter iter = node->uses().begin();
+  while (iter != node->uses().end()) {
+    if (NodeProperties::IsEffectEdge(iter.edge())) {
+      iter = iter.UpdateToAndIncrement(effect);
+    } else {
+      iter = iter.UpdateToAndIncrement(value);
+    }
+  }
+}
+
+
+// Relax the effects of {node} by immediately replacing effect uses of {node}
+// with the effect input to {node}.
+// TODO(turbofan): replace the effect input to {node} with {graph->start()}.
+// TODO(titzer): move into a GraphEditor?
+static void RelaxEffects(Node* node) { ReplaceUses(node, node, NULL); }
+
+
+Reduction JSTypedLowering::ReplaceEagerly(Node* old, Node* node) {
+  ReplaceUses(old, node, node);
+  return Reducer::Changed(node);
+}
+
+
+// A helper class to simplify the process of reducing a single binop node with a
+// JSOperator. This class manages the rewriting of context, control, and effect
+// dependencies during lowering of a binop and contains numerous helper
+// functions for matching the types of inputs to an operation.
+class JSBinopReduction {
+ public:
+  JSBinopReduction(JSTypedLowering* lowering, Node* node)
+      : lowering_(lowering),
+        node_(node),
+        left_type_(NodeProperties::GetBounds(node->InputAt(0)).upper),
+        right_type_(NodeProperties::GetBounds(node->InputAt(1)).upper) {}
+
+  void ConvertInputsToNumber() {
+    node_->ReplaceInput(0, ConvertToNumber(left()));
+    node_->ReplaceInput(1, ConvertToNumber(right()));
+  }
+
+  void ConvertInputsToInt32(bool left_signed, bool right_signed) {
+    node_->ReplaceInput(0, ConvertToI32(left_signed, left()));
+    node_->ReplaceInput(1, ConvertToI32(right_signed, right()));
+  }
+
+  void ConvertInputsToString() {
+    node_->ReplaceInput(0, ConvertToString(left()));
+    node_->ReplaceInput(1, ConvertToString(right()));
+  }
+
+  // Convert inputs for bitwise shift operation (ES5 spec 11.7).
+  void ConvertInputsForShift(bool left_signed) {
+    node_->ReplaceInput(0, ConvertToI32(left_signed, left()));
+    Node* rnum = ConvertToI32(false, right());
+    node_->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rnum,
+                                            jsgraph()->Int32Constant(0x1F)));
+  }
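+
+  // Editorial note (not part of the original change): the 0x1F mask above
+  // implements the ES5 shift semantics, where only the low five bits of the
+  // right operand are used; e.g. in JavaScript, (1 << 33) === 2 and
+  // (1 << 32) === 1.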
+
+  void SwapInputs() {
+    Node* l = left();
+    Node* r = right();
+    node_->ReplaceInput(0, r);
+    node_->ReplaceInput(1, l);
+    std::swap(left_type_, right_type_);
+  }
+
+  // Remove all effect and control inputs and outputs to this node and change
+  // to the pure operator {op}, possibly inserting a boolean inversion.
+  Reduction ChangeToPureOperator(Operator* op, bool invert = false) {
+    ASSERT_EQ(0, OperatorProperties::GetEffectInputCount(op));
+    ASSERT_EQ(false, OperatorProperties::HasContextInput(op));
+    ASSERT_EQ(0, OperatorProperties::GetControlInputCount(op));
+    ASSERT_EQ(2, OperatorProperties::GetValueInputCount(op));
+
+    // Remove the effects from the node, if any, and update its effect usages.
+    if (OperatorProperties::GetEffectInputCount(node_->op()) > 0) {
+      RelaxEffects(node_);
+    }
+    // Remove the inputs corresponding to context, effect, and control.
+    NodeProperties::RemoveNonValueInputs(node_);
+    // Finally, update the operator to the new one.
+    node_->set_op(op);
+
+    if (invert) {
+      // Insert a BooleanNot to invert the value.
+      Node* value = graph()->NewNode(simplified()->BooleanNot(), node_);
+      node_->ReplaceUses(value);
+      // Note: ReplaceUses() smashes all uses, so smash it back here.
+      value->ReplaceInput(0, node_);
+      return lowering_->ReplaceWith(value);
+    }
+    return lowering_->Changed(node_);
+  }
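+
+  // Editorial sketch (not part of the original change) of the {invert} case
+  // above: ReplaceUses() redirects *every* edge pointing at {node_} to the
+  // new BooleanNot node, including the BooleanNot's own input edge, leaving
+  //
+  //   uses -> BooleanNot -> BooleanNot (self-loop)
+  //
+  // which is why that one input edge is patched back to {node_} afterwards.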
+
+  bool OneInputIs(Type* t) { return left_type_->Is(t) || right_type_->Is(t); }
+
+  bool BothInputsAre(Type* t) {
+    return left_type_->Is(t) && right_type_->Is(t);
+  }
+
+  bool OneInputCannotBe(Type* t) {
+    return !left_type_->Maybe(t) || !right_type_->Maybe(t);
+  }
+
+  bool NeitherInputCanBe(Type* t) {
+    return !left_type_->Maybe(t) && !right_type_->Maybe(t);
+  }
+
+  Node* effect() { return NodeProperties::GetEffectInput(node_); }
+  Node* control() { return NodeProperties::GetControlInput(node_); }
+  Node* context() { return NodeProperties::GetContextInput(node_); }
+  Node* left() { return NodeProperties::GetValueInput(node_, 0); }
+  Node* right() { return NodeProperties::GetValueInput(node_, 1); }
+  Type* left_type() { return left_type_; }
+  Type* right_type() { return right_type_; }
+
+  SimplifiedOperatorBuilder* simplified() { return lowering_->simplified(); }
+  Graph* graph() { return lowering_->graph(); }
+  JSGraph* jsgraph() { return lowering_->jsgraph(); }
+  JSOperatorBuilder* javascript() { return lowering_->javascript(); }
+  MachineOperatorBuilder* machine() { return lowering_->machine(); }
+
+ private:
+  JSTypedLowering* lowering_;  // The containing lowering instance.
+  Node* node_;                 // The original node.
+  Type* left_type_;            // Cache of the left input's type.
+  Type* right_type_;           // Cache of the right input's type.
+
+  Node* ConvertToString(Node* node) {
+    // Avoid introducing too many eager ToString() operations.
+    Reduction reduced = lowering_->ReduceJSToStringInput(node);
+    if (reduced.Changed()) return reduced.replacement();
+    Node* n = graph()->NewNode(javascript()->ToString(), node, context(),
+                               effect(), control());
+    update_effect(n);
+    return n;
+  }
+
+  Node* ConvertToNumber(Node* node) {
+    // Avoid introducing too many eager ToNumber() operations.
+    Reduction reduced = lowering_->ReduceJSToNumberInput(node);
+    if (reduced.Changed()) return reduced.replacement();
+    Node* n = graph()->NewNode(javascript()->ToNumber(), node, context(),
+                               effect(), control());
+    update_effect(n);
+    return n;
+  }
+
+  // Try narrowing a double or number operation to an Int32 operation.
+  bool TryNarrowingToI32(Type* type, Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kFloat64Add:
+      case IrOpcode::kNumberAdd: {
+        JSBinopReduction r(lowering_, node);
+        if (r.BothInputsAre(Type::Integral32())) {
+          node->set_op(lowering_->machine()->Int32Add());
+          // TODO(titzer): narrow bounds instead of overwriting.
+          NodeProperties::SetBounds(node, Bounds(type));
+          return true;
+        }
+        return false;
+      }
+      case IrOpcode::kFloat64Sub:
+      case IrOpcode::kNumberSubtract: {
+        JSBinopReduction r(lowering_, node);
+        if (r.BothInputsAre(Type::Integral32())) {
+          node->set_op(lowering_->machine()->Int32Sub());
+          // TODO(titzer): narrow bounds instead of overwriting.
+          NodeProperties::SetBounds(node, Bounds(type));
+          return true;
+        }
+        return false;
+      }
+      default:
+        return false;
+    }
+  }
+
+  Node* ConvertToI32(bool is_signed, Node* node) {
+    Type* type = is_signed ? Type::Signed32() : Type::Unsigned32();
+    if (node->OwnedBy(node_)) {
+      // If this node {node_} has the only edge to {node}, then try narrowing
+      // its operation to an Int32 add or subtract.
+      if (TryNarrowingToI32(type, node)) return node;
+    } else {
+      // Otherwise, {node} has multiple uses. Leave it as is and let later
+      // lowering passes, which run to a full backwards fixpoint, deal
+      // with it.
+    }
+
+    // Avoid introducing too many eager NumberToInt32()/NumberToUint32()
+    // operations.
+    node = ConvertToNumber(node);
+    Type* input_type = NodeProperties::GetBounds(node).upper;
+
+    if (input_type->Is(type)) return node;  // already in the value range.
+
+    Operator* op = is_signed ? simplified()->NumberToInt32()
+                             : simplified()->NumberToUint32();
+    Node* n = graph()->NewNode(op, node);
+    return n;
+  }
+
+  void update_effect(Node* effect) {
+    NodeProperties::ReplaceEffectInput(node_, effect);
+  }
+};
+
+
+Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
+  JSBinopReduction r(this, node);
+  if (r.OneInputIs(Type::String())) {
+    r.ConvertInputsToString();
+    return r.ChangeToPureOperator(simplified()->StringAdd());
+  } else if (r.NeitherInputCanBe(Type::String())) {
+    r.ConvertInputsToNumber();
+    return r.ChangeToPureOperator(simplified()->NumberAdd());
+  }
+  return NoChange();
+}
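+
+
+// Editorial examples (not part of the original change) of the rule above:
+//
+//   "a" + x  with one side known to be String      => StringAdd of the
+//                                                     ToString'd inputs
+//   x + y    where neither side can be String      => NumberAdd of the
+//                                                     ToNumber'd inputs
+//   x + y    otherwise (String or Number possible) => left as generic JSAdd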
+
+
+Reduction JSTypedLowering::ReduceNumberBinop(Node* node, Operator* numberOp) {
+  JSBinopReduction r(this, node);
+  if (r.OneInputIs(Type::Primitive())) {
+    // If at least one input is a primitive, then insert appropriate conversions
+    // to number and reduce this operator to the given numeric one.
+    // TODO(turbofan): make this heuristic configurable for code size.
+    r.ConvertInputsToNumber();
+    return r.ChangeToPureOperator(numberOp);
+  }
+  // TODO(turbofan): relax/remove the effects of this operator in other cases.
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceI32Binop(Node* node, bool left_signed,
+                                          bool right_signed, Operator* intOp) {
+  JSBinopReduction r(this, node);
+  // TODO(titzer): some Smi bitwise operations don't really require going
+  // all the way to int32, which can save tagging/untagging for some operations
+  // on some platforms.
+  // TODO(turbofan): make this heuristic configurable for code size.
+  r.ConvertInputsToInt32(left_signed, right_signed);
+  return r.ChangeToPureOperator(intOp);
+}
+
+
+Reduction JSTypedLowering::ReduceI32Shift(Node* node, bool left_signed,
+                                          Operator* shift_op) {
+  JSBinopReduction r(this, node);
+  r.ConvertInputsForShift(left_signed);
+  return r.ChangeToPureOperator(shift_op);
+}
+
+
+Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
+  JSBinopReduction r(this, node);
+  if (r.BothInputsAre(Type::String())) {
+    // If both inputs are definitely strings, perform a string comparison.
+    Operator* stringOp;
+    switch (node->opcode()) {
+      case IrOpcode::kJSLessThan:
+        stringOp = simplified()->StringLessThan();
+        break;
+      case IrOpcode::kJSGreaterThan:
+        stringOp = simplified()->StringLessThan();
+        r.SwapInputs();  // a > b => b < a
+        break;
+      case IrOpcode::kJSLessThanOrEqual:
+        stringOp = simplified()->StringLessThanOrEqual();
+        break;
+      case IrOpcode::kJSGreaterThanOrEqual:
+        stringOp = simplified()->StringLessThanOrEqual();
+        r.SwapInputs();  // a >= b => b <= a
+        break;
+      default:
+        return NoChange();
+    }
+    return r.ChangeToPureOperator(stringOp);
+  } else if (r.OneInputCannotBe(Type::String())) {
+    // If one input cannot be a string, then emit a number comparison.
+    Operator* less_than;
+    Operator* less_than_or_equal;
+    if (r.BothInputsAre(Type::Unsigned32())) {
+      less_than = machine()->Uint32LessThan();
+      less_than_or_equal = machine()->Uint32LessThanOrEqual();
+    } else if (r.BothInputsAre(Type::Signed32())) {
+      less_than = machine()->Int32LessThan();
+      less_than_or_equal = machine()->Int32LessThanOrEqual();
+    } else {
+      // TODO(turbofan): mixed signed/unsigned int32 comparisons.
+      r.ConvertInputsToNumber();
+      less_than = simplified()->NumberLessThan();
+      less_than_or_equal = simplified()->NumberLessThanOrEqual();
+    }
+    Operator* comparison;
+    switch (node->opcode()) {
+      case IrOpcode::kJSLessThan:
+        comparison = less_than;
+        break;
+      case IrOpcode::kJSGreaterThan:
+        comparison = less_than;
+        r.SwapInputs();  // a > b => b < a
+        break;
+      case IrOpcode::kJSLessThanOrEqual:
+        comparison = less_than_or_equal;
+        break;
+      case IrOpcode::kJSGreaterThanOrEqual:
+        comparison = less_than_or_equal;
+        r.SwapInputs();  // a >= b => b <= a
+        break;
+      default:
+        return NoChange();
+    }
+    return r.ChangeToPureOperator(comparison);
+  }
+  // TODO(turbofan): relax/remove effects of this operator in other cases.
+  return NoChange();  // Keep a generic comparison.
+}
+
+
+Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
+  JSBinopReduction r(this, node);
+
+  if (r.BothInputsAre(Type::Number())) {
+    return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+  }
+  if (r.BothInputsAre(Type::String())) {
+    return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
+  }
+  if (r.BothInputsAre(Type::Receiver())) {
+    return r.ChangeToPureOperator(
+        simplified()->ReferenceEqual(Type::Receiver()), invert);
+  }
+  // TODO(turbofan): js-typed-lowering of Equal(undefined)
+  // TODO(turbofan): js-typed-lowering of Equal(null)
+  // TODO(turbofan): js-typed-lowering of Equal(boolean)
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
+  JSBinopReduction r(this, node);
+  if (r.left() == r.right()) {
+    // x === x is always true, except when x is NaN.
+    if (!r.left_type()->Maybe(Type::NaN())) {
+      return ReplaceEagerly(node, invert ? jsgraph()->FalseConstant()
+                                         : jsgraph()->TrueConstant());
+    }
+  }
+  if (!r.left_type()->Maybe(r.right_type())) {
+    // Type intersection is empty; === is always false unless both
+    // inputs could be strings (one internalized and one not).
+    if (r.OneInputCannotBe(Type::String())) {
+      return ReplaceEagerly(node, invert ? jsgraph()->TrueConstant()
+                                         : jsgraph()->FalseConstant());
+    }
+  }
+  if (r.OneInputIs(Type::Undefined())) {
+    return r.ChangeToPureOperator(
+        simplified()->ReferenceEqual(Type::Undefined()), invert);
+  }
+  if (r.OneInputIs(Type::Null())) {
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Null()),
+                                  invert);
+  }
+  if (r.OneInputIs(Type::Boolean())) {
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Boolean()),
+                                  invert);
+  }
+  if (r.OneInputIs(Type::Object())) {
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Object()),
+                                  invert);
+  }
+  if (r.OneInputIs(Type::Receiver())) {
+    return r.ChangeToPureOperator(
+        simplified()->ReferenceEqual(Type::Receiver()), invert);
+  }
+  if (r.BothInputsAre(Type::String())) {
+    return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
+  }
+  if (r.BothInputsAre(Type::Number())) {
+    return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+  }
+  // TODO(turbofan): js-typed-lowering of StrictEqual(mixed types)
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
+  if (input->opcode() == IrOpcode::kJSToNumber) {
+    // Recursively try to reduce the input first.
+    Reduction result = ReduceJSToNumberInput(input->InputAt(0));
+    if (result.Changed()) {
+      RelaxEffects(input);
+      return result;
+    }
+    return Changed(input);  // JSToNumber(JSToNumber(x)) => JSToNumber(x)
+  }
+  Type* input_type = NodeProperties::GetBounds(input).upper;
+  if (input_type->Is(Type::Number())) {
+    // JSToNumber(number) => x
+    return Changed(input);
+  }
+  if (input_type->Is(Type::Undefined())) {
+    // JSToNumber(undefined) => #NaN
+    return ReplaceWith(jsgraph()->NaNConstant());
+  }
+  if (input_type->Is(Type::Null())) {
+    // JSToNumber(null) => #0
+    return ReplaceWith(jsgraph()->ZeroConstant());
+  }
+  // TODO(turbofan): js-typed-lowering of ToNumber(boolean)
+  // TODO(turbofan): js-typed-lowering of ToNumber(string)
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
+  if (input->opcode() == IrOpcode::kJSToString) {
+    // Recursively try to reduce the input first.
+    Reduction result = ReduceJSToStringInput(input->InputAt(0));
+    if (result.Changed()) {
+      RelaxEffects(input);
+      return result;
+    }
+    return Changed(input);  // JSToString(JSToString(x)) => JSToString(x)
+  }
+  Type* input_type = NodeProperties::GetBounds(input).upper;
+  if (input_type->Is(Type::String())) {
+    return Changed(input);  // JSToString(string) => x
+  }
+  if (input_type->Is(Type::Undefined())) {
+    return ReplaceWith(jsgraph()->HeapConstant(
+        graph()->zone()->isolate()->factory()->undefined_string()));
+  }
+  if (input_type->Is(Type::Null())) {
+    return ReplaceWith(jsgraph()->HeapConstant(
+        graph()->zone()->isolate()->factory()->null_string()));
+  }
+  // TODO(turbofan): js-typed-lowering of ToString(boolean)
+  // TODO(turbofan): js-typed-lowering of ToString(number)
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSToBooleanInput(Node* input) {
+  if (input->opcode() == IrOpcode::kJSToBoolean) {
+    // Recursively try to reduce the input first.
+    Reduction result = ReduceJSToBooleanInput(input->InputAt(0));
+    if (result.Changed()) {
+      RelaxEffects(input);
+      return result;
+    }
+    return Changed(input);  // JSToBoolean(JSToBoolean(x)) => JSToBoolean(x)
+  }
+  Type* input_type = NodeProperties::GetBounds(input).upper;
+  if (input_type->Is(Type::Boolean())) {
+    return Changed(input);  // JSToBoolean(boolean) => x
+  }
+  if (input_type->Is(Type::Undefined())) {
+    // JSToBoolean(undefined) => #false
+    return ReplaceWith(jsgraph()->FalseConstant());
+  }
+  if (input_type->Is(Type::Null())) {
+    // JSToBoolean(null) => #false
+    return ReplaceWith(jsgraph()->FalseConstant());
+  }
+  if (input_type->Is(Type::DetectableReceiver())) {
+    // JSToBoolean(detectable) => #true
+    return ReplaceWith(jsgraph()->TrueConstant());
+  }
+  if (input_type->Is(Type::Undetectable())) {
+    // JSToBoolean(undetectable) => #false
+    return ReplaceWith(jsgraph()->FalseConstant());
+  }
+  if (input_type->Is(Type::Number())) {
+    // JSToBoolean(number) => BooleanNot(NumberEqual(x, #0))
+    Node* cmp = graph()->NewNode(simplified()->NumberEqual(), input,
+                                 jsgraph()->ZeroConstant());
+    Node* inv = graph()->NewNode(simplified()->BooleanNot(), cmp);
+    ReplaceEagerly(input, inv);
+    // TODO(titzer): Ugly. ReplaceEagerly smashes all uses. Smash it back here.
+    cmp->ReplaceInput(0, input);
+    return Changed(inv);
+  }
+  // TODO(turbofan): js-typed-lowering of ToBoolean(string)
+  return NoChange();
+}
+
+
+static Reduction ReplaceWithReduction(Node* node, Reduction reduction) {
+  if (reduction.Changed()) {
+    ReplaceUses(node, reduction.replacement(), NULL);
+    return reduction;
+  }
+  return Reducer::NoChange();
+}
+
+
+Reduction JSTypedLowering::Reduce(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kJSEqual:
+      return ReduceJSEqual(node, false);
+    case IrOpcode::kJSNotEqual:
+      return ReduceJSEqual(node, true);
+    case IrOpcode::kJSStrictEqual:
+      return ReduceJSStrictEqual(node, false);
+    case IrOpcode::kJSStrictNotEqual:
+      return ReduceJSStrictEqual(node, true);
+    case IrOpcode::kJSLessThan:         // fall through
+    case IrOpcode::kJSGreaterThan:      // fall through
+    case IrOpcode::kJSLessThanOrEqual:  // fall through
+    case IrOpcode::kJSGreaterThanOrEqual:
+      return ReduceJSComparison(node);
+    case IrOpcode::kJSBitwiseOr:
+      return ReduceI32Binop(node, true, true, machine()->Word32Or());
+    case IrOpcode::kJSBitwiseXor:
+      return ReduceI32Binop(node, true, true, machine()->Word32Xor());
+    case IrOpcode::kJSBitwiseAnd:
+      return ReduceI32Binop(node, true, true, machine()->Word32And());
+    case IrOpcode::kJSShiftLeft:
+      return ReduceI32Shift(node, true, machine()->Word32Shl());
+    case IrOpcode::kJSShiftRight:
+      return ReduceI32Shift(node, true, machine()->Word32Sar());
+    case IrOpcode::kJSShiftRightLogical:
+      return ReduceI32Shift(node, false, machine()->Word32Shr());
+    case IrOpcode::kJSAdd:
+      return ReduceJSAdd(node);
+    case IrOpcode::kJSSubtract:
+      return ReduceNumberBinop(node, simplified()->NumberSubtract());
+    case IrOpcode::kJSMultiply:
+      return ReduceNumberBinop(node, simplified()->NumberMultiply());
+    case IrOpcode::kJSDivide:
+      return ReduceNumberBinop(node, simplified()->NumberDivide());
+    case IrOpcode::kJSModulus:
+      return ReduceNumberBinop(node, simplified()->NumberModulus());
+    case IrOpcode::kJSUnaryNot: {
+      Reduction result = ReduceJSToBooleanInput(node->InputAt(0));
+      Node* value;
+      if (result.Changed()) {
+        // !x => BooleanNot(x)
+        value =
+            graph()->NewNode(simplified()->BooleanNot(), result.replacement());
+        ReplaceUses(node, value, NULL);
+        return Changed(value);
+      } else {
+        // !x => BooleanNot(JSToBoolean(x))
+        value = graph()->NewNode(simplified()->BooleanNot(), node);
+        node->set_op(javascript()->ToBoolean());
+        ReplaceUses(node, value, node);
+        // Note: ReplaceUses() smashes all uses, so smash it back here.
+        value->ReplaceInput(0, node);
+        return ReplaceWith(value);
+      }
+    }
+    case IrOpcode::kJSToBoolean:
+      return ReplaceWithReduction(node,
+                                  ReduceJSToBooleanInput(node->InputAt(0)));
+    case IrOpcode::kJSToNumber:
+      return ReplaceWithReduction(node,
+                                  ReduceJSToNumberInput(node->InputAt(0)));
+    case IrOpcode::kJSToString:
+      return ReplaceWithReduction(node,
+                                  ReduceJSToStringInput(node->InputAt(0)));
+    default:
+      break;
+  }
+  return NoChange();
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-typed-lowering.h b/src/compiler/js-typed-lowering.h
new file mode 100644 (file)
index 0000000..184058b
--- /dev/null
@@ -0,0 +1,69 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_TYPED_LOWERING_H_
+#define V8_COMPILER_JS_TYPED_LOWERING_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/lowering-builder.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSBinopReduction;
+
+// Lowers JS-level operators to simplified operators based on types.
+class JSTypedLowering : public LoweringBuilder {
+ public:
+  explicit JSTypedLowering(JSGraph* jsgraph,
+                           SourcePositionTable* source_positions)
+      : LoweringBuilder(jsgraph->graph(), source_positions),
+        jsgraph_(jsgraph),
+        simplified_(jsgraph->zone()),
+        machine_(jsgraph->zone()) {}
+  virtual ~JSTypedLowering() {}
+
+  Reduction Reduce(Node* node);
+  virtual void Lower(Node* node) { Reduce(node); }
+
+  JSGraph* jsgraph() { return jsgraph_; }
+  Graph* graph() { return jsgraph_->graph(); }
+
+ private:
+  friend class JSBinopReduction;
+  JSGraph* jsgraph_;
+  SimplifiedOperatorBuilder simplified_;
+  MachineOperatorBuilder machine_;
+
+  Reduction ReplaceEagerly(Node* old, Node* node);
+  Reduction NoChange() { return Reducer::NoChange(); }
+  Reduction ReplaceWith(Node* node) { return Reducer::Replace(node); }
+  Reduction Changed(Node* node) { return Reducer::Changed(node); }
+  Reduction ReduceJSAdd(Node* node);
+  Reduction ReduceJSComparison(Node* node);
+  Reduction ReduceJSEqual(Node* node, bool invert);
+  Reduction ReduceJSStrictEqual(Node* node, bool invert);
+  Reduction ReduceJSToNumberInput(Node* input);
+  Reduction ReduceJSToStringInput(Node* input);
+  Reduction ReduceJSToBooleanInput(Node* input);
+  Reduction ReduceNumberBinop(Node* node, Operator* numberOp);
+  Reduction ReduceI32Binop(Node* node, bool left_signed, bool right_signed,
+                           Operator* intOp);
+  Reduction ReduceI32Shift(Node* node, bool left_signed, Operator* shift_op);
+
+  JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
+  CommonOperatorBuilder* common() { return jsgraph_->common(); }
+  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+  MachineOperatorBuilder* machine() { return &machine_; }
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_JS_TYPED_LOWERING_H_
diff --git a/src/compiler/linkage-impl.h b/src/compiler/linkage-impl.h
new file mode 100644 (file)
index 0000000..7159bde
--- /dev/null
@@ -0,0 +1,206 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LINKAGE_IMPL_H_
+#define V8_COMPILER_LINKAGE_IMPL_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class LinkageHelper {
+ public:
+  static LinkageLocation TaggedStackSlot(int index) {
+    ASSERT(index < 0);
+    return LinkageLocation(kMachineTagged, index);
+  }
+
+  static LinkageLocation TaggedRegisterLocation(Register reg) {
+    return LinkageLocation(kMachineTagged, Register::ToAllocationIndex(reg));
+  }
+
+  static inline LinkageLocation WordRegisterLocation(Register reg) {
+    return LinkageLocation(MachineOperatorBuilder::pointer_rep(),
+                           Register::ToAllocationIndex(reg));
+  }
+
+  static LinkageLocation UnconstrainedRegister(MachineRepresentation rep) {
+    return LinkageLocation(rep, LinkageLocation::ANY_REGISTER);
+  }
+
+  static const RegList kNoCalleeSaved = 0;
+
+  // TODO(turbofan): cache call descriptors for JSFunction calls.
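+  // The locations array is laid out as [returns..., inputs...]; e.g. for
+  // parameter_count == 2:
+  //   locations[0] = ReturnValueReg      (return value)
+  //   locations[1] = JSCallFunctionReg   (JSFunction to call)
+  //   locations[2] = stack slot -2       (parameter 0)
+  //   locations[3] = stack slot -1       (parameter 1)
+  //   locations[4] = ContextReg          (context)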
+  template <typename LinkageTraits>
+  static CallDescriptor* GetJSCallDescriptor(Zone* zone, int parameter_count) {
+    const int jsfunction_count = 1;
+    const int context_count = 1;
+    int input_count = jsfunction_count + parameter_count + context_count;
+
+    const int return_count = 1;
+    LinkageLocation* locations =
+        zone->NewArray<LinkageLocation>(return_count + input_count);
+
+    int index = 0;
+    locations[index++] =
+        TaggedRegisterLocation(LinkageTraits::ReturnValueReg());
+    locations[index++] =
+        TaggedRegisterLocation(LinkageTraits::JSCallFunctionReg());
+
+    for (int i = 0; i < parameter_count; i++) {
+      // All parameters to JS calls go on the stack.
+      int spill_slot_index = i - parameter_count;
+      locations[index++] = TaggedStackSlot(spill_slot_index);
+    }
+    locations[index++] = TaggedRegisterLocation(LinkageTraits::ContextReg());
+
+    // TODO(titzer): refactor TurboFan graph to consider context a value input.
+    return new (zone)
+        CallDescriptor(CallDescriptor::kCallJSFunction,  // kind
+                       return_count,                     // return_count
+                       parameter_count,                  // parameter_count
+                       input_count - context_count,      // input_count
+                       locations,                        // locations
+                       Operator::kNoProperties,          // properties
+                       kNoCalleeSaved,  // callee-saved registers
+                       CallDescriptor::kCanDeoptimize);  // deoptimization
+  }
+
+
+  // TODO(turbofan): cache call descriptors for runtime calls.
+  template <typename LinkageTraits>
+  static CallDescriptor* GetRuntimeCallDescriptor(
+      Zone* zone, Runtime::FunctionId function_id, int parameter_count,
+      Operator::Property properties,
+      CallDescriptor::DeoptimizationSupport can_deoptimize) {
+    const int code_count = 1;
+    const int function_count = 1;
+    const int num_args_count = 1;
+    const int context_count = 1;
+    const int input_count = code_count + parameter_count + function_count +
+                            num_args_count + context_count;
+
+    const Runtime::Function* function = Runtime::FunctionForId(function_id);
+    const int return_count = function->result_size;
+    LinkageLocation* locations =
+        zone->NewArray<LinkageLocation>(return_count + input_count);
+
+    int index = 0;
+    if (return_count > 0) {
+      locations[index++] =
+          TaggedRegisterLocation(LinkageTraits::ReturnValueReg());
+    }
+    if (return_count > 1) {
+      locations[index++] =
+          TaggedRegisterLocation(LinkageTraits::ReturnValue2Reg());
+    }
+
+    ASSERT_LE(return_count, 2);
+
+    locations[index++] = UnconstrainedRegister(kMachineTagged);  // CEntryStub
+
+    for (int i = 0; i < parameter_count; i++) {
+      // All parameters to runtime calls go on the stack.
+      int spill_slot_index = i - parameter_count;
+      locations[index++] = TaggedStackSlot(spill_slot_index);
+    }
+    locations[index++] =
+        TaggedRegisterLocation(LinkageTraits::RuntimeCallFunctionReg());
+    locations[index++] =
+        WordRegisterLocation(LinkageTraits::RuntimeCallArgCountReg());
+    locations[index++] = TaggedRegisterLocation(LinkageTraits::ContextReg());
+
+    // TODO(titzer): refactor TurboFan graph to consider context a value input.
+    return new (zone) CallDescriptor(CallDescriptor::kCallCodeObject,  // kind
+                                     return_count,     // return_count
+                                     parameter_count,  // parameter_count
+                                     input_count,      // input_count
+                                     locations,        // locations
+                                     properties,       // properties
+                                     kNoCalleeSaved,   // callee-saved registers
+                                     can_deoptimize,   // deoptimization
+                                     function->name);
+  }
+
+
+  // TODO(turbofan): cache call descriptors for code stub calls.
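+  // For example, register_parameter_count == 2 and stack_parameter_count == 1
+  // yields:
+  //   locations[0] = ReturnValueReg            (return value)
+  //   locations[1] = any register              (code object)
+  //   locations[2] = descriptor register 0     (parameter 0)
+  //   locations[3] = descriptor register 1     (parameter 1)
+  //   locations[4] = stack slot -1             (parameter 2)
+  //   locations[5] = ContextReg                (context)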
+  template <typename LinkageTraits>
+  static CallDescriptor* GetStubCallDescriptor(
+      Zone* zone, CodeStubInterfaceDescriptor* descriptor,
+      int stack_parameter_count) {
+    int register_parameter_count = descriptor->GetEnvironmentParameterCount();
+    int parameter_count = register_parameter_count + stack_parameter_count;
+    const int code_count = 1;
+    const int context_count = 1;
+    int input_count = code_count + parameter_count + context_count;
+
+    const int return_count = 1;
+    LinkageLocation* locations =
+        zone->NewArray<LinkageLocation>(return_count + input_count);
+
+    int index = 0;
+    locations[index++] =
+        TaggedRegisterLocation(LinkageTraits::ReturnValueReg());
+    locations[index++] = UnconstrainedRegister(kMachineTagged);  // code
+    for (int i = 0; i < parameter_count; i++) {
+      if (i < register_parameter_count) {
+        // The first parameters to code stub calls go in registers.
+        Register reg = descriptor->GetEnvironmentParameterRegister(i);
+        locations[index++] = TaggedRegisterLocation(reg);
+      } else {
+        // The rest of the parameters go on the stack.
+        int stack_slot = i - register_parameter_count - stack_parameter_count;
+        locations[index++] = TaggedStackSlot(stack_slot);
+      }
+    }
+    locations[index++] = TaggedRegisterLocation(LinkageTraits::ContextReg());
+
+    // TODO(titzer): refactor TurboFan graph to consider context a value input.
+    return new (zone)
+        CallDescriptor(CallDescriptor::kCallCodeObject,  // kind
+                       return_count,                     // return_count
+                       parameter_count,                  // parameter_count
+                       input_count,                      // input_count
+                       locations,                        // locations
+                       Operator::kNoProperties,          // properties
+                       kNoCalleeSaved,  // callee-saved registers
+                       CallDescriptor::kCannotDeoptimize,  // deoptimization
+                       CodeStub::MajorName(descriptor->MajorKey(), false));
+    // TODO(jarin) should deoptimize!
+  }
+
+
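+  // Maps the first parameters to the platform's C register parameters and
+  // the rest to stack locations -1 - i; e.g. with two C register parameters
+  // and num_params == 3, parameter 2 lands in stack location -3.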
+  template <typename LinkageTraits>
+  static CallDescriptor* GetSimplifiedCDescriptor(
+      Zone* zone, int num_params, MachineRepresentation return_type,
+      const MachineRepresentation* param_types) {
+    LinkageLocation* locations =
+        zone->NewArray<LinkageLocation>(num_params + 2);
+    int index = 0;
+    locations[index++] =
+        TaggedRegisterLocation(LinkageTraits::ReturnValueReg());
+    locations[index++] = LinkageHelper::UnconstrainedRegister(
+        MachineOperatorBuilder::pointer_rep());
+    // TODO(dcarney): test with lots of parameters.
+    int i = 0;
+    for (; i < LinkageTraits::CRegisterParametersLength() && i < num_params;
+         i++) {
+      locations[index++] = LinkageLocation(
+          param_types[i],
+          Register::ToAllocationIndex(LinkageTraits::CRegisterParameter(i)));
+    }
+    for (; i < num_params; i++) {
+      locations[index++] = LinkageLocation(param_types[i], -1 - i);
+    }
+    return new (zone) CallDescriptor(
+        CallDescriptor::kCallAddress, 1, num_params, num_params + 1, locations,
+        Operator::kNoProperties, LinkageTraits::CCalleeSaveRegisters(),
+        CallDescriptor::kCannotDeoptimize);  // TODO(jarin) should deoptimize!
+  }
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_LINKAGE_IMPL_H_
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
new file mode 100644 (file)
index 0000000..b08f694
--- /dev/null
@@ -0,0 +1,140 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/linkage.h"
+
+#include "src/code-stubs.h"
+#include "src/compiler.h"
+#include "src/compiler/node.h"
+#include "src/compiler/pipeline.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+OStream& operator<<(OStream& os, const CallDescriptor::Kind& k) {
+  switch (k) {
+    case CallDescriptor::kCallCodeObject:
+      os << "Code";
+      break;
+    case CallDescriptor::kCallJSFunction:
+      os << "JS";
+      break;
+    case CallDescriptor::kCallAddress:
+      os << "Addr";
+      break;
+  }
+  return os;
+}
+
+
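+// For example, a descriptor of kind kCallJSFunction with one return value,
+// two parameters, four inputs, lazy-deopt support and no debug name prints
+// as "JS::r1p2i4deopt".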
+OStream& operator<<(OStream& os, const CallDescriptor& d) {
+  // TODO(svenpanne) Output properties etc. and be less cryptic.
+  return os << d.kind() << ":" << d.debug_name() << ":r" << d.ReturnCount()
+            << "p" << d.ParameterCount() << "i" << d.InputCount()
+            << (d.CanLazilyDeoptimize() ? "deopt" : "");
+}
+
+
+Linkage::Linkage(CompilationInfo* info) : info_(info) {
+  if (info->function() != NULL) {
+    // If we already have the function literal, use the number of parameters
+    // plus the receiver.
+    incoming_ = GetJSCallDescriptor(1 + info->function()->parameter_count());
+  } else if (!info->closure().is_null()) {
+    // If we are compiling a JS function, use the number of formal
+    // parameters from its shared function info, plus the receiver.
+    SharedFunctionInfo* shared = info->closure()->shared();
+    incoming_ = GetJSCallDescriptor(1 + shared->formal_parameter_count());
+  } else if (info->code_stub() != NULL) {
+    // Use the code stub interface descriptor.
+    HydrogenCodeStub* stub = info->code_stub();
+    CodeStubInterfaceDescriptor* descriptor =
+        info_->isolate()->code_stub_interface_descriptor(stub->MajorKey());
+    incoming_ = GetStubCallDescriptor(descriptor);
+  } else {
+    incoming_ = NULL;  // TODO(titzer): ?
+  }
+}
+
+
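+// For example, with extra == 0 and an empty register save area, spill slot 0
+// resolves to [fp - kPointerSize], while incoming parameter slot -1 resolves
+// to [fp + kFPOnStackSize + kPCOnStackSize], just above the saved frame
+// pointer and return address.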
+FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame, int extra) {
+  if (frame->GetSpillSlotCount() > 0 || incoming_->IsJSFunctionCall() ||
+      incoming_->kind() == CallDescriptor::kCallAddress) {
+    int offset;
+    int register_save_area_size = frame->GetRegisterSaveAreaSize();
+    if (spill_slot >= 0) {
+      // Local or spill slot. Skip the frame pointer, function, and
+      // context in the fixed part of the frame.
+      offset =
+          -(spill_slot + 1) * kPointerSize - register_save_area_size + extra;
+    } else {
+      // Incoming parameter. Skip the return address.
+      offset = -(spill_slot + 1) * kPointerSize + kFPOnStackSize +
+               kPCOnStackSize + extra;
+    }
+    return FrameOffset::FromFramePointer(offset);
+  } else {
+    // No frame. Retrieve all parameters relative to stack pointer.
+    ASSERT(spill_slot < 0);  // Must be a parameter.
+    int register_save_area_size = frame->GetRegisterSaveAreaSize();
+    int offset = register_save_area_size - (spill_slot + 1) * kPointerSize +
+                 kPCOnStackSize + extra;
+    return FrameOffset::FromStackPointer(offset);
+  }
+}
+
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count) {
+  return GetJSCallDescriptor(parameter_count, this->info_->zone());
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Property properties,
+    CallDescriptor::DeoptimizationSupport can_deoptimize) {
+  return GetRuntimeCallDescriptor(function, parameter_count, properties,
+                                  can_deoptimize, this->info_->zone());
+}
+
+
+//==============================================================================
+// Provide unimplemented stubs on unsupported architectures, so that the rest
+// of the compiler at least links.
+//==============================================================================
+#if !V8_TURBOFAN_TARGET
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Property properties,
+    CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(
+    Zone* zone, int num_params, MachineRepresentation return_type,
+    MachineRepresentation* param_types) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+#endif  // !V8_TURBOFAN_TARGET
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
new file mode 100644 (file)
index 0000000..9d9c508
--- /dev/null
@@ -0,0 +1,188 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LINKAGE_H_
+#define V8_COMPILER_LINKAGE_H_
+
+#include "src/v8.h"
+
+#include "src/code-stubs.h"
+#include "src/compiler/frame.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Describes the location for a parameter or a return value to a call.
+// TODO(titzer): replace with Radium locations when they are ready.
+class LinkageLocation {
+ public:
+  LinkageLocation(MachineRepresentation rep, int location)
+      : rep_(rep), location_(location) {}
+
+  inline MachineRepresentation representation() const { return rep_; }
+
+  static const int16_t ANY_REGISTER = 32767;
+
+ private:
+  friend class CallDescriptor;
+  friend class OperandGenerator;
+  MachineRepresentation rep_;
+  int16_t location_;  // >= 0 implies register, otherwise stack slot.
+};
+
+
+class CallDescriptor : public ZoneObject {
+ public:
+  // Describes whether the first parameter is a code object, a JSFunction,
+  // or an address--all of which require different machine sequences to call.
+  enum Kind { kCallCodeObject, kCallJSFunction, kCallAddress };
+
+  enum DeoptimizationSupport { kCanDeoptimize, kCannotDeoptimize };
+
+  CallDescriptor(Kind kind, int8_t return_count, int16_t parameter_count,
+                 int16_t input_count, LinkageLocation* locations,
+                 Operator::Property properties, RegList callee_saved_registers,
+                 DeoptimizationSupport deoptimization_support,
+                 const char* debug_name = "")
+      : kind_(kind),
+        return_count_(return_count),
+        parameter_count_(parameter_count),
+        input_count_(input_count),
+        locations_(locations),
+        properties_(properties),
+        callee_saved_registers_(callee_saved_registers),
+        deoptimization_support_(deoptimization_support),
+        debug_name_(debug_name) {}
+  // Returns the kind of this call.
+  Kind kind() const { return kind_; }
+
+  // Returns {true} if this descriptor is a call to a JSFunction.
+  bool IsJSFunctionCall() const { return kind_ == kCallJSFunction; }
+
+  // The number of return values from this call, usually 0 or 1.
+  int ReturnCount() const { return return_count_; }
+
+  // The number of JavaScript parameters to this call, including receiver,
+  // but not the context.
+  int ParameterCount() const { return parameter_count_; }
+
+  int InputCount() const { return input_count_; }
+
+  bool CanLazilyDeoptimize() const {
+    return deoptimization_support_ == kCanDeoptimize;
+  }
+
+  LinkageLocation GetReturnLocation(int index) {
+    ASSERT(index < return_count_);
+    return locations_[0 + index];  // return locations start at 0.
+  }
+
+  LinkageLocation GetInputLocation(int index) {
+    ASSERT(index < input_count_ + 1);  // the context is an extra input.
+    return locations_[return_count_ + index];  // inputs start after returns.
+  }
+
+  // Operator properties describe how this call can be optimized, if at all.
+  Operator::Property properties() const { return properties_; }
+
+  // Get the callee-saved registers, if any, across this call.
+  RegList CalleeSavedRegisters() { return callee_saved_registers_; }
+
+  const char* debug_name() const { return debug_name_; }
+
+ private:
+  friend class Linkage;
+
+  Kind kind_;
+  int8_t return_count_;
+  int16_t parameter_count_;
+  int16_t input_count_;
+  LinkageLocation* locations_;
+  Operator::Property properties_;
+  RegList callee_saved_registers_;
+  DeoptimizationSupport deoptimization_support_;
+  const char* debug_name_;
+};
+
+OStream& operator<<(OStream& os, const CallDescriptor& d);
+OStream& operator<<(OStream& os, const CallDescriptor::Kind& k);
+
+// Defines the linkage for a compilation, including the calling conventions
+// for incoming parameters and return value(s) as well as the outgoing calling
+// convention for any kind of call. Linkage is generally architecture-specific.
+//
+// Can be used to translate {arg_index} (i.e. index of the call node input) as
+// well as {param_index} (i.e. as stored in parameter nodes) into an operator
+// representing the architecture-specific location. The following call node
+// layouts are supported (where {n} is the number of value inputs):
+//
+//                  #0          #1     #2     #3     [...]             #n
+// Call[CodeStub]   code,       arg 1, arg 2, arg 3, [...],            context
+// Call[JSFunction] function,   rcvr,  arg 1, arg 2, [...],            context
+// Call[Runtime]    CEntryStub, arg 1, arg 2, arg 3, [...], fun, #arg, context
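+//
+// For example, a JSFunction call with a receiver and two arguments has the
+// value inputs #0 function, #1 receiver, #2 arg 1, #3 arg 2, #4 context.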
+class Linkage : public ZoneObject {
+ public:
+  explicit Linkage(CompilationInfo* info);
+  explicit Linkage(CompilationInfo* info, CallDescriptor* incoming)
+      : info_(info), incoming_(incoming) {}
+
+  // The call descriptor for this compilation unit describes the locations
+  // of incoming parameters and the outgoing return value(s).
+  CallDescriptor* GetIncomingDescriptor() { return incoming_; }
+  CallDescriptor* GetJSCallDescriptor(int parameter_count);
+  static CallDescriptor* GetJSCallDescriptor(int parameter_count, Zone* zone);
+  CallDescriptor* GetRuntimeCallDescriptor(
+      Runtime::FunctionId function, int parameter_count,
+      Operator::Property properties,
+      CallDescriptor::DeoptimizationSupport can_deoptimize =
+          CallDescriptor::kCannotDeoptimize);
+  static CallDescriptor* GetRuntimeCallDescriptor(
+      Runtime::FunctionId function, int parameter_count,
+      Operator::Property properties,
+      CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone);
+
+  CallDescriptor* GetStubCallDescriptor(CodeStubInterfaceDescriptor* descriptor,
+                                        int stack_parameter_count = 0);
+
+  // Creates a call descriptor for simplified C calls that is appropriate
+  // for the host platform. This simplified calling convention only supports
+  // integers and pointers of one word size each, i.e. no floating point,
+  // structs, pointers to members, etc.
+  static CallDescriptor* GetSimplifiedCDescriptor(
+      Zone* zone, int num_params, MachineRepresentation return_type,
+      const MachineRepresentation* param_types);
+
+  // Get the location of an (incoming) parameter to this function.
+  LinkageLocation GetParameterLocation(int index) {
+    return incoming_->GetInputLocation(index + 1);
+  }
+
+  // Get the location where this function should place its return value.
+  LinkageLocation GetReturnLocation() {
+    return incoming_->GetReturnLocation(0);
+  }
+
+  // Get the frame offset for a given spill slot. The location depends on the
+  // calling convention and the specific frame layout, and may thus be
+  // architecture-specific. Negative spill slots indicate arguments on the
+  // caller's frame. The {extra} parameter indicates an additional offset from
+  // the frame offset, e.g. to index into part of a double slot.
+  FrameOffset GetFrameOffset(int spill_slot, Frame* frame, int extra = 0);
+
+  CompilationInfo* info() const { return info_; }
+
+ private:
+  CompilationInfo* info_;
+  CallDescriptor* incoming_;
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_LINKAGE_H_
diff --git a/src/compiler/lowering-builder.cc b/src/compiler/lowering-builder.cc
new file mode 100644 (file)
index 0000000..f3644cf
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/lowering-builder.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class LoweringBuilder::NodeVisitor : public NullNodeVisitor {
+ public:
+  explicit NodeVisitor(LoweringBuilder* lowering) : lowering_(lowering) {}
+
+  GenericGraphVisit::Control Post(Node* node) {
+    SourcePositionTable::Scope pos(lowering_->source_positions_, node);
+    lowering_->Lower(node);
+    return GenericGraphVisit::CONTINUE;
+  }
+
+ private:
+  LoweringBuilder* lowering_;
+};
+
+
+LoweringBuilder::LoweringBuilder(Graph* graph,
+                                 SourcePositionTable* source_positions)
+    : graph_(graph), source_positions_(source_positions) {}
+
+
+void LoweringBuilder::LowerAllNodes() {
+  NodeVisitor visitor(this);
+  graph()->VisitNodeInputsFromEnd(&visitor);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/lowering-builder.h b/src/compiler/lowering-builder.h
new file mode 100644 (file)
index 0000000..aeaaaac
--- /dev/null
@@ -0,0 +1,38 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LOWERING_BUILDER_H_
+#define V8_COMPILER_LOWERING_BUILDER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/graph.h"
+
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(dcarney): rename this class.
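+// Drives a post-order traversal of the graph, invoking the subclass's
+// Lower() on every node; JSTypedLowering (js-typed-lowering.h) is one
+// concrete subclass.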
+class LoweringBuilder {
+ public:
+  explicit LoweringBuilder(Graph* graph, SourcePositionTable* source_positions);
+  virtual ~LoweringBuilder() {}
+
+  void LowerAllNodes();
+  virtual void Lower(Node* node) = 0;  // Exposed for testing.
+
+  Graph* graph() const { return graph_; }
+
+ private:
+  class NodeVisitor;
+  Graph* graph_;
+  SourcePositionTable* source_positions_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_LOWERING_BUILDER_H_
diff --git a/src/compiler/machine-node-factory.h b/src/compiler/machine-node-factory.h
new file mode 100644 (file)
index 0000000..3a7b947
--- /dev/null
@@ -0,0 +1,367 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MACHINE_NODE_FACTORY_H_
+#define V8_COMPILER_MACHINE_NODE_FACTORY_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+
+// USE_SIMULATOR is defined (if at all) by src/globals.h via src/v8.h, so it
+// must be tested after the includes above.
+#ifdef USE_SIMULATOR
+#define MACHINE_ASSEMBLER_SUPPORTS_CALL_C 0
+#else
+#define MACHINE_ASSEMBLER_SUPPORTS_CALL_C 1
+#endif
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class MachineCallDescriptorBuilder : public ZoneObject {
+ public:
+  MachineCallDescriptorBuilder(MachineRepresentation return_type,
+                               int parameter_count,
+                               const MachineRepresentation* parameter_types)
+      : return_type_(return_type),
+        parameter_count_(parameter_count),
+        parameter_types_(parameter_types) {}
+
+  int parameter_count() const { return parameter_count_; }
+  const MachineRepresentation* parameter_types() const {
+    return parameter_types_;
+  }
+
+  CallDescriptor* BuildCallDescriptor(Zone* zone) {
+    return Linkage::GetSimplifiedCDescriptor(zone, parameter_count_,
+                                             return_type_, parameter_types_);
+  }
+
+ private:
+  const MachineRepresentation return_type_;
+  const int parameter_count_;
+  const MachineRepresentation* const parameter_types_;
+};
+
+
+#define ZONE() static_cast<NodeFactory*>(this)->zone()
+#define COMMON() static_cast<NodeFactory*>(this)->common()
+#define MACHINE() static_cast<NodeFactory*>(this)->machine()
+#define NEW_NODE_0(op) static_cast<NodeFactory*>(this)->NewNode(op)
+#define NEW_NODE_1(op, a) static_cast<NodeFactory*>(this)->NewNode(op, a)
+#define NEW_NODE_2(op, a, b) static_cast<NodeFactory*>(this)->NewNode(op, a, b)
+#define NEW_NODE_3(op, a, b, c) \
+  static_cast<NodeFactory*>(this)->NewNode(op, a, b, c)
+
+template <typename NodeFactory>
+class MachineNodeFactory {
+ public:
+  // Constants.
+  Node* PointerConstant(void* value) {
+    return IntPtrConstant(reinterpret_cast<intptr_t>(value));
+  }
+  Node* IntPtrConstant(intptr_t value) {
+    // TODO(dcarney): mark generated code as unserializable if value != 0.
+    return kPointerSize == 8 ? Int64Constant(value) : Int32Constant(value);
+  }
+  Node* Int32Constant(int32_t value) {
+    return NEW_NODE_0(COMMON()->Int32Constant(value));
+  }
+  Node* Int64Constant(int64_t value) {
+    return NEW_NODE_0(COMMON()->Int64Constant(value));
+  }
+  Node* NumberConstant(double value) {
+    return NEW_NODE_0(COMMON()->NumberConstant(value));
+  }
+  Node* Float64Constant(double value) {
+    return NEW_NODE_0(COMMON()->Float64Constant(value));
+  }
+  Node* HeapConstant(Handle<Object> object) {
+    PrintableUnique<Object> val =
+        PrintableUnique<Object>::CreateUninitialized(ZONE(), object);
+    return NEW_NODE_0(COMMON()->HeapConstant(val));
+  }
+
+  // Memory Operations.
+  Node* Load(MachineRepresentation rep, Node* base) {
+    return Load(rep, base, Int32Constant(0));
+  }
+  Node* Load(MachineRepresentation rep, Node* base, Node* index) {
+    return NEW_NODE_2(MACHINE()->Load(rep), base, index);
+  }
+  void Store(MachineRepresentation rep, Node* base, Node* value) {
+    Store(rep, base, Int32Constant(0), value);
+  }
+  void Store(MachineRepresentation rep, Node* base, Node* index, Node* value) {
+    NEW_NODE_3(MACHINE()->Store(rep), base, index, value);
+  }
+  // Arithmetic Operations.
+  Node* WordAnd(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->WordAnd(), a, b);
+  }
+  Node* WordOr(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->WordOr(), a, b);
+  }
+  Node* WordXor(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->WordXor(), a, b);
+  }
+  Node* WordShl(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->WordShl(), a, b);
+  }
+  Node* WordShr(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->WordShr(), a, b);
+  }
+  Node* WordSar(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->WordSar(), a, b);
+  }
+  Node* WordEqual(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->WordEqual(), a, b);
+  }
+  Node* WordNotEqual(Node* a, Node* b) {
+    return WordBinaryNot(WordEqual(a, b));
+  }
+  Node* WordNot(Node* a) {
+    if (MACHINE()->is32()) {
+      return Word32Not(a);
+    } else {
+      return Word64Not(a);
+    }
+  }
+  Node* WordBinaryNot(Node* a) {
+    if (MACHINE()->is32()) {
+      return Word32BinaryNot(a);
+    } else {
+      return Word64BinaryNot(a);
+    }
+  }
+
+  Node* Word32And(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Word32And(), a, b);
+  }
+  Node* Word32Or(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Word32Or(), a, b);
+  }
+  Node* Word32Xor(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Word32Xor(), a, b);
+  }
+  Node* Word32Shl(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Word32Shl(), a, b);
+  }
+  Node* Word32Shr(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Word32Shr(), a, b);
+  }
+  Node* Word32Sar(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Word32Sar(), a, b);
+  }
+  Node* Word32Equal(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Word32Equal(), a, b);
+  }
+  Node* Word32NotEqual(Node* a, Node* b) {
+    return Word32BinaryNot(Word32Equal(a, b));
+  }
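+  // Word32Not is the bitwise complement; Word32BinaryNot is logical negation
+  // (1 if the input is 0, otherwise 0).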
+  Node* Word32Not(Node* a) { return Word32Xor(a, Int32Constant(-1)); }
+  Node* Word32BinaryNot(Node* a) { return Word32Equal(a, Int32Constant(0)); }
+
+  Node* Word64And(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Word64And(), a, b);
+  }
+  Node* Word64Or(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Word64Or(), a, b);
+  }
+  Node* Word64Xor(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Word64Xor(), a, b);
+  }
+  Node* Word64Shl(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Word64Shl(), a, b);
+  }
+  Node* Word64Shr(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Word64Shr(), a, b);
+  }
+  Node* Word64Sar(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Word64Sar(), a, b);
+  }
+  Node* Word64Equal(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Word64Equal(), a, b);
+  }
+  Node* Word64NotEqual(Node* a, Node* b) {
+    return Word64BinaryNot(Word64Equal(a, b));
+  }
+  Node* Word64Not(Node* a) { return Word64Xor(a, Int64Constant(-1)); }
+  Node* Word64BinaryNot(Node* a) { return Word64Equal(a, Int64Constant(0)); }
+
+  Node* Int32Add(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Int32Add(), a, b);
+  }
+  Node* Int32Sub(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Int32Sub(), a, b);
+  }
+  Node* Int32Mul(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Int32Mul(), a, b);
+  }
+  Node* Int32Div(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Int32Div(), a, b);
+  }
+  Node* Int32UDiv(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Int32UDiv(), a, b);
+  }
+  Node* Int32Mod(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Int32Mod(), a, b);
+  }
+  Node* Int32UMod(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Int32UMod(), a, b);
+  }
+  Node* Int32LessThan(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Int32LessThan(), a, b);
+  }
+  Node* Int32LessThanOrEqual(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Int32LessThanOrEqual(), a, b);
+  }
+  Node* Uint32LessThan(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Uint32LessThan(), a, b);
+  }
+  Node* Uint32LessThanOrEqual(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Uint32LessThanOrEqual(), a, b);
+  }
+  Node* Int32GreaterThan(Node* a, Node* b) { return Int32LessThan(b, a); }
+  Node* Int32GreaterThanOrEqual(Node* a, Node* b) {
+    return Int32LessThanOrEqual(b, a);
+  }
+  Node* Int32Neg(Node* a) { return Int32Sub(Int32Constant(0), a); }
+
+  Node* Int64Add(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Int64Add(), a, b);
+  }
+  Node* Int64Sub(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Int64Sub(), a, b);
+  }
+  Node* Int64Mul(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Int64Mul(), a, b);
+  }
+  Node* Int64Div(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Int64Div(), a, b);
+  }
+  Node* Int64UDiv(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Int64UDiv(), a, b);
+  }
+  Node* Int64Mod(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Int64Mod(), a, b);
+  }
+  Node* Int64UMod(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Int64UMod(), a, b);
+  }
+  Node* Int64Neg(Node* a) { return Int64Sub(Int64Constant(0), a); }
+  Node* Int64LessThan(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Int64LessThan(), a, b);
+  }
+  Node* Int64LessThanOrEqual(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Int64LessThanOrEqual(), a, b);
+  }
+  Node* Int64GreaterThan(Node* a, Node* b) { return Int64LessThan(b, a); }
+  Node* Int64GreaterThanOrEqual(Node* a, Node* b) {
+    return Int64LessThanOrEqual(b, a);
+  }
+
+  Node* ConvertIntPtrToInt32(Node* a) {
+    return kPointerSize == 8 ? NEW_NODE_1(MACHINE()->ConvertInt64ToInt32(), a)
+                             : a;
+  }
+  Node* ConvertInt32ToIntPtr(Node* a) {
+    return kPointerSize == 8 ? NEW_NODE_1(MACHINE()->ConvertInt32ToInt64(), a)
+                             : a;
+  }
+
+#define INTPTR_BINOP(prefix, name)                     \
+  Node* IntPtr##name(Node* a, Node* b) {               \
+    return kPointerSize == 8 ? prefix##64##name(a, b)  \
+                             : prefix##32##name(a, b); \
+  }
+
+  INTPTR_BINOP(Int, Add);
+  INTPTR_BINOP(Int, Sub);
+  INTPTR_BINOP(Int, LessThan);
+  INTPTR_BINOP(Int, LessThanOrEqual);
+  INTPTR_BINOP(Word, Equal);
+  INTPTR_BINOP(Word, NotEqual);
+  INTPTR_BINOP(Int, GreaterThanOrEqual);
+  INTPTR_BINOP(Int, GreaterThan);
+
+#undef INTPTR_BINOP
+
+  Node* Float64Add(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Float64Add(), a, b);
+  }
+  Node* Float64Sub(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Float64Sub(), a, b);
+  }
+  Node* Float64Mul(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Float64Mul(), a, b);
+  }
+  Node* Float64Div(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Float64Div(), a, b);
+  }
+  Node* Float64Mod(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Float64Mod(), a, b);
+  }
+  Node* Float64Equal(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Float64Equal(), a, b);
+  }
+  Node* Float64NotEqual(Node* a, Node* b) {
+    return WordBinaryNot(Float64Equal(a, b));
+  }
+  Node* Float64LessThan(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Float64LessThan(), a, b);
+  }
+  Node* Float64LessThanOrEqual(Node* a, Node* b) {
+    return NEW_NODE_2(MACHINE()->Float64LessThanOrEqual(), a, b);
+  }
+  Node* Float64GreaterThan(Node* a, Node* b) { return Float64LessThan(b, a); }
+  Node* Float64GreaterThanOrEqual(Node* a, Node* b) {
+    return Float64LessThanOrEqual(b, a);
+  }
+
+  // Conversions.
+  Node* ConvertInt32ToInt64(Node* a) {
+    return NEW_NODE_1(MACHINE()->ConvertInt32ToInt64(), a);
+  }
+  Node* ConvertInt64ToInt32(Node* a) {
+    return NEW_NODE_1(MACHINE()->ConvertInt64ToInt32(), a);
+  }
+  Node* ConvertInt32ToFloat64(Node* a) {
+    return NEW_NODE_1(MACHINE()->ConvertInt32ToFloat64(), a);
+  }
+  Node* ConvertFloat64ToInt32(Node* a) {
+    return NEW_NODE_1(MACHINE()->ConvertFloat64ToInt32(), a);
+  }
+
+#if MACHINE_ASSEMBLER_SUPPORTS_CALL_C
+  // Call to C.
+  Node* CallC(Node* function_address, MachineRepresentation return_type,
+              MachineRepresentation* arg_types, Node** args, int n_args) {
+    CallDescriptor* descriptor = Linkage::GetSimplifiedCDescriptor(
+        ZONE(), n_args, return_type, arg_types);
+    Node** passed_args =
+        static_cast<Node**>(alloca((n_args + 1) * sizeof(args[0])));
+    passed_args[0] = function_address;
+    for (int i = 0; i < n_args; ++i) {
+      passed_args[i + 1] = args[i];
+    }
+    return NEW_NODE_2(COMMON()->Call(descriptor), n_args + 1, passed_args);
+  }
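+  // A minimal usage sketch, assuming a hypothetical C helper
+  // int32_t Sum(int32_t, int32_t) and a factory instance m:
+  //   MachineRepresentation arg_types[] = {kMachineWord32, kMachineWord32};
+  //   Node* args[] = {m.Int32Constant(1), m.Int32Constant(2)};
+  //   Node* r = m.CallC(m.PointerConstant(reinterpret_cast<void*>(&Sum)),
+  //                     kMachineWord32, arg_types, args, 2);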
+#endif
+};
+
+#undef NEW_NODE_0
+#undef NEW_NODE_1
+#undef NEW_NODE_2
+#undef NEW_NODE_3
+#undef MACHINE
+#undef COMMON
+#undef ZONE
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_MACHINE_NODE_FACTORY_H_
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
new file mode 100644 (file)
index 0000000..4a40576
--- /dev/null
@@ -0,0 +1,343 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/machine-operator-reducer.h"
+
+#include "src/compiler/common-node-cache.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+MachineOperatorReducer::MachineOperatorReducer(Graph* graph)
+    : graph_(graph),
+      cache_(new (graph->zone()) CommonNodeCache(graph->zone())),
+      common_(graph->zone()),
+      machine_(graph->zone()) {}
+
+
+MachineOperatorReducer::MachineOperatorReducer(Graph* graph,
+                                               CommonNodeCache* cache)
+    : graph_(graph),
+      cache_(cache),
+      common_(graph->zone()),
+      machine_(graph->zone()) {}
+
+
+Node* MachineOperatorReducer::Int32Constant(int32_t value) {
+  Node** loc = cache_->FindInt32Constant(value);
+  if (*loc == NULL) {
+    *loc = graph_->NewNode(common_.Int32Constant(value));
+  }
+  return *loc;
+}
+
+
+Node* MachineOperatorReducer::Float64Constant(volatile double value) {
+  Node** loc = cache_->FindFloat64Constant(value);
+  if (*loc == NULL) {
+    *loc = graph_->NewNode(common_.Float64Constant(value));
+  }
+  return *loc;
+}
+
+
+// Perform constant folding and strength reduction on machine operators.
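+// For example, Int32Add(1, 2) folds to the constant 3, and Int32Mul(x, 8)
+// is strength-reduced to Word32Shl(x, 3).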
+Reduction MachineOperatorReducer::Reduce(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kWord32And: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.right().node());  // x & 0  => 0
+      if (m.right().Is(-1)) return Replace(m.left().node());  // x & -1 => x
+      if (m.IsFoldable()) {                                   // K & K  => K
+        return ReplaceInt32(m.left().Value() & m.right().Value());
+      }
+      if (m.LeftEqualsRight()) return Replace(m.left().node());  // x & x => x
+      break;
+    }
+    case IrOpcode::kWord32Or: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());    // x | 0  => x
+      if (m.right().Is(-1)) return Replace(m.right().node());  // x | -1 => -1
+      if (m.IsFoldable()) {                                    // K | K  => K
+        return ReplaceInt32(m.left().Value() | m.right().Value());
+      }
+      if (m.LeftEqualsRight()) return Replace(m.left().node());  // x | x => x
+      break;
+    }
+    case IrOpcode::kWord32Xor: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x ^ 0 => x
+      if (m.IsFoldable()) {                                  // K ^ K => K
+        return ReplaceInt32(m.left().Value() ^ m.right().Value());
+      }
+      if (m.LeftEqualsRight()) return ReplaceInt32(0);  // x ^ x => 0
+      break;
+    }
+    case IrOpcode::kWord32Shl: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x << 0 => x
+      if (m.IsFoldable()) {                                  // K << K => K
+        return ReplaceInt32(m.left().Value() << m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kWord32Shr: {
+      Uint32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x >>> 0 => x
+      if (m.IsFoldable()) {                                  // K >>> K => K
+        return ReplaceInt32(m.left().Value() >> m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kWord32Sar: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x >> 0 => x
+      if (m.IsFoldable()) {                                  // K >> K => K
+        return ReplaceInt32(m.left().Value() >> m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kWord32Equal: {
+      Int32BinopMatcher m(node);
+      if (m.IsFoldable()) {  // K == K => K
+        return ReplaceBool(m.left().Value() == m.right().Value());
+      }
+      if (m.left().IsInt32Sub() && m.right().Is(0)) {  // x - y == 0 => x == y
+        Int32BinopMatcher msub(m.left().node());
+        node->ReplaceInput(0, msub.left().node());
+        node->ReplaceInput(1, msub.right().node());
+        return Changed(node);
+      }
+      // TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
+      if (m.LeftEqualsRight()) return ReplaceBool(true);  // x == x => true
+      break;
+    }
+    case IrOpcode::kInt32Add: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x + 0 => x
+      if (m.IsFoldable()) {                                  // K + K => K
+        return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) +
+                            static_cast<uint32_t>(m.right().Value()));
+      }
+      break;
+    }
+    case IrOpcode::kInt32Sub: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x - 0 => x
+      if (m.IsFoldable()) {                                  // K - K => K
+        return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) -
+                            static_cast<uint32_t>(m.right().Value()));
+      }
+      if (m.LeftEqualsRight()) return ReplaceInt32(0);  // x - x => 0
+      break;
+    }
+    case IrOpcode::kInt32Mul: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.right().node());  // x * 0 => 0
+      if (m.right().Is(1)) return Replace(m.left().node());   // x * 1 => x
+      if (m.IsFoldable()) {                                   // K * K => K
+        return ReplaceInt32(m.left().Value() * m.right().Value());
+      }
+      if (m.right().Is(-1)) {  // x * -1 => 0 - x
+        graph_->ChangeOperator(node, machine_.Int32Sub());
+        node->ReplaceInput(0, Int32Constant(0));
+        node->ReplaceInput(1, m.left().node());
+        return Changed(node);
+      }
+      if (m.right().IsPowerOf2()) {  // x * 2^n => x << n
+        graph_->ChangeOperator(node, machine_.Word32Shl());
+        node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
+        return Changed(node);
+      }
+      break;
+    }
+    case IrOpcode::kInt32Div: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(1)) return Replace(m.left().node());  // x / 1 => x
+      // TODO(turbofan): if (m.left().Is(0))
+      // TODO(turbofan): if (m.right().IsPowerOf2())
+      // TODO(turbofan): if (m.right().Is(0))
+      // TODO(turbofan): if (m.LeftEqualsRight())
+      if (m.IsFoldable() && !m.right().Is(0)) {  // K / K => K
+        if (m.right().Is(-1)) return ReplaceInt32(-m.left().Value());
+        return ReplaceInt32(m.left().Value() / m.right().Value());
+      }
+      if (m.right().Is(-1)) {  // x / -1 => 0 - x
+        graph_->ChangeOperator(node, machine_.Int32Sub());
+        node->ReplaceInput(0, Int32Constant(0));
+        node->ReplaceInput(1, m.left().node());
+        return Changed(node);
+      }
+      break;
+    }
+    case IrOpcode::kInt32UDiv: {
+      Uint32BinopMatcher m(node);
+      if (m.right().Is(1)) return Replace(m.left().node());  // x / 1 => x
+      // TODO(turbofan): if (m.left().Is(0))
+      // TODO(turbofan): if (m.right().Is(0))
+      // TODO(turbofan): if (m.LeftEqualsRight())
+      if (m.IsFoldable() && !m.right().Is(0)) {  // K / K => K
+        return ReplaceInt32(m.left().Value() / m.right().Value());
+      }
+      if (m.right().IsPowerOf2()) {  // x / 2^n => x >> n
+        graph_->ChangeOperator(node, machine_.Word32Shr());
+        node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
+        return Changed(node);
+      }
+      break;
+    }
+    case IrOpcode::kInt32Mod: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(1)) return ReplaceInt32(0);   // x % 1  => 0
+      if (m.right().Is(-1)) return ReplaceInt32(0);  // x % -1 => 0
+      // TODO(turbofan): if (m.left().Is(0))
+      // TODO(turbofan): if (m.right().IsPowerOf2())
+      // TODO(turbofan): if (m.right().Is(0))
+      // TODO(turbofan): if (m.LeftEqualsRight())
+      if (m.IsFoldable() && !m.right().Is(0)) {  // K % K => K
+        return ReplaceInt32(m.left().Value() % m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kInt32UMod: {
+      Uint32BinopMatcher m(node);
+      if (m.right().Is(1)) return ReplaceInt32(0);  // x % 1 => 0
+      // TODO(turbofan): if (m.left().Is(0))
+      // TODO(turbofan): if (m.right().Is(0))
+      // TODO(turbofan): if (m.LeftEqualsRight())
+      if (m.IsFoldable() && !m.right().Is(0)) {  // K % K => K
+        return ReplaceInt32(m.left().Value() % m.right().Value());
+      }
+      if (m.right().IsPowerOf2()) {  // x % 2^n => x & 2^n-1
+        graph_->ChangeOperator(node, machine_.Word32And());
+        node->ReplaceInput(1, Int32Constant(m.right().Value() - 1));
+        return Changed(node);
+      }
+      break;
+    }
+    case IrOpcode::kInt32LessThan: {
+      Int32BinopMatcher m(node);
+      if (m.IsFoldable()) {  // K < K => K
+        return ReplaceBool(m.left().Value() < m.right().Value());
+      }
+      if (m.left().IsInt32Sub() && m.right().Is(0)) {  // x - y < 0 => x < y
+        Int32BinopMatcher msub(m.left().node());
+        node->ReplaceInput(0, msub.left().node());
+        node->ReplaceInput(1, msub.right().node());
+        return Changed(node);
+      }
+      if (m.left().Is(0) && m.right().IsInt32Sub()) {  // 0 < x - y => y < x
+        Int32BinopMatcher msub(m.right().node());
+        node->ReplaceInput(0, msub.right().node());
+        node->ReplaceInput(1, msub.left().node());
+        return Changed(node);
+      }
+      if (m.LeftEqualsRight()) return ReplaceBool(false);  // x < x => false
+      break;
+    }
+    case IrOpcode::kInt32LessThanOrEqual: {
+      Int32BinopMatcher m(node);
+      if (m.IsFoldable()) {  // K <= K => K
+        return ReplaceBool(m.left().Value() <= m.right().Value());
+      }
+      if (m.left().IsInt32Sub() && m.right().Is(0)) {  // x - y <= 0 => x <= y
+        Int32BinopMatcher msub(m.left().node());
+        node->ReplaceInput(0, msub.left().node());
+        node->ReplaceInput(1, msub.right().node());
+        return Changed(node);
+      }
+      if (m.left().Is(0) && m.right().IsInt32Sub()) {  // 0 <= x - y => y <= x
+        Int32BinopMatcher msub(m.right().node());
+        node->ReplaceInput(0, msub.right().node());
+        node->ReplaceInput(1, msub.left().node());
+        return Changed(node);
+      }
+      if (m.LeftEqualsRight()) return ReplaceBool(true);  // x <= x => true
+      break;
+    }
+    case IrOpcode::kUint32LessThan: {
+      Uint32BinopMatcher m(node);
+      if (m.left().Is(kMaxUInt32)) return ReplaceBool(false);  // M < x => false
+      if (m.right().Is(0)) return ReplaceBool(false);          // x < 0 => false
+      if (m.IsFoldable()) {                                    // K < K => K
+        return ReplaceBool(m.left().Value() < m.right().Value());
+      }
+      if (m.LeftEqualsRight()) return ReplaceBool(false);  // x < x => false
+      break;
+    }
+    case IrOpcode::kUint32LessThanOrEqual: {
+      Uint32BinopMatcher m(node);
+      if (m.left().Is(0)) return ReplaceBool(true);            // 0 <= x => true
+      if (m.right().Is(kMaxUInt32)) return ReplaceBool(true);  // x <= M => true
+      if (m.IsFoldable()) {                                    // K <= K => K
+        return ReplaceBool(m.left().Value() <= m.right().Value());
+      }
+      if (m.LeftEqualsRight()) return ReplaceBool(true);  // x <= x => true
+      break;
+    }
+    case IrOpcode::kFloat64Add: {
+      Float64BinopMatcher m(node);
+      if (m.IsFoldable()) {  // K + K => K
+        return ReplaceFloat64(m.left().Value() + m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kFloat64Sub: {
+      Float64BinopMatcher m(node);
+      if (m.IsFoldable()) {  // K - K => K
+        return ReplaceFloat64(m.left().Value() - m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kFloat64Mul: {
+      Float64BinopMatcher m(node);
+      if (m.right().Is(1)) return Replace(m.left().node());  // x * 1.0 => x
+      if (m.right().IsNaN()) {                               // x * NaN => NaN
+        return Replace(m.right().node());
+      }
+      if (m.IsFoldable()) {  // K * K => K
+        return ReplaceFloat64(m.left().Value() * m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kFloat64Div: {
+      Float64BinopMatcher m(node);
+      if (m.right().Is(1)) return Replace(m.left().node());  // x / 1.0 => x
+      if (m.right().IsNaN()) {                               // x / NaN => NaN
+        return Replace(m.right().node());
+      }
+      if (m.left().IsNaN()) {  // NaN / x => NaN
+        return Replace(m.left().node());
+      }
+      if (m.IsFoldable()) {  // K / K => K
+        return ReplaceFloat64(m.left().Value() / m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kFloat64Mod: {
+      Float64BinopMatcher m(node);
+      if (m.right().IsNaN()) {  // x % NaN => NaN
+        return Replace(m.right().node());
+      }
+      if (m.left().IsNaN()) {  // NaN % x => NaN
+        return Replace(m.left().node());
+      }
+      if (m.IsFoldable()) {  // K % K => K
+        return ReplaceFloat64(modulo(m.left().Value(), m.right().Value()));
+      }
+      break;
+    }
+    // TODO(turbofan): strength-reduce and fold floating point operations.
+    default:
+      break;
+  }
+  return NoChange();
+}
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h
new file mode 100644 (file)
index 0000000..46d2931
--- /dev/null
@@ -0,0 +1,52 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
+#define V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/machine-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonNodeCache;
+
+// Performs constant folding and strength reduction on nodes that have
+// machine operators.
+class MachineOperatorReducer : public Reducer {
+ public:
+  explicit MachineOperatorReducer(Graph* graph);
+
+  MachineOperatorReducer(Graph* graph, CommonNodeCache* cache);
+
+  virtual Reduction Reduce(Node* node);
+
+ private:
+  Graph* graph_;
+  CommonNodeCache* cache_;
+  CommonOperatorBuilder common_;
+  MachineOperatorBuilder machine_;
+
+  Node* Int32Constant(int32_t value);
+  Node* Float64Constant(volatile double value);
+
+  Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 1 : 0); }
+
+  Reduction ReplaceInt32(int32_t value) {
+    return Replace(Int32Constant(value));
+  }
+
+  Reduction ReplaceFloat64(volatile double value) {
+    return Replace(Float64Constant(value));
+  }
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
new file mode 100644 (file)
index 0000000..88f257c
--- /dev/null
@@ -0,0 +1,177 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MACHINE_OPERATOR_H_
+#define V8_COMPILER_MACHINE_OPERATOR_H_
+
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// An enumeration of the storage representations at the machine level.
+// - Words are uninterpreted bits of a given fixed size that can be used
+//   to store integers and pointers. They are normally allocated to general
+//   purpose registers by the backend and are not tracked for GC.
+// - Floats are bits of a given fixed size that are used to store floating
+//   point numbers. They are normally allocated to the floating point
+//   registers of the machine and are not tracked for the GC.
+// - Tagged values are the size of a reference into the heap and can store
+//   small words or references into the heap using a language and potentially
+//   machine-dependent tagging scheme. These values are tracked by the code
+//   generator for precise GC.
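+// For example, pointer_rep() (defined below) is kMachineWord32 on 32-bit
+// targets and kMachineWord64 on 64-bit targets, and tagged values are
+// pointer-sized.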
+enum MachineRepresentation {
+  kMachineWord8,
+  kMachineWord16,
+  kMachineWord32,
+  kMachineWord64,
+  kMachineFloat64,
+  kMachineTagged,
+  kMachineLast
+};
+
+
+// TODO(turbofan): other write barriers are possible based on type
+enum WriteBarrierKind { kNoWriteBarrier, kFullWriteBarrier };
+
+
+// A Store needs a MachineRepresentation and a WriteBarrierKind
+// in order to emit the correct write barrier.
+struct StoreRepresentation {
+  MachineRepresentation rep;
+  WriteBarrierKind write_barrier_kind;
+};
+
+
+// Interface for building machine-level operators. These operators are
+// machine-level but machine-independent and thus define a language suitable
+// for generating code to run on architectures such as ia32, x64, arm, etc.
+class MachineOperatorBuilder {
+ public:
+  explicit MachineOperatorBuilder(Zone* zone,
+                                  MachineRepresentation word = pointer_rep())
+      : zone_(zone), word_(word) {
+    CHECK(word == kMachineWord32 || word == kMachineWord64);
+  }
+
+#define SIMPLE(name, properties, inputs, outputs) \
+  return new (zone_)                              \
+      SimpleOperator(IrOpcode::k##name, properties, inputs, outputs, #name);
+
+#define OP1(name, ptype, pname, properties, inputs, outputs)               \
+  return new (zone_)                                                       \
+      Operator1<ptype>(IrOpcode::k##name, properties | Operator::kNoThrow, \
+                       inputs, outputs, #name, pname)
+
+#define BINOP(name) SIMPLE(name, Operator::kPure, 2, 1)
+#define BINOP_C(name) \
+  SIMPLE(name, Operator::kCommutative | Operator::kPure, 2, 1)
+#define BINOP_AC(name)                                                         \
+  SIMPLE(name,                                                                 \
+         Operator::kAssociative | Operator::kCommutative | Operator::kPure, 2, \
+         1)
+#define UNOP(name) SIMPLE(name, Operator::kPure, 1, 1)
+
+#define WORD_SIZE(x) return is64() ? Word64##x() : Word32##x()
+
+  Operator* Load(MachineRepresentation rep) {  // load [base + index]
+    OP1(Load, MachineRepresentation, rep, Operator::kNoWrite, 2, 1);
+  }
+  // store [base + index], value
+  Operator* Store(MachineRepresentation rep,
+                  WriteBarrierKind kind = kNoWriteBarrier) {
+    StoreRepresentation store_rep = {rep, kind};
+    OP1(Store, StoreRepresentation, store_rep, Operator::kNoRead, 3, 0);
+  }
+
+  Operator* WordAnd() { WORD_SIZE(And); }
+  Operator* WordOr() { WORD_SIZE(Or); }
+  Operator* WordXor() { WORD_SIZE(Xor); }
+  Operator* WordShl() { WORD_SIZE(Shl); }
+  Operator* WordShr() { WORD_SIZE(Shr); }
+  Operator* WordSar() { WORD_SIZE(Sar); }
+  Operator* WordEqual() { WORD_SIZE(Equal); }
+
+  Operator* Word32And() { BINOP_AC(Word32And); }
+  Operator* Word32Or() { BINOP_AC(Word32Or); }
+  Operator* Word32Xor() { BINOP_AC(Word32Xor); }
+  Operator* Word32Shl() { BINOP(Word32Shl); }
+  Operator* Word32Shr() { BINOP(Word32Shr); }
+  Operator* Word32Sar() { BINOP(Word32Sar); }
+  Operator* Word32Equal() { BINOP_C(Word32Equal); }
+
+  Operator* Word64And() { BINOP_AC(Word64And); }
+  Operator* Word64Or() { BINOP_AC(Word64Or); }
+  Operator* Word64Xor() { BINOP_AC(Word64Xor); }
+  Operator* Word64Shl() { BINOP(Word64Shl); }
+  Operator* Word64Shr() { BINOP(Word64Shr); }
+  Operator* Word64Sar() { BINOP(Word64Sar); }
+  Operator* Word64Equal() { BINOP_C(Word64Equal); }
+
+  Operator* Int32Add() { BINOP_AC(Int32Add); }
+  Operator* Int32Sub() { BINOP(Int32Sub); }
+  Operator* Int32Mul() { BINOP_AC(Int32Mul); }
+  Operator* Int32Div() { BINOP(Int32Div); }
+  Operator* Int32UDiv() { BINOP(Int32UDiv); }
+  Operator* Int32Mod() { BINOP(Int32Mod); }
+  Operator* Int32UMod() { BINOP(Int32UMod); }
+  Operator* Int32LessThan() { BINOP(Int32LessThan); }
+  Operator* Int32LessThanOrEqual() { BINOP(Int32LessThanOrEqual); }
+  Operator* Uint32LessThan() { BINOP(Uint32LessThan); }
+  Operator* Uint32LessThanOrEqual() { BINOP(Uint32LessThanOrEqual); }
+
+  Operator* Int64Add() { BINOP_AC(Int64Add); }
+  Operator* Int64Sub() { BINOP(Int64Sub); }
+  Operator* Int64Mul() { BINOP_AC(Int64Mul); }
+  Operator* Int64Div() { BINOP(Int64Div); }
+  Operator* Int64UDiv() { BINOP(Int64UDiv); }
+  Operator* Int64Mod() { BINOP(Int64Mod); }
+  Operator* Int64UMod() { BINOP(Int64UMod); }
+  Operator* Int64LessThan() { BINOP(Int64LessThan); }
+  Operator* Int64LessThanOrEqual() { BINOP(Int64LessThanOrEqual); }
+
+  Operator* ConvertInt32ToInt64() { UNOP(ConvertInt32ToInt64); }
+  Operator* ConvertInt64ToInt32() { UNOP(ConvertInt64ToInt32); }
+  Operator* ConvertInt32ToFloat64() { UNOP(ConvertInt32ToFloat64); }
+  Operator* ConvertUint32ToFloat64() { UNOP(ConvertUint32ToFloat64); }
+  // TODO(titzer): add rounding mode to floating point conversion.
+  Operator* ConvertFloat64ToInt32() { UNOP(ConvertFloat64ToInt32); }
+  Operator* ConvertFloat64ToUint32() { UNOP(ConvertFloat64ToUint32); }
+
+  // TODO(titzer): do we need different rounding modes for float arithmetic?
+  Operator* Float64Add() { BINOP_C(Float64Add); }
+  Operator* Float64Sub() { BINOP(Float64Sub); }
+  Operator* Float64Mul() { BINOP_C(Float64Mul); }
+  Operator* Float64Div() { BINOP(Float64Div); }
+  Operator* Float64Mod() { BINOP(Float64Mod); }
+  Operator* Float64Equal() { BINOP_C(Float64Equal); }
+  Operator* Float64LessThan() { BINOP(Float64LessThan); }
+  Operator* Float64LessThanOrEqual() { BINOP(Float64LessThanOrEqual); }
+
+  inline bool is32() const { return word_ == kMachineWord32; }
+  inline bool is64() const { return word_ == kMachineWord64; }
+  inline MachineRepresentation word() const { return word_; }
+
+  static inline MachineRepresentation pointer_rep() {
+    return kPointerSize == 8 ? kMachineWord64 : kMachineWord32;
+  }
+
+#undef WORD_SIZE
+#undef UNOP
+#undef BINOP
+#undef OP1
+#undef SIMPLE
+
+ private:
+  Zone* zone_;
+  MachineRepresentation word_;
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_MACHINE_OPERATOR_H_
diff --git a/src/compiler/node-aux-data-inl.h b/src/compiler/node-aux-data-inl.h
new file mode 100644 (file)
index 0000000..0ec8eca
--- /dev/null
@@ -0,0 +1,43 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_AUX_DATA_INL_H_
+#define V8_COMPILER_NODE_AUX_DATA_INL_H_
+
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-aux-data.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <class T>
+NodeAuxData<T>::NodeAuxData(Graph* graph)
+    : aux_data_(ZoneAllocator(graph->zone())) {}
+
+
+template <class T>
+void NodeAuxData<T>::Set(Node* node, const T& data) {
+  int id = node->id();
+  if (id >= static_cast<int>(aux_data_.size())) {
+    aux_data_.resize(id + 1);
+  }
+  aux_data_[id] = data;
+}
+
+
+template <class T>
+T NodeAuxData<T>::Get(Node* node) {
+  int id = node->id();
+  if (id >= static_cast<int>(aux_data_.size())) {
+    return T();
+  }
+  return aux_data_[id];
+}
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_NODE_AUX_DATA_INL_H_
diff --git a/src/compiler/node-aux-data.h b/src/compiler/node-aux-data.h
new file mode 100644 (file)
index 0000000..839d994
--- /dev/null
@@ -0,0 +1,38 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_AUX_DATA_H_
+#define V8_COMPILER_NODE_AUX_DATA_H_
+
+#include <vector>
+
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class Graph;
+class Node;
+
+template <class T>
+class NodeAuxData {
+ public:
+  inline explicit NodeAuxData(Graph* graph);
+
+  inline void Set(Node* node, const T& data);
+  inline T Get(Node* node);
+
+ private:
+  typedef zone_allocator<T> ZoneAllocator;
+  typedef std::vector<T, ZoneAllocator> TZoneVector;
+
+  TZoneVector aux_data_;
+};
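+
+// A hypothetical usage sketch (illustration only, not part of this CL):
+//
+//   NodeAuxData<int> counts(graph);
+//   counts.Set(node, 1);         // grows the backing vector on demand
+//   int c = counts.Get(node);    // returns T() for nodes that were never Set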
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_NODE_AUX_DATA_H_
diff --git a/src/compiler/node-cache.cc b/src/compiler/node-cache.cc
new file mode 100644 (file)
index 0000000..c3ee58c
--- /dev/null
@@ -0,0 +1,120 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define INITIAL_SIZE 16
+#define LINEAR_PROBE 5
+
+template <typename Key>
+int32_t NodeCacheHash(Key key) {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+template <>
+inline int32_t NodeCacheHash(int32_t key) {
+  return ComputeIntegerHash(key, 0);
+}
+
+
+template <>
+inline int32_t NodeCacheHash(int64_t key) {
+  return ComputeLongHash(key);
+}
+
+
+template <>
+inline int32_t NodeCacheHash(double key) {
+  return ComputeLongHash(BitCast<int64_t>(key));
+}
+
+
+template <>
+inline int32_t NodeCacheHash(void* key) {
+  return ComputePointerHash(key);
+}
+
+
+template <typename Key>
+bool NodeCache<Key>::Resize(Zone* zone) {
+  if (size_ >= max_) return false;  // Don't grow past the maximum size.
+
+  // Allocate a new block of entries 4x the size.
+  Entry* old_entries = entries_;
+  int old_size = size_ + LINEAR_PROBE;
+  size_ = size_ * 4;
+  int num_entries = size_ + LINEAR_PROBE;
+  entries_ = zone->NewArray<Entry>(num_entries);
+  memset(entries_, 0, sizeof(Entry) * num_entries);
+
+  // Insert the old entries into the new block.
+  for (int i = 0; i < old_size; i++) {
+    Entry* old = &old_entries[i];
+    if (old->value_ != NULL) {
+      int hash = NodeCacheHash(old->key_);
+      int start = hash & (size_ - 1);
+      int end = start + LINEAR_PROBE;
+      for (int j = start; j < end; j++) {
+        Entry* entry = &entries_[j];
+        if (entry->value_ == NULL) {
+          entry->key_ = old->key_;
+          entry->value_ = old->value_;
+          break;
+        }
+      }
+    }
+  }
+  return true;
+}
+
+
+template <typename Key>
+Node** NodeCache<Key>::Find(Zone* zone, Key key) {
+  int32_t hash = NodeCacheHash(key);
+  if (entries_ == NULL) {
+    // Allocate the initial entries and insert the first entry.
+    int num_entries = INITIAL_SIZE + LINEAR_PROBE;
+    entries_ = zone->NewArray<Entry>(num_entries);
+    size_ = INITIAL_SIZE;
+    memset(entries_, 0, sizeof(Entry) * num_entries);
+    Entry* entry = &entries_[hash & (INITIAL_SIZE - 1)];
+    entry->key_ = key;
+    return &entry->value_;
+  }
+
+  while (true) {
+    // Search up to N entries after (linear probing).
+    int start = hash & (size_ - 1);
+    int end = start + LINEAR_PROBE;
+    for (int i = start; i < end; i++) {
+      Entry* entry = &entries_[i];
+      if (entry->key_ == key) return &entry->value_;
+      if (entry->value_ == NULL) {
+        entry->key_ = key;
+        return &entry->value_;
+      }
+    }
+
+    if (!Resize(zone)) break;  // Don't grow past the maximum size.
+  }
+
+  // If resized to maximum and still didn't find space, overwrite an entry.
+  Entry* entry = &entries_[hash & (size_ - 1)];
+  entry->key_ = key;
+  entry->value_ = NULL;
+  return &entry->value_;
+}
+
+
+template class NodeCache<int64_t>;
+template class NodeCache<int32_t>;
+template class NodeCache<void*>;
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/node-cache.h b/src/compiler/node-cache.h
new file mode 100644 (file)
index 0000000..35352ea
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_CACHE_H_
+#define V8_COMPILER_NODE_CACHE_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A cache for nodes based on a key. Useful for implementing canonicalization of
+// nodes such as constants, parameters, etc.
+template <typename Key>
+class NodeCache {
+ public:
+  explicit NodeCache(int max = 256) : entries_(NULL), size_(0), max_(max) {}
+
+  // Search for the node associated with {key} and return a pointer to a memory
+  // location in this cache that stores an entry for the key. If the location
+  // returned by this method contains a non-NULL node, the caller can use that
+  // node. Otherwise it is the responsibility of the caller to fill the entry
+  // with a new node.
+  // Note that a previous cache entry may be overwritten if the cache becomes
+  // too full or encounters too many hash collisions.
+  Node** Find(Zone* zone, Key key);
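+  //
+  // A hypothetical usage sketch (illustration only, not part of this CL):
+  //
+  //   Int32NodeCache cache;
+  //   Node** loc = cache.Find(zone, value);
+  //   if (*loc == NULL) *loc = graph->NewNode(common->Int32Constant(value));
+  //   return *loc;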
+
+ private:
+  struct Entry {
+    Key key_;
+    Node* value_;
+  };
+
+  Entry* entries_;  // lazily-allocated hash entries.
+  int32_t size_;
+  int32_t max_;
+
+  bool Resize(Zone* zone);
+};
+
+// Various default cache types.
+typedef NodeCache<int64_t> Int64NodeCache;
+typedef NodeCache<int32_t> Int32NodeCache;
+typedef NodeCache<void*> PtrNodeCache;
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_NODE_CACHE_H_
diff --git a/src/compiler/node-matchers.h b/src/compiler/node-matchers.h
new file mode 100644 (file)
index 0000000..faf7f4e
--- /dev/null
@@ -0,0 +1,133 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_MATCHERS_H_
+#define V8_COMPILER_NODE_MATCHERS_H_
+
+#include "src/compiler/common-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A pattern matcher for nodes.
+struct NodeMatcher {
+  explicit NodeMatcher(Node* node) : node_(node) {}
+
+  Node* node() const { return node_; }
+  Operator* op() const { return node()->op(); }
+  IrOpcode::Value opcode() const { return node()->opcode(); }
+
+  bool HasProperty(Operator::Property property) const {
+    return op()->HasProperty(property);
+  }
+  Node* InputAt(int index) const { return node()->InputAt(index); }
+
+#define DEFINE_IS_OPCODE(Opcode) \
+  bool Is##Opcode() const { return opcode() == IrOpcode::k##Opcode; }
+  ALL_OP_LIST(DEFINE_IS_OPCODE)
+#undef DEFINE_IS_OPCODE
+
+ private:
+  Node* node_;
+};
+
+
+// A pattern matcher for arbitrary value constants.
+template <typename T>
+struct ValueMatcher : public NodeMatcher {
+  explicit ValueMatcher(Node* node)
+      : NodeMatcher(node),
+        value_(),
+        has_value_(CommonOperatorTraits<T>::HasValue(node->op())) {
+    if (has_value_) value_ = CommonOperatorTraits<T>::ValueOf(node->op());
+  }
+
+  bool HasValue() const { return has_value_; }
+  T Value() const {
+    ASSERT(HasValue());
+    return value_;
+  }
+
+  bool Is(T value) const {
+    return HasValue() && CommonOperatorTraits<T>::Equals(Value(), value);
+  }
+
+  bool IsInRange(T low, T high) const {
+    return HasValue() && low <= value_ && value_ <= high;
+  }
+
+ private:
+  T value_;
+  bool has_value_;
+};
+
+
+// A pattern matcher for integer constants.
+template <typename T>
+struct IntMatcher V8_FINAL : public ValueMatcher<T> {
+  explicit IntMatcher(Node* node) : ValueMatcher<T>(node) {}
+
+  bool IsPowerOf2() const {
+    return this->HasValue() && this->Value() > 0 &&
+           (this->Value() & (this->Value() - 1)) == 0;
+  }
+};
+
+typedef IntMatcher<int32_t> Int32Matcher;
+typedef IntMatcher<uint32_t> Uint32Matcher;
+typedef IntMatcher<int64_t> Int64Matcher;
+typedef IntMatcher<uint64_t> Uint64Matcher;
+
+
+// A pattern matcher for floating point constants.
+template <typename T>
+struct FloatMatcher V8_FINAL : public ValueMatcher<T> {
+  explicit FloatMatcher(Node* node) : ValueMatcher<T>(node) {}
+
+  bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
+};
+
+typedef FloatMatcher<double> Float64Matcher;
+
+
+// For shorter pattern matching code, this struct matches both the left and
+// right hand sides of a binary operation and can put constants on the right
+// if they appear on the left hand side of a commutative operation.
+template <typename Left, typename Right>
+struct BinopMatcher V8_FINAL : public NodeMatcher {
+  explicit BinopMatcher(Node* node)
+      : NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) {
+    if (HasProperty(Operator::kCommutative)) PutConstantOnRight();
+  }
+
+  const Left& left() const { return left_; }
+  const Right& right() const { return right_; }
+
+  bool IsFoldable() const { return left().HasValue() && right().HasValue(); }
+  bool LeftEqualsRight() const { return left().node() == right().node(); }
+
+ private:
+  void PutConstantOnRight() {
+    if (left().HasValue() && !right().HasValue()) {
+      std::swap(left_, right_);
+      node()->ReplaceInput(0, left().node());
+      node()->ReplaceInput(1, right().node());
+    }
+  }
+
+  Left left_;
+  Right right_;
+};
+
+typedef BinopMatcher<Int32Matcher, Int32Matcher> Int32BinopMatcher;
+typedef BinopMatcher<Uint32Matcher, Uint32Matcher> Uint32BinopMatcher;
+typedef BinopMatcher<Int64Matcher, Int64Matcher> Int64BinopMatcher;
+typedef BinopMatcher<Uint64Matcher, Uint64Matcher> Uint64BinopMatcher;
+typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
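+
+// A hypothetical usage sketch inside a reducer (illustration only, not part
+// of this CL):
+//
+//   Int32BinopMatcher m(node);   // e.g. node is an Int32Add
+//   if (m.right().Is(0)) return Replace(m.left().node());  // x + 0 => x
+//   if (m.IsFoldable()) {
+//     // ... fold m.left().Value() + m.right().Value() into a constant ...
+//   }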
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_NODE_MATCHERS_H_
diff --git a/src/compiler/node-properties-inl.h b/src/compiler/node-properties-inl.h
new file mode 100644 (file)
index 0000000..1eef6f3
--- /dev/null
@@ -0,0 +1,238 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_PROPERTIES_INL_H_
+#define V8_COMPILER_NODE_PROPERTIES_INL_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/operator-properties-inl.h"
+#include "src/compiler/operator-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// -----------------------------------------------------------------------------
+// Input counts & layout.
+// Inputs are always arranged in order as follows:
+//     0 [ values, context, effects, control ] node->InputCount()
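+//
+// For instance, a JSAdd node's inputs are laid out as
+//     [left, right, context, effect, control]
+// so that FirstContextIndex() == 2, FirstEffectIndex() == 3 and
+// FirstControlIndex() == 4 (assuming one effect and one control input).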
+
+inline bool NodeProperties::HasValueInput(Node* node) {
+  return OperatorProperties::GetValueInputCount(node->op()) > 0;
+}
+
+inline bool NodeProperties::HasContextInput(Node* node) {
+  return OperatorProperties::HasContextInput(node->op());
+}
+
+inline bool NodeProperties::HasEffectInput(Node* node) {
+  return OperatorProperties::GetEffectInputCount(node->op()) > 0;
+}
+
+inline bool NodeProperties::HasControlInput(Node* node) {
+  return OperatorProperties::GetControlInputCount(node->op()) > 0;
+}
+
+
+inline int NodeProperties::GetValueInputCount(Node* node) {
+  return OperatorProperties::GetValueInputCount(node->op());
+}
+
+inline int NodeProperties::GetContextInputCount(Node* node) {
+  return OperatorProperties::HasContextInput(node->op()) ? 1 : 0;
+}
+
+inline int NodeProperties::GetEffectInputCount(Node* node) {
+  return OperatorProperties::GetEffectInputCount(node->op());
+}
+
+inline int NodeProperties::GetControlInputCount(Node* node) {
+  return OperatorProperties::GetControlInputCount(node->op());
+}
+
+
+inline int NodeProperties::FirstValueIndex(Node* node) { return 0; }
+
+inline int NodeProperties::FirstContextIndex(Node* node) {
+  return PastValueIndex(node);
+}
+
+inline int NodeProperties::FirstEffectIndex(Node* node) {
+  return PastContextIndex(node);
+}
+
+inline int NodeProperties::FirstControlIndex(Node* node) {
+  return PastEffectIndex(node);
+}
+
+
+inline int NodeProperties::PastValueIndex(Node* node) {
+  return FirstValueIndex(node) + GetValueInputCount(node);
+}
+
+inline int NodeProperties::PastContextIndex(Node* node) {
+  return FirstContextIndex(node) + GetContextInputCount(node);
+}
+
+inline int NodeProperties::PastEffectIndex(Node* node) {
+  return FirstEffectIndex(node) + GetEffectInputCount(node);
+}
+
+inline int NodeProperties::PastControlIndex(Node* node) {
+  return FirstControlIndex(node) + GetControlInputCount(node);
+}
+
+
+// -----------------------------------------------------------------------------
+// Input accessors.
+
+inline Node* NodeProperties::GetValueInput(Node* node, int index) {
+  ASSERT(0 <= index && index < GetValueInputCount(node));
+  return node->InputAt(FirstValueIndex(node) + index);
+}
+
+inline Node* NodeProperties::GetContextInput(Node* node) {
+  ASSERT(GetContextInputCount(node) > 0);
+  return node->InputAt(FirstContextIndex(node));
+}
+
+inline Node* NodeProperties::GetEffectInput(Node* node, int index) {
+  ASSERT(0 <= index && index < GetEffectInputCount(node));
+  return node->InputAt(FirstEffectIndex(node) + index);
+}
+
+inline Node* NodeProperties::GetControlInput(Node* node, int index) {
+  ASSERT(0 <= index && index < GetControlInputCount(node));
+  return node->InputAt(FirstControlIndex(node) + index);
+}
+
+
+// -----------------------------------------------------------------------------
+// Output counts.
+
+inline bool NodeProperties::HasValueOutput(Node* node) {
+  return GetValueOutputCount(node) > 0;
+}
+
+inline bool NodeProperties::HasEffectOutput(Node* node) {
+  return node->opcode() == IrOpcode::kStart ||
+         NodeProperties::GetEffectInputCount(node) > 0;
+}
+
+inline bool NodeProperties::HasControlOutput(Node* node) {
+  return (node->opcode() != IrOpcode::kEnd && IsControl(node)) ||
+         NodeProperties::CanLazilyDeoptimize(node);
+}
+
+
+inline int NodeProperties::GetValueOutputCount(Node* node) {
+  return OperatorProperties::GetValueOutputCount(node->op());
+}
+
+inline int NodeProperties::GetEffectOutputCount(Node* node) {
+  return HasEffectOutput(node) ? 1 : 0;
+}
+
+inline int NodeProperties::GetControlOutputCount(Node* node) {
+  return node->opcode() == IrOpcode::kBranch ? 2 : HasControlOutput(node) ? 1
+                                                                          : 0;
+}
+
+
+// -----------------------------------------------------------------------------
+// Edge kinds.
+
+inline bool NodeProperties::IsInputRange(Node::Edge edge, int first, int num) {
+  // TODO(titzer): edge.index() is linear time;
+  // edges may need to be marked as value/effect/control.
+  if (num == 0) return false;
+  int index = edge.index();
+  return first <= index && index < first + num;
+}
+
+inline bool NodeProperties::IsValueEdge(Node::Edge edge) {
+  Node* node = edge.from();
+  return IsInputRange(edge, FirstValueIndex(node), GetValueInputCount(node));
+}
+
+inline bool NodeProperties::IsContextEdge(Node::Edge edge) {
+  Node* node = edge.from();
+  return IsInputRange(edge, FirstContextIndex(node),
+                      GetContextInputCount(node));
+}
+
+inline bool NodeProperties::IsEffectEdge(Node::Edge edge) {
+  Node* node = edge.from();
+  return IsInputRange(edge, FirstEffectIndex(node), GetEffectInputCount(node));
+}
+
+inline bool NodeProperties::IsControlEdge(Node::Edge edge) {
+  Node* node = edge.from();
+  return IsInputRange(edge, FirstControlIndex(node),
+                      GetControlInputCount(node));
+}
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous predicates.
+
+inline bool NodeProperties::IsControl(Node* node) {
+  return IrOpcode::IsControlOpcode(node->opcode());
+}
+
+inline bool NodeProperties::IsBasicBlockBegin(Node* node) {
+  return OperatorProperties::IsBasicBlockBegin(node->op());
+}
+
+inline bool NodeProperties::CanBeScheduled(Node* node) {
+  return OperatorProperties::CanBeScheduled(node->op());
+}
+
+inline bool NodeProperties::HasFixedSchedulePosition(Node* node) {
+  return OperatorProperties::HasFixedSchedulePosition(node->op());
+}
+
+inline bool NodeProperties::IsScheduleRoot(Node* node) {
+  return OperatorProperties::IsScheduleRoot(node->op());
+}
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous mutators.
+
+inline void NodeProperties::ReplaceEffectInput(Node* node, Node* effect,
+                                               int index) {
+  ASSERT(index < GetEffectInputCount(node));
+  return node->ReplaceInput(
+      GetValueInputCount(node) + GetContextInputCount(node) + index, effect);
+}
+
+inline void NodeProperties::RemoveNonValueInputs(Node* node) {
+  node->TrimInputCount(GetValueInputCount(node));
+}
+
+
+// -----------------------------------------------------------------------------
+// Type Bounds.
+
+inline Bounds NodeProperties::GetBounds(Node* node) { return node->bounds(); }
+
+inline void NodeProperties::SetBounds(Node* node, Bounds b) {
+  node->set_bounds(b);
+}
+
+
+inline bool NodeProperties::CanLazilyDeoptimize(Node* node) {
+  return OperatorProperties::CanLazilyDeoptimize(node->op());
+}
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_NODE_PROPERTIES_INL_H_
diff --git a/src/compiler/node-properties.h b/src/compiler/node-properties.h
new file mode 100644 (file)
index 0000000..7afcf4e
--- /dev/null
@@ -0,0 +1,82 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_PROPERTIES_H_
+#define V8_COMPILER_NODE_PROPERTIES_H_
+
+#include "src/v8.h"
+
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Node;
+class Operator;
+
+// A facade that simplifies access to the different kinds of inputs to a node.
+class NodeProperties {
+ public:
+  static inline bool HasValueInput(Node* node);
+  static inline bool HasContextInput(Node* node);
+  static inline bool HasEffectInput(Node* node);
+  static inline bool HasControlInput(Node* node);
+
+  static inline int GetValueInputCount(Node* node);
+  static inline int GetContextInputCount(Node* node);
+  static inline int GetEffectInputCount(Node* node);
+  static inline int GetControlInputCount(Node* node);
+
+  static inline Node* GetValueInput(Node* node, int index);
+  static inline Node* GetContextInput(Node* node);
+  static inline Node* GetEffectInput(Node* node, int index = 0);
+  static inline Node* GetControlInput(Node* node, int index = 0);
+
+  static inline bool HasValueOutput(Node* node);
+  static inline bool HasEffectOutput(Node* node);
+  static inline bool HasControlOutput(Node* node);
+
+  static inline int GetValueOutputCount(Node* node);
+  static inline int GetEffectOutputCount(Node* node);
+  static inline int GetControlOutputCount(Node* node);
+
+  static inline bool IsValueEdge(Node::Edge edge);
+  static inline bool IsContextEdge(Node::Edge edge);
+  static inline bool IsEffectEdge(Node::Edge edge);
+  static inline bool IsControlEdge(Node::Edge edge);
+
+  static inline bool IsControl(Node* node);
+  static inline bool IsBasicBlockBegin(Node* node);
+
+  static inline bool CanBeScheduled(Node* node);
+  static inline bool HasFixedSchedulePosition(Node* node);
+  static inline bool IsScheduleRoot(Node* node);
+
+  static inline void ReplaceEffectInput(Node* node, Node* effect,
+                                        int index = 0);
+  static inline void RemoveNonValueInputs(Node* node);
+
+  static inline Bounds GetBounds(Node* node);
+  static inline void SetBounds(Node* node, Bounds bounds);
+
+  static inline bool CanLazilyDeoptimize(Node* node);
+
+ private:
+  static inline int FirstValueIndex(Node* node);
+  static inline int FirstContextIndex(Node* node);
+  static inline int FirstEffectIndex(Node* node);
+  static inline int FirstControlIndex(Node* node);
+  static inline int PastValueIndex(Node* node);
+  static inline int PastContextIndex(Node* node);
+  static inline int PastEffectIndex(Node* node);
+  static inline int PastControlIndex(Node* node);
+
+  static inline bool IsInputRange(Node::Edge edge, int first, int count);
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_NODE_PROPERTIES_H_
diff --git a/src/compiler/node.cc b/src/compiler/node.cc
new file mode 100644 (file)
index 0000000..260870f
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OStream& operator<<(OStream& os, const Operator& op) { return op.PrintTo(os); }
+
+
+OStream& operator<<(OStream& os, const Node& n) {
+  os << n.id() << ": " << *n.op();
+  if (n.op()->InputCount() != 0) {
+    os << "(";
+    for (int i = 0; i < n.op()->InputCount(); ++i) {
+      if (i != 0) os << ", ";
+      os << n.InputAt(i)->id();
+    }
+    os << ")";
+  }
+  return os;
+}
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/node.h b/src/compiler/node.h
new file mode 100644 (file)
index 0000000..0f63b2a
--- /dev/null
@@ -0,0 +1,92 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_H_
+#define V8_COMPILER_NODE_H_
+
+#include <deque>
+#include <set>
+#include <vector>
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/types.h"
+#include "src/zone.h"
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class NodeData {
+ public:
+  Operator* op() const { return op_; }
+  void set_op(Operator* op) { op_ = op; }
+
+  IrOpcode::Value opcode() const {
+    ASSERT(op_->opcode() <= IrOpcode::kLast);
+    return static_cast<IrOpcode::Value>(op_->opcode());
+  }
+
+  Bounds bounds() { return bounds_; }
+
+ protected:
+  Operator* op_;
+  Bounds bounds_;
+  explicit NodeData(Zone* zone) : bounds_(Bounds(Type::None(zone))) {}
+
+  friend class NodeProperties;
+  void set_bounds(Bounds b) { bounds_ = b; }
+};
+
+// A Node is the basic primitive of an IR graph. In addition to the members
+// inherited from GenericNode, Nodes only contain a mutable Operator that may
+// change during compilation, e.g. during lowering passes. Other information
+// that needs to be associated with Nodes during compilation must be stored
+// out-of-line, indexed by the Node's id.
+class Node : public GenericNode<NodeData, Node> {
+ public:
+  Node(GenericGraphBase* graph, int input_count)
+      : GenericNode<NodeData, Node>(graph, input_count) {}
+
+  void Initialize(Operator* op) { set_op(op); }
+};
+
+OStream& operator<<(OStream& os, const Node& n);
+
+typedef GenericGraphVisit::NullNodeVisitor<NodeData, Node> NullNodeVisitor;
+
+typedef zone_allocator<Node*> NodePtrZoneAllocator;
+
+typedef std::set<Node*, std::less<Node*>, NodePtrZoneAllocator> NodeSet;
+typedef NodeSet::iterator NodeSetIter;
+typedef NodeSet::reverse_iterator NodeSetRIter;
+
+typedef std::deque<Node*, NodePtrZoneAllocator> NodeDeque;
+typedef NodeDeque::iterator NodeDequeIter;
+
+typedef std::vector<Node*, NodePtrZoneAllocator> NodeVector;
+typedef NodeVector::iterator NodeVectorIter;
+typedef NodeVector::reverse_iterator NodeVectorRIter;
+
+typedef zone_allocator<NodeVector> ZoneNodeVectorAllocator;
+typedef std::vector<NodeVector, ZoneNodeVectorAllocator> NodeVectorVector;
+typedef NodeVectorVector::iterator NodeVectorVectorIter;
+typedef NodeVectorVector::reverse_iterator NodeVectorVectorRIter;
+
+typedef Node::Uses::iterator UseIter;
+typedef Node::Inputs::iterator InputIter;
+
+// Helper to extract parameters from Operator1<*> nodes.
+template <typename T>
+static inline T OpParameter(Node* node) {
+  return reinterpret_cast<Operator1<T>*>(node->op())->parameter();
+}
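+
+// A hypothetical usage sketch (illustration only, not part of this CL; the
+// requested type must match the Operator1<T> that created the operator):
+//
+//   if (node->opcode() == IrOpcode::kInt32Constant) {
+//     int32_t value = OpParameter<int32_t>(node);
+//     ...
+//   }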
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_NODE_H_
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
new file mode 100644 (file)
index 0000000..ea107c9
--- /dev/null
@@ -0,0 +1,294 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPCODES_H_
+#define V8_COMPILER_OPCODES_H_
+
+// Opcodes for control operators.
+#define CONTROL_OP_LIST(V) \
+  V(Start)                 \
+  V(Dead)                  \
+  V(Loop)                  \
+  V(End)                   \
+  V(Branch)                \
+  V(IfTrue)                \
+  V(IfFalse)               \
+  V(Merge)                 \
+  V(Return)                \
+  V(Throw)                 \
+  V(Continuation)          \
+  V(LazyDeoptimization)    \
+  V(Deoptimize)
+
+// Opcodes for common operators.
+#define LEAF_OP_LIST(V) \
+  V(Parameter)          \
+  V(Int32Constant)      \
+  V(Int64Constant)      \
+  V(Float64Constant)    \
+  V(ExternalConstant)   \
+  V(NumberConstant)     \
+  V(HeapConstant)
+
+#define INNER_OP_LIST(V) \
+  V(Phi)                 \
+  V(EffectPhi)           \
+  V(FrameState)          \
+  V(Call)                \
+  V(Projection)
+
+#define COMMON_OP_LIST(V) \
+  LEAF_OP_LIST(V)         \
+  INNER_OP_LIST(V)
+
+// Opcodes for JavaScript operators.
+#define JS_COMPARE_BINOP_LIST(V) \
+  V(JSEqual)                     \
+  V(JSNotEqual)                  \
+  V(JSStrictEqual)               \
+  V(JSStrictNotEqual)            \
+  V(JSLessThan)                  \
+  V(JSGreaterThan)               \
+  V(JSLessThanOrEqual)           \
+  V(JSGreaterThanOrEqual)
+
+#define JS_BITWISE_BINOP_LIST(V) \
+  V(JSBitwiseOr)                 \
+  V(JSBitwiseXor)                \
+  V(JSBitwiseAnd)                \
+  V(JSShiftLeft)                 \
+  V(JSShiftRight)                \
+  V(JSShiftRightLogical)
+
+#define JS_ARITH_BINOP_LIST(V) \
+  V(JSAdd)                     \
+  V(JSSubtract)                \
+  V(JSMultiply)                \
+  V(JSDivide)                  \
+  V(JSModulus)
+
+#define JS_SIMPLE_BINOP_LIST(V) \
+  JS_COMPARE_BINOP_LIST(V)      \
+  JS_BITWISE_BINOP_LIST(V)      \
+  JS_ARITH_BINOP_LIST(V)
+
+#define JS_LOGIC_UNOP_LIST(V) V(JSUnaryNot)
+
+#define JS_CONVERSION_UNOP_LIST(V) \
+  V(JSToBoolean)                   \
+  V(JSToNumber)                    \
+  V(JSToString)                    \
+  V(JSToName)                      \
+  V(JSToObject)
+
+#define JS_OTHER_UNOP_LIST(V) V(JSTypeOf)
+
+#define JS_SIMPLE_UNOP_LIST(V) \
+  JS_LOGIC_UNOP_LIST(V)        \
+  JS_CONVERSION_UNOP_LIST(V)   \
+  JS_OTHER_UNOP_LIST(V)
+
+#define JS_OBJECT_OP_LIST(V) \
+  V(JSCreate)                \
+  V(JSLoadProperty)          \
+  V(JSLoadNamed)             \
+  V(JSStoreProperty)         \
+  V(JSStoreNamed)            \
+  V(JSDeleteProperty)        \
+  V(JSHasProperty)           \
+  V(JSInstanceOf)
+
+#define JS_CONTEXT_OP_LIST(V) \
+  V(JSLoadContext)            \
+  V(JSStoreContext)           \
+  V(JSCreateFunctionContext)  \
+  V(JSCreateCatchContext)     \
+  V(JSCreateWithContext)      \
+  V(JSCreateBlockContext)     \
+  V(JSCreateModuleContext)    \
+  V(JSCreateGlobalContext)
+
+#define JS_OTHER_OP_LIST(V) \
+  V(JSCallConstruct)        \
+  V(JSCallFunction)         \
+  V(JSCallRuntime)          \
+  V(JSYield)                \
+  V(JSDebugger)
+
+#define JS_OP_LIST(V)     \
+  JS_SIMPLE_BINOP_LIST(V) \
+  JS_SIMPLE_UNOP_LIST(V)  \
+  JS_OBJECT_OP_LIST(V)    \
+  JS_CONTEXT_OP_LIST(V)   \
+  JS_OTHER_OP_LIST(V)
+
+// Opcodes for virtual-machine-level operators.
+#define SIMPLIFIED_OP_LIST(V) \
+  V(BooleanNot)               \
+  V(NumberEqual)              \
+  V(NumberLessThan)           \
+  V(NumberLessThanOrEqual)    \
+  V(NumberAdd)                \
+  V(NumberSubtract)           \
+  V(NumberMultiply)           \
+  V(NumberDivide)             \
+  V(NumberModulus)            \
+  V(NumberToInt32)            \
+  V(NumberToUint32)           \
+  V(ReferenceEqual)           \
+  V(StringEqual)              \
+  V(StringLessThan)           \
+  V(StringLessThanOrEqual)    \
+  V(StringAdd)                \
+  V(ChangeTaggedToInt32)      \
+  V(ChangeTaggedToUint32)     \
+  V(ChangeTaggedToFloat64)    \
+  V(ChangeInt32ToTagged)      \
+  V(ChangeUint32ToTagged)     \
+  V(ChangeFloat64ToTagged)    \
+  V(ChangeBoolToBit)          \
+  V(ChangeBitToBool)          \
+  V(LoadField)                \
+  V(LoadElement)              \
+  V(StoreField)               \
+  V(StoreElement)
+
+// Opcodes for Machine-level operators.
+#define MACHINE_OP_LIST(V)  \
+  V(Load)                   \
+  V(Store)                  \
+  V(Word32And)              \
+  V(Word32Or)               \
+  V(Word32Xor)              \
+  V(Word32Shl)              \
+  V(Word32Shr)              \
+  V(Word32Sar)              \
+  V(Word32Equal)            \
+  V(Word64And)              \
+  V(Word64Or)               \
+  V(Word64Xor)              \
+  V(Word64Shl)              \
+  V(Word64Shr)              \
+  V(Word64Sar)              \
+  V(Word64Equal)            \
+  V(Int32Add)               \
+  V(Int32Sub)               \
+  V(Int32Mul)               \
+  V(Int32Div)               \
+  V(Int32UDiv)              \
+  V(Int32Mod)               \
+  V(Int32UMod)              \
+  V(Int32LessThan)          \
+  V(Int32LessThanOrEqual)   \
+  V(Uint32LessThan)         \
+  V(Uint32LessThanOrEqual)  \
+  V(Int64Add)               \
+  V(Int64Sub)               \
+  V(Int64Mul)               \
+  V(Int64Div)               \
+  V(Int64UDiv)              \
+  V(Int64Mod)               \
+  V(Int64UMod)              \
+  V(Int64LessThan)          \
+  V(Int64LessThanOrEqual)   \
+  V(ConvertInt64ToInt32)    \
+  V(ConvertInt32ToInt64)    \
+  V(ConvertInt32ToFloat64)  \
+  V(ConvertUint32ToFloat64) \
+  V(ConvertFloat64ToInt32)  \
+  V(ConvertFloat64ToUint32) \
+  V(Float64Add)             \
+  V(Float64Sub)             \
+  V(Float64Mul)             \
+  V(Float64Div)             \
+  V(Float64Mod)             \
+  V(Float64Equal)           \
+  V(Float64LessThan)        \
+  V(Float64LessThanOrEqual)
+
+#define VALUE_OP_LIST(V) \
+  COMMON_OP_LIST(V)      \
+  SIMPLIFIED_OP_LIST(V)  \
+  MACHINE_OP_LIST(V)     \
+  JS_OP_LIST(V)
+
+// The combination of all operators at all levels and the common operators.
+#define ALL_OP_LIST(V) \
+  CONTROL_OP_LIST(V)   \
+  VALUE_OP_LIST(V)
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Declare an enumeration with all the opcodes at all levels so that they
+// can be globally, uniquely numbered.
+class IrOpcode {
+ public:
+  enum Value {
+#define DECLARE_OPCODE(x) k##x,
+    ALL_OP_LIST(DECLARE_OPCODE)
+#undef DECLARE_OPCODE
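+    // kLast expands to -1 plus one for every opcode in ALL_OP_LIST, i.e. to
+    // the enum value of the last opcode declared above.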
+    kLast = -1
+#define COUNT_OPCODE(x) +1
+            ALL_OP_LIST(COUNT_OPCODE)
+#undef COUNT_OPCODE
+  };
+
+  // Returns the mnemonic name of an opcode.
+  static const char* Mnemonic(Value val) {
+    switch (val) {
+#define RETURN_NAME(x) \
+  case k##x:           \
+    return #x;
+      ALL_OP_LIST(RETURN_NAME)
+#undef RETURN_NAME
+      default:
+        return "UnknownOpcode";
+    }
+  }
+
+  static bool IsJsOpcode(Value val) {
+    switch (val) {
+#define RETURN_NAME(x) \
+  case k##x:           \
+    return true;
+      JS_OP_LIST(RETURN_NAME)
+#undef RETURN_NAME
+      default:
+        return false;
+    }
+  }
+
+  static bool IsControlOpcode(Value val) {
+    switch (val) {
+#define RETURN_NAME(x) \
+  case k##x:           \
+    return true;
+      CONTROL_OP_LIST(RETURN_NAME)
+#undef RETURN_NAME
+      default:
+        return false;
+    }
+  }
+
+  static bool IsCommonOpcode(Value val) {
+    switch (val) {
+#define RETURN_NAME(x) \
+  case k##x:           \
+    return true;
+      CONTROL_OP_LIST(RETURN_NAME)
+      COMMON_OP_LIST(RETURN_NAME)
+#undef RETURN_NAME
+      default:
+        return false;
+    }
+  }
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_OPCODES_H_
diff --git a/src/compiler/operator-properties-inl.h b/src/compiler/operator-properties-inl.h
new file mode 100644 (file)
index 0000000..2147f2f
--- /dev/null
@@ -0,0 +1,101 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
+#define V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+inline int OperatorProperties::GetValueOutputCount(Operator* op) {
+  return op->OutputCount();
+}
+
+inline int OperatorProperties::GetValueInputCount(Operator* op) {
+  return op->InputCount();
+}
+
+inline int OperatorProperties::GetControlInputCount(Operator* op) {
+  switch (op->opcode()) {
+    case IrOpcode::kPhi:
+    case IrOpcode::kEffectPhi:
+      return 1;
+#define OPCODE_CASE(x) case IrOpcode::k##x:
+      CONTROL_OP_LIST(OPCODE_CASE)
+#undef OPCODE_CASE
+      return static_cast<ControlOperator*>(op)->ControlInputCount();
+    default:
+      // Operators that have write effects must have a control
+      // dependency. Effect dependencies only ensure the correct order of
+      // write/read operations without consideration of control flow. Without an
+      // explicit control dependency writes can be float in the schedule too
+      // early along a path that shouldn't generate a side-effect.
+      return op->HasProperty(Operator::kNoWrite) ? 0 : 1;
+  }
+  return 0;
+}
+
+inline int OperatorProperties::GetEffectInputCount(Operator* op) {
+  if (op->opcode() == IrOpcode::kEffectPhi) {
+    return static_cast<Operator1<int>*>(op)->parameter();
+  }
+  if (op->HasProperty(Operator::kNoRead) && op->HasProperty(Operator::kNoWrite))
+    return 0;  // no effects.
+  return 1;
+}
+
+inline bool OperatorProperties::HasContextInput(Operator* op) {
+  IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
+  return IrOpcode::IsJsOpcode(opcode);
+}
+
+inline bool OperatorProperties::IsBasicBlockBegin(Operator* op) {
+  uint8_t opcode = op->opcode();
+  return opcode == IrOpcode::kStart || opcode == IrOpcode::kEnd ||
+         opcode == IrOpcode::kDead || opcode == IrOpcode::kLoop ||
+         opcode == IrOpcode::kMerge || opcode == IrOpcode::kIfTrue ||
+         opcode == IrOpcode::kIfFalse;
+}
+
+inline bool OperatorProperties::CanBeScheduled(Operator* op) { return true; }
+
+inline bool OperatorProperties::HasFixedSchedulePosition(Operator* op) {
+  IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
+  return (IrOpcode::IsControlOpcode(opcode)) ||
+         opcode == IrOpcode::kParameter || opcode == IrOpcode::kEffectPhi ||
+         opcode == IrOpcode::kPhi;
+}
+
+inline bool OperatorProperties::IsScheduleRoot(Operator* op) {
+  uint8_t opcode = op->opcode();
+  return opcode == IrOpcode::kEnd || opcode == IrOpcode::kEffectPhi ||
+         opcode == IrOpcode::kPhi;
+}
+
+inline bool OperatorProperties::CanLazilyDeoptimize(Operator* op) {
+  if (op->opcode() == IrOpcode::kCall) {
+    CallOperator* call_op = reinterpret_cast<CallOperator*>(op);
+    CallDescriptor* descriptor = call_op->parameter();
+    return descriptor->CanLazilyDeoptimize();
+  }
+  if (op->opcode() == IrOpcode::kJSCallRuntime) {
+    // TODO(jarin) At the moment, we only support lazy deoptimization for
+    // the %DeoptimizeFunction runtime function.
+    Runtime::FunctionId function =
+        reinterpret_cast<Operator1<Runtime::FunctionId>*>(op)->parameter();
+    return function == Runtime::kDeoptimizeFunction;
+  }
+  return false;
+}
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
diff --git a/src/compiler/operator-properties.h b/src/compiler/operator-properties.h
new file mode 100644 (file)
index 0000000..a2d220d
--- /dev/null
@@ -0,0 +1,36 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPERATOR_PROPERTIES_H_
+#define V8_COMPILER_OPERATOR_PROPERTIES_H_
+
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Operator;
+
+class OperatorProperties {
+ public:
+  static int GetValueOutputCount(Operator* op);
+  static int GetValueInputCount(Operator* op);
+  static bool HasContextInput(Operator* op);
+  static int GetEffectInputCount(Operator* op);
+  static int GetControlInputCount(Operator* op);
+
+  static bool IsBasicBlockBegin(Operator* op);
+
+  static bool CanBeScheduled(Operator* op);
+  static bool HasFixedSchedulePosition(Operator* op);
+  static bool IsScheduleRoot(Operator* op);
+
+  static bool CanLazilyDeoptimize(Operator* op);
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_OPERATOR_PROPERTIES_H_
diff --git a/src/compiler/operator.h b/src/compiler/operator.h
new file mode 100644 (file)
index 0000000..c644ac9
--- /dev/null
@@ -0,0 +1,276 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPERATOR_H_
+#define V8_COMPILER_OPERATOR_H_
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/ostreams.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// An operator represents a description of the "computation" of a node in the
+// compiler IR. A computation takes values (i.e. data) as input and produces
+// zero or more values as output. The side-effects of a computation must be
+// captured by additional control and data dependencies which are part of the
+// IR graph.
+// Operators are immutable and describe the statically-known parts of a
+// computation. Thus they can be safely shared by many different nodes in the
+// IR graph, or even globally between graphs. Operators can have "static
+// parameters" which are compile-time constant parameters to the operator, such
+// as the name for a named field access, the ID of a runtime function, etc.
+// Static parameters are private to the operator and only semantically
+// meaningful to the operator itself.
+class Operator : public ZoneObject {
+ public:
+  Operator(uint8_t opcode, uint16_t properties)
+      : opcode_(opcode), properties_(properties) {}
+  virtual ~Operator() {}
+
+  // Properties inform the operator-independent optimizer about legal
+  // transformations for nodes that have this operator.
+  enum Property {
+    kNoProperties = 0,
+    kReducible = 1 << 0,    // Participates in strength reduction.
+    kCommutative = 1 << 1,  // OP(a, b) == OP(b, a) for all inputs.
+    kAssociative = 1 << 2,  // OP(a, OP(b,c)) == OP(OP(a,b), c) for all inputs.
+    kIdempotent = 1 << 3,   // OP(a); OP(a) == OP(a).
+    kNoRead = 1 << 4,       // Has no scheduling dependency on Effects.
+    kNoWrite = 1 << 5,      // Does not modify any Effects and thereby
+                            // creates no new scheduling dependencies.
+    kNoThrow = 1 << 6,      // Can never generate an exception.
+    kFoldable = kNoRead | kNoWrite,
+    kEliminatable = kNoWrite | kNoThrow,
+    kPure = kNoRead | kNoWrite | kNoThrow | kIdempotent
+  };
+
+  // A small integer unique to all instances of a particular kind of operator,
+  // useful for quick matching for specific kinds of operators. For fast access
+  // the opcode is stored directly in the operator object.
+  inline uint8_t opcode() const { return opcode_; }
+
+  // Returns a constant string representing the mnemonic of the operator,
+  // without the static parameters. Useful for debugging.
+  virtual const char* mnemonic() = 0;
+
+  // Check if this operator equals another operator. Equivalent operators can
+  // be merged, and nodes with equivalent operators and equivalent inputs
+  // can be merged.
+  virtual bool Equals(Operator* other) = 0;
+
+  // Compute a hashcode to speed up equivalence-set checking.
+  // Equal operators should always have equal hashcodes, and unequal operators
+  // should have unequal hashcodes with high probability.
+  virtual int HashCode() = 0;
+
+  // Check whether this operator has the given property.
+  inline bool HasProperty(Property property) const {
+    return (properties_ & static_cast<int>(property)) == property;
+  }
+
+  // Number of data inputs to the operator, for verifying graph structure.
+  virtual int InputCount() = 0;
+
+  // Number of data outputs from the operator, for verifying graph structure.
+  virtual int OutputCount() = 0;
+
+  inline Property properties() { return static_cast<Property>(properties_); }
+
+  // TODO(titzer): API for input and output types, for typechecking graph.
+ private:
+  // Print the full operator into the given stream, including any
+  // static parameters. Useful for debugging and visualizing the IR.
+  virtual OStream& PrintTo(OStream& os) const = 0;  // NOLINT
+  friend OStream& operator<<(OStream& os, const Operator& op);
+
+  uint8_t opcode_;
+  uint16_t properties_;
+};
+
+OStream& operator<<(OStream& os, const Operator& op);
+
+// An implementation of Operator that has no static parameters. Such operators
+// have just a name, an opcode, and a fixed number of inputs and outputs.
+// They can be represented by singletons and shared globally.
+class SimpleOperator : public Operator {
+ public:
+  SimpleOperator(uint8_t opcode, uint16_t properties, int input_count,
+                 int output_count, const char* mnemonic)
+      : Operator(opcode, properties),
+        input_count_(input_count),
+        output_count_(output_count),
+        mnemonic_(mnemonic) {}
+
+  virtual const char* mnemonic() { return mnemonic_; }
+  virtual bool Equals(Operator* that) { return opcode() == that->opcode(); }
+  virtual int HashCode() { return opcode(); }
+  virtual int InputCount() { return input_count_; }
+  virtual int OutputCount() { return output_count_; }
+
+ private:
+  virtual OStream& PrintTo(OStream& os) const {  // NOLINT
+    return os << mnemonic_;
+  }
+
+  int input_count_;
+  int output_count_;
+  const char* mnemonic_;
+};
+
+// Template specialization implements a kind of type class for dealing with the
+// static parameters of Operator1 automatically.
+template <typename T>
+struct StaticParameterTraits {
+  static OStream& PrintTo(OStream& os, T val) {  // NOLINT
+    return os << "??";
+  }
+  static int HashCode(T a) { return 0; }
+  static bool Equals(T a, T b) {
+    return false;  // Not every T has a ==. By default, be conservative.
+  }
+};
+
+template <>
+struct StaticParameterTraits<ExternalReference> {
+  static OStream& PrintTo(OStream& os, ExternalReference val) {  // NOLINT
+    os << val.address();
+    const Runtime::Function* function =
+        Runtime::FunctionForEntry(val.address());
+    if (function != NULL) {
+      os << " <" << function->name << ".entry>";
+    }
+    return os;
+  }
+  static int HashCode(ExternalReference a) {
+    return reinterpret_cast<intptr_t>(a.address()) & 0xFFFFFFFF;
+  }
+  static bool Equals(ExternalReference a, ExternalReference b) {
+    return a == b;
+  }
+};
+
+// Specialization for static parameters of type {int}.
+template <>
+struct StaticParameterTraits<int> {
+  static OStream& PrintTo(OStream& os, int val) {  // NOLINT
+    return os << val;
+  }
+  static int HashCode(int a) { return a; }
+  static bool Equals(int a, int b) { return a == b; }
+};
+
+// Specialization for static parameters of type {double}.
+template <>
+struct StaticParameterTraits<double> {
+  static OStream& PrintTo(OStream& os, double val) {  // NOLINT
+    return os << val;
+  }
+  static int HashCode(double a) {
+    return static_cast<int>(BitCast<int64_t>(a));
+  }
+  static bool Equals(double a, double b) {
+    return BitCast<int64_t>(a) == BitCast<int64_t>(b);
+  }
+};
+
+// Specialization for static parameters of type {PrintableUnique<Object>}.
+template <>
+struct StaticParameterTraits<PrintableUnique<Object> > {
+  static OStream& PrintTo(OStream& os, PrintableUnique<Object> val) {  // NOLINT
+    return os << val.string();
+  }
+  static int HashCode(PrintableUnique<Object> a) { return a.Hashcode(); }
+  static bool Equals(PrintableUnique<Object> a, PrintableUnique<Object> b) {
+    return a == b;
+  }
+};
+
+// Specialization for static parameters of type {PrintableUnique<Name>}.
+template <>
+struct StaticParameterTraits<PrintableUnique<Name> > {
+  static OStream& PrintTo(OStream& os, PrintableUnique<Name> val) {  // NOLINT
+    return os << val.string();
+  }
+  static int HashCode(PrintableUnique<Name> a) { return a.Hashcode(); }
+  static bool Equals(PrintableUnique<Name> a, PrintableUnique<Name> b) {
+    return a == b;
+  }
+};
+
+#if DEBUG
+// Specialization for static parameters of type {Handle<Object>} to prevent any
+// direct usage of Handles in constants.
+template <>
+struct StaticParameterTraits<Handle<Object> > {
+  static OStream& PrintTo(OStream& os, Handle<Object> val) {  // NOLINT
+    UNREACHABLE();  // Should use PrintableUnique<Object> instead
+    return os;
+  }
+  static int HashCode(Handle<Object> a) {
+    UNREACHABLE();  // Should use PrintableUnique<Object> instead
+    return 0;
+  }
+  static bool Equals(Handle<Object> a, Handle<Object> b) {
+    UNREACHABLE();  // Should use PrintableUnique<Object> instead
+    return false;
+  }
+};
+#endif
+
+// A templatized implementation of Operator that has one static parameter of
+// type {T}. If a specialization of StaticParameterTraits<{T}> exists, then
+// operators of this kind can automatically be hashed, compared, and printed.
+template <typename T>
+class Operator1 : public Operator {
+ public:
+  Operator1(uint8_t opcode, uint16_t properties, int input_count,
+            int output_count, const char* mnemonic, T parameter)
+      : Operator(opcode, properties),
+        input_count_(input_count),
+        output_count_(output_count),
+        mnemonic_(mnemonic),
+        parameter_(parameter) {}
+
+  const T& parameter() const { return parameter_; }
+
+  virtual const char* mnemonic() { return mnemonic_; }
+  virtual bool Equals(Operator* other) {
+    if (opcode() != other->opcode()) return false;
+    Operator1<T>* that = static_cast<Operator1<T>*>(other);
+    T temp1 = this->parameter_;
+    T temp2 = that->parameter_;
+    return StaticParameterTraits<T>::Equals(temp1, temp2);
+  }
+  virtual int HashCode() {
+    return opcode() + 33 * StaticParameterTraits<T>::HashCode(this->parameter_);
+  }
+  virtual int InputCount() { return input_count_; }
+  virtual int OutputCount() { return output_count_; }
+  virtual OStream& PrintParameter(OStream& os) const {  // NOLINT
+    return StaticParameterTraits<T>::PrintTo(os << "[", parameter_) << "]";
+  }
+
+ private:
+  virtual OStream& PrintTo(OStream& os) const {  // NOLINT
+    return PrintParameter(os << mnemonic_);
+  }
+
+  int input_count_;
+  int output_count_;
+  const char* mnemonic_;
+  T parameter_;
+};
+
+// Type definitions for operators with specific types of parameters.
+typedef Operator1<PrintableUnique<Name> > NameOperator;
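+
+// A hypothetical usage sketch (illustration only, not part of this CL; the
+// input/output counts are illustrative):
+//
+//   Operator* op = new (zone) Operator1<int>(
+//       IrOpcode::kParameter, Operator::kPure, 0, 1, "Parameter", index);
+//   // Two such operators Equals() iff their opcodes and int parameters match.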
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_OPERATOR_H_
diff --git a/src/compiler/phi-reducer.h b/src/compiler/phi-reducer.h
new file mode 100644 (file)
index 0000000..b6aa65e
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_PHI_REDUCER_H_
+#define V8_COMPILER_PHI_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Replaces redundant phis if all the inputs are the same or the phi itself.
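+// For example, Phi(x, x) and Phi(x, <self>) both reduce to x, while
+// Phi(x, y) with distinct x and y is left unchanged.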
+class PhiReducer V8_FINAL : public Reducer {
+ public:
+  virtual Reduction Reduce(Node* node) V8_OVERRIDE {
+    if (node->opcode() != IrOpcode::kPhi &&
+        node->opcode() != IrOpcode::kEffectPhi)
+      return NoChange();
+
+    int n = node->op()->InputCount();
+    if (n == 1) return Replace(node->InputAt(0));
+
+    Node* replacement = NULL;
+    Node::Inputs inputs = node->inputs();
+    for (InputIter it = inputs.begin(); n > 0; --n, ++it) {
+      Node* input = *it;
+      if (input != node && input != replacement) {
+        if (replacement != NULL) return NoChange();
+        replacement = input;
+      }
+    }
+    ASSERT_NE(node, replacement);
+    return Replace(replacement);
+  }
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_PHI_REDUCER_H_
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
new file mode 100644 (file)
index 0000000..25f0063
--- /dev/null
@@ -0,0 +1,290 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/pipeline.h"
+
+#include "src/base/platform/elapsed-timer.h"
+#include "src/compiler/ast-graph-builder.h"
+#include "src/compiler/code-generator.h"
+#include "src/compiler/graph-replay.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/js-context-specialization.h"
+#include "src/compiler/js-generic-lowering.h"
+#include "src/compiler/js-typed-lowering.h"
+#include "src/compiler/register-allocator.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/scheduler.h"
+#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/typer.h"
+#include "src/compiler/verifier.h"
+#include "src/hydrogen.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class PhaseStats {
+ public:
+  enum PhaseKind { CREATE_GRAPH, OPTIMIZATION, CODEGEN };
+
+  PhaseStats(CompilationInfo* info, PhaseKind kind, const char* name)
+      : info_(info),
+        kind_(kind),
+        name_(name),
+        size_(info->zone()->allocation_size()) {
+    if (FLAG_turbo_stats) {
+      timer_.Start();
+    }
+  }
+
+  ~PhaseStats() {
+    if (FLAG_turbo_stats) {
+      base::TimeDelta delta = timer_.Elapsed();
+      size_t bytes = info_->zone()->allocation_size() - size_;
+      HStatistics* stats = info_->isolate()->GetTStatistics();
+      stats->SaveTiming(name_, delta, bytes);
+
+      switch (kind_) {
+        case CREATE_GRAPH:
+          stats->IncrementCreateGraph(delta);
+          break;
+        case OPTIMIZATION:
+          stats->IncrementOptimizeGraph(delta);
+          break;
+        case CODEGEN:
+          stats->IncrementGenerateCode(delta);
+          break;
+      }
+    }
+  }
+
+ private:
+  CompilationInfo* info_;
+  PhaseKind kind_;
+  const char* name_;
+  size_t size_;
+  base::ElapsedTimer timer_;
+};
+
+
+void Pipeline::VerifyAndPrintGraph(Graph* graph, const char* phase) {
+  if (FLAG_trace_turbo) {
+    OFStream os(stdout);
+    os << "-- " << phase << " graph -----------------------------------\n"
+       << AsDOT(*graph);
+  }
+  if (VerifyGraphs()) Verifier::Run(graph);
+}
+
+
+class AstGraphBuilderWithPositions : public AstGraphBuilder {
+ public:
+  explicit AstGraphBuilderWithPositions(CompilationInfo* info, JSGraph* jsgraph,
+                                        SourcePositionTable* source_positions)
+      : AstGraphBuilder(info, jsgraph, source_positions) {}
+
+#define DEF_VISIT(type)                                               \
+  virtual void Visit##type(type* node) V8_OVERRIDE {                  \
+    SourcePositionTable::Scope pos(source_positions(),                \
+                                   SourcePosition(node->position())); \
+    AstGraphBuilder::Visit##type(node);                               \
+  }
+  AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+};
+
+
+static void TraceSchedule(Schedule* schedule) {
+  if (!FLAG_trace_turbo) return;
+  OFStream os(stdout);
+  os << "-- Schedule --------------------------------------\n" << *schedule;
+}
+
+
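+// The phases below run in order: AST graph building, optional context
+// specialization and typed lowering, generic lowering, scheduling, and
+// finally instruction selection, register allocation and code generation
+// (see the GenerateCode overload taking a Linkage below).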
+Handle<Code> Pipeline::GenerateCode() {
+  if (FLAG_turbo_stats) isolate()->GetTStatistics()->Initialize(info_);
+
+  if (FLAG_trace_turbo) {
+    OFStream os(stdout);
+    os << "---------------------------------------------------\n"
+       << "Begin compiling method "
+       << info()->function()->debug_name()->ToCString().get()
+       << " using Turbofan" << endl;
+  }
+
+  // Build the graph.
+  Graph graph(zone());
+  SourcePositionTable source_positions(&graph);
+  source_positions.AddDecorator();
+  // TODO(turbofan): there is no need to type anything during initial graph
+  // construction.  This is currently only needed for the node cache, which the
+  // typer could sweep over later.
+  Typer typer(zone());
+  CommonOperatorBuilder common(zone());
+  JSGraph jsgraph(&graph, &common, &typer);
+  Node* context_node;
+  {
+    PhaseStats graph_builder_stats(info(), PhaseStats::CREATE_GRAPH,
+                                   "graph builder");
+    AstGraphBuilderWithPositions graph_builder(info(), &jsgraph,
+                                               &source_positions);
+    graph_builder.CreateGraph();
+    context_node = graph_builder.GetFunctionContext();
+  }
+
+  VerifyAndPrintGraph(&graph, "Initial untyped");
+
+  if (FLAG_context_specialization) {
+    SourcePositionTable::Scope pos(&source_positions,
+                                   SourcePosition::Unknown());
+    // Specialize the code to the context as aggressively as possible.
+    JSContextSpecializer spec(info(), &jsgraph, context_node);
+    spec.SpecializeToContext();
+    VerifyAndPrintGraph(&graph, "Context specialized");
+  }
+
+  // Print a replay of the initial graph.
+  if (FLAG_print_turbo_replay) {
+    GraphReplayPrinter::PrintReplay(&graph);
+  }
+
+  if (FLAG_turbo_types) {
+    {
+      // Type the graph.
+      PhaseStats typer_stats(info(), PhaseStats::CREATE_GRAPH, "typer");
+      typer.Run(&graph, info()->context());
+    }
+    // All new nodes must be typed.
+    typer.DecorateGraph(&graph);
+    {
+      // Lower JSOperators where we can determine types.
+      PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH,
+                                "typed lowering");
+      JSTypedLowering lowering(&jsgraph, &source_positions);
+      lowering.LowerAllNodes();
+
+      VerifyAndPrintGraph(&graph, "Lowered typed");
+    }
+  }
+
+  {
+    // Lower any remaining generic JSOperators.
+    PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH,
+                              "generic lowering");
+    MachineOperatorBuilder machine(zone());
+    JSGenericLowering lowering(info(), &jsgraph, &machine, &source_positions);
+    lowering.LowerAllNodes();
+  }
+
+  // Compute a schedule.
+  Schedule* schedule = ComputeSchedule(&graph);
+  TraceSchedule(schedule);
+
+  Handle<Code> code = Handle<Code>::null();
+  if (SupportedTarget()) {
+    {
+      // Generate optimized code.
+      PhaseStats codegen_stats(info(), PhaseStats::CODEGEN, "codegen");
+      Linkage linkage(info());
+      code = GenerateCode(&linkage, &graph, schedule, &source_positions);
+      info()->SetCode(code);
+    }
+    // Print optimized code.
+    v8::internal::CodeGenerator::PrintCode(code, info());
+  }
+
+  if (FLAG_trace_turbo) {
+    OFStream os(stdout);
+    os << "--------------------------------------------------\n"
+       << "Finished compiling method "
+       << info()->function()->debug_name()->ToCString().get()
+       << " using Turbofan" << endl;
+  }
+
+  return code;
+}
+
+
+Schedule* Pipeline::ComputeSchedule(Graph* graph) {
+  Scheduler scheduler(zone());
+  PhaseStats schedule_stats(info(), PhaseStats::CODEGEN, "scheduling");
+  return scheduler.NewSchedule(graph);
+}
+
+
+Handle<Code> Pipeline::GenerateCodeForMachineGraph(Linkage* linkage,
+                                                   Graph* graph,
+                                                   Schedule* schedule) {
+  CHECK(SupportedTarget());
+  if (schedule == NULL) {
+    VerifyAndPrintGraph(graph, "Machine");
+    schedule = ComputeSchedule(graph);
+  }
+  TraceSchedule(schedule);
+
+  SourcePositionTable source_positions(graph);
+  Handle<Code> code = GenerateCode(linkage, graph, schedule, &source_positions);
+#if ENABLE_DISASSEMBLER
+  if (!code.is_null() && FLAG_print_opt_code) {
+    CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+    OFStream os(tracing_scope.file());
+    code->Disassemble("test code", os);
+  }
+#endif
+  return code;
+}
+
+
+Handle<Code> Pipeline::GenerateCode(Linkage* linkage, Graph* graph,
+                                    Schedule* schedule,
+                                    SourcePositionTable* source_positions) {
+  ASSERT_NOT_NULL(graph);
+  ASSERT_NOT_NULL(linkage);
+  ASSERT_NOT_NULL(schedule);
+  ASSERT(SupportedTarget());
+
+  InstructionSequence sequence(linkage, graph, schedule);
+
+  // Select and schedule instructions covering the scheduled graph.
+  {
+    InstructionSelector selector(&sequence, source_positions);
+    selector.SelectInstructions();
+  }
+
+  if (FLAG_trace_turbo) {
+    OFStream os(stdout);
+    os << "----- Instruction sequence before register allocation -----\n"
+       << sequence;
+  }
+
+  // Allocate registers.
+  {
+    int node_count = graph->NodeCount();
+    if (node_count > UnallocatedOperand::kMaxVirtualRegisters) {
+      linkage->info()->set_bailout_reason(kNotEnoughVirtualRegistersForValues);
+      return Handle<Code>::null();
+    }
+    RegisterAllocator allocator(&sequence);
+    if (!allocator.Allocate()) {
+      linkage->info()->set_bailout_reason(kNotEnoughVirtualRegistersRegalloc);
+      return Handle<Code>::null();
+    }
+  }
+
+  if (FLAG_trace_turbo) {
+    OFStream os(stdout);
+    os << "----- Instruction sequence after register allocation -----\n"
+       << sequence;
+  }
+
+  // Generate native sequence.
+  CodeGenerator generator(&sequence);
+  return generator.GenerateCode();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/pipeline.h b/src/compiler/pipeline.h
new file mode 100644 (file)
index 0000000..807495f
--- /dev/null
@@ -0,0 +1,71 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_PIPELINE_H_
+#define V8_COMPILER_PIPELINE_H_
+
+#include "src/v8.h"
+
+#include "src/compiler.h"
+
+// Note: TODO(turbofan) implies a performance improvement opportunity,
+//   and TODO(name) implies an incomplete implementation.
+
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+    V8_TARGET_ARCH_ARM
+#define V8_TURBOFAN_TARGET 1
+#else
+#define V8_TURBOFAN_TARGET 0
+#endif
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+class CallDescriptor;
+class Graph;
+class Schedule;
+class SourcePositionTable;
+class Linkage;
+
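+// A minimal usage sketch (assuming an already populated CompilationInfo;
+// the calling code below is illustrative only):
+//
+//   Pipeline pipeline(info);
+//   Handle<Code> code = pipeline.GenerateCode();
+//   // A null handle means the pipeline bailed out; the reason is recorded
+//   // on the CompilationInfo.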
+class Pipeline {
+ public:
+  explicit Pipeline(CompilationInfo* info) : info_(info) {}
+
+  // Run the entire pipeline and generate a handle to a code object.
+  Handle<Code> GenerateCode();
+
+  // Run the pipeline on a machine graph and generate code. If {schedule}
+  // is {NULL}, then compute a new schedule for code generation.
+  Handle<Code> GenerateCodeForMachineGraph(Linkage* linkage, Graph* graph,
+                                           Schedule* schedule = NULL);
+
+  CompilationInfo* info() const { return info_; }
+  Zone* zone() { return info_->zone(); }
+  Isolate* isolate() { return info_->isolate(); }
+
+  static inline bool SupportedTarget() { return V8_TURBOFAN_TARGET != 0; }
+
+  static inline bool VerifyGraphs() {
+#ifdef DEBUG
+    return true;
+#else
+    return FLAG_turbo_verify;
+#endif
+  }
+
+ private:
+  CompilationInfo* info_;
+
+  Schedule* ComputeSchedule(Graph* graph);
+  void VerifyAndPrintGraph(Graph* graph, const char* phase);
+  Handle<Code> GenerateCode(Linkage* linkage, Graph* graph, Schedule* schedule,
+                            SourcePositionTable* source_positions);
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_PIPELINE_H_
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
new file mode 100644 (file)
index 0000000..c6b92a1
--- /dev/null
@@ -0,0 +1,157 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+RawMachineAssembler::RawMachineAssembler(
+    Graph* graph, MachineCallDescriptorBuilder* call_descriptor_builder,
+    MachineRepresentation word)
+    : GraphBuilder(graph),
+      schedule_(new (zone()) Schedule(zone())),
+      machine_(zone(), word),
+      common_(zone()),
+      call_descriptor_builder_(call_descriptor_builder),
+      parameters_(NULL),
+      exit_label_(schedule()->exit()),
+      current_block_(schedule()->entry()) {
+  if (parameter_count() == 0) return;
+  parameters_ = zone()->NewArray<Node*>(parameter_count());
+  for (int i = 0; i < parameter_count(); ++i) {
+    parameters_[i] = NewNode(common()->Parameter(i));
+  }
+}
+
+
+Schedule* RawMachineAssembler::Export() {
+  // Compute the correct codegen order.
+  ASSERT(schedule_->rpo_order()->empty());
+  Scheduler scheduler(zone(), graph(), schedule_);
+  scheduler.ComputeSpecialRPO();
+  // Invalidate MachineAssembler.
+  Schedule* schedule = schedule_;
+  schedule_ = NULL;
+  return schedule;
+}
+
+
+Node* RawMachineAssembler::Parameter(int index) {
+  ASSERT(0 <= index && index < parameter_count());
+  return parameters_[index];
+}
+
+
+RawMachineAssembler::Label* RawMachineAssembler::Exit() {
+  exit_label_.used_ = true;
+  return &exit_label_;
+}
+
+
+void RawMachineAssembler::Goto(Label* label) {
+  ASSERT(current_block_ != schedule()->exit());
+  schedule()->AddGoto(CurrentBlock(), Use(label));
+  current_block_ = NULL;
+}
+
+
+void RawMachineAssembler::Branch(Node* condition, Label* true_val,
+                                 Label* false_val) {
+  ASSERT(current_block_ != schedule()->exit());
+  Node* branch = NewNode(common()->Branch(), condition);
+  schedule()->AddBranch(CurrentBlock(), branch, Use(true_val), Use(false_val));
+  current_block_ = NULL;
+}
+
+
+void RawMachineAssembler::Return(Node* value) {
+  schedule()->AddReturn(CurrentBlock(), value);
+  current_block_ = NULL;
+}
+
+
+void RawMachineAssembler::Deoptimize(Node* state) {
+  Node* deopt = graph()->NewNode(common()->Deoptimize(), state);
+  schedule()->AddDeoptimize(CurrentBlock(), deopt);
+  current_block_ = NULL;
+}
+
+
+Node* RawMachineAssembler::CallJS0(Node* function, Node* receiver,
+                                   Label* continuation, Label* deoptimization) {
+  CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(1, zone());
+  Node* call = graph()->NewNode(common()->Call(descriptor), function, receiver);
+  schedule()->AddCall(CurrentBlock(), call, Use(continuation),
+                      Use(deoptimization));
+  current_block_ = NULL;
+  return call;
+}
+
+
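+// Builds a call through the C entry stub: the call node's inputs are the
+// stub code object, the single argument, the external reference of the
+// runtime function, the argument count (1) and the context; the block ends
+// with continuation/deoptimization successors.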
+Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
+                                        Node* arg0, Label* continuation,
+                                        Label* deoptimization) {
+  CallDescriptor* descriptor =
+      Linkage::GetRuntimeCallDescriptor(function, 1, Operator::kNoProperties,
+                                        CallDescriptor::kCanDeoptimize, zone());
+
+  Node* centry = HeapConstant(CEntryStub(isolate(), 1).GetCode());
+  Node* ref = NewNode(
+      common()->ExternalConstant(ExternalReference(function, isolate())));
+  Node* arity = Int32Constant(1);
+  Node* context = Parameter(1);
+
+  Node* call = graph()->NewNode(common()->Call(descriptor), centry, arg0, ref,
+                                arity, context);
+  schedule()->AddCall(CurrentBlock(), call, Use(continuation),
+                      Use(deoptimization));
+  current_block_ = NULL;
+  return call;
+}
+
+
+void RawMachineAssembler::Bind(Label* label) {
+  ASSERT(current_block_ == NULL);
+  ASSERT(!label->bound_);
+  label->bound_ = true;
+  current_block_ = EnsureBlock(label);
+}
+
+
+BasicBlock* RawMachineAssembler::Use(Label* label) {
+  label->used_ = true;
+  return EnsureBlock(label);
+}
+
+
+BasicBlock* RawMachineAssembler::EnsureBlock(Label* label) {
+  if (label->block_ == NULL) label->block_ = schedule()->NewBasicBlock();
+  return label->block_;
+}
+
+
+BasicBlock* RawMachineAssembler::CurrentBlock() {
+  ASSERT(current_block_);
+  return current_block_;
+}
+
+
+Node* RawMachineAssembler::MakeNode(Operator* op, int input_count,
+                                    Node** inputs) {
+  ASSERT(ScheduleValid());
+  ASSERT(current_block_ != NULL);
+  Node* node = graph()->NewNode(op, input_count, inputs);
+  BasicBlock* block = op->opcode() == IrOpcode::kParameter ? schedule()->start()
+                                                           : CurrentBlock();
+  schedule()->AddNode(block, node);
+  return node;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
new file mode 100644 (file)
index 0000000..16aa1ef
--- /dev/null
@@ -0,0 +1,130 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
+#define V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/machine-node-factory.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class BasicBlock;
+class Schedule;
+
+
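+// A minimal control-flow sketch (graph and call descriptor setup elided;
+// names are illustrative only):
+//
+//   RawMachineAssembler m(graph, call_descriptor_builder);
+//   RawMachineAssembler::Label is_true, is_false;
+//   m.Branch(condition, &is_true, &is_false);
+//   m.Bind(&is_true);
+//   m.Return(m.Int32Constant(1));
+//   m.Bind(&is_false);
+//   m.Return(m.Int32Constant(0));
+//   Schedule* schedule = m.Export();  // Assembler is invalid afterwards.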
+class RawMachineAssembler : public GraphBuilder,
+                            public MachineNodeFactory<RawMachineAssembler> {
+ public:
+  class Label {
+   public:
+    Label() : block_(NULL), used_(false), bound_(false) {}
+    ~Label() { ASSERT(bound_ || !used_); }
+
+    BasicBlock* block() { return block_; }
+
+   private:
+    // Private constructor for exit label.
+    explicit Label(BasicBlock* block)
+        : block_(block), used_(false), bound_(false) {}
+
+    BasicBlock* block_;
+    bool used_;
+    bool bound_;
+    friend class RawMachineAssembler;
+    DISALLOW_COPY_AND_ASSIGN(Label);
+  };
+
+  RawMachineAssembler(
+      Graph* graph, MachineCallDescriptorBuilder* call_descriptor_builder,
+      MachineRepresentation word = MachineOperatorBuilder::pointer_rep());
+  virtual ~RawMachineAssembler() {}
+
+  Isolate* isolate() const { return zone()->isolate(); }
+  Zone* zone() const { return graph()->zone(); }
+  MachineOperatorBuilder* machine() { return &machine_; }
+  CommonOperatorBuilder* common() { return &common_; }
+  CallDescriptor* call_descriptor() const {
+    return call_descriptor_builder_->BuildCallDescriptor(zone());
+  }
+  int parameter_count() const {
+    return call_descriptor_builder_->parameter_count();
+  }
+  const MachineRepresentation* parameter_types() const {
+    return call_descriptor_builder_->parameter_types();
+  }
+
+  // Parameters.
+  Node* Parameter(int index);
+
+  // Control flow.
+  Label* Exit();
+  void Goto(Label* label);
+  void Branch(Node* condition, Label* true_val, Label* false_val);
+  // Call to a JS function with zero parameters.
+  Node* CallJS0(Node* function, Node* receiver, Label* continuation,
+                Label* deoptimization);
+  // Call to a runtime function with one parameter.
+  Node* CallRuntime1(Runtime::FunctionId function, Node* arg0,
+                     Label* continuation, Label* deoptimization);
+  void Return(Node* value);
+  void Bind(Label* label);
+  void Deoptimize(Node* state);
+
+  // Variables.
+  Node* Phi(Node* n1, Node* n2) { return NewNode(common()->Phi(2), n1, n2); }
+  Node* Phi(Node* n1, Node* n2, Node* n3) {
+    return NewNode(common()->Phi(3), n1, n2, n3);
+  }
+  Node* Phi(Node* n1, Node* n2, Node* n3, Node* n4) {
+    return NewNode(common()->Phi(4), n1, n2, n3, n4);
+  }
+
+  // MachineAssembler is invalid after export.
+  Schedule* Export();
+
+ protected:
+  virtual Node* MakeNode(Operator* op, int input_count, Node** inputs);
+
+  Schedule* schedule() {
+    ASSERT(ScheduleValid());
+    return schedule_;
+  }
+
+ private:
+  bool ScheduleValid() { return schedule_ != NULL; }
+
+  BasicBlock* Use(Label* label);
+  BasicBlock* EnsureBlock(Label* label);
+  BasicBlock* CurrentBlock();
+
+  typedef std::vector<MachineRepresentation,
+                      zone_allocator<MachineRepresentation> >
+      RepresentationVector;
+
+  Schedule* schedule_;
+  MachineOperatorBuilder machine_;
+  CommonOperatorBuilder common_;
+  MachineCallDescriptorBuilder* call_descriptor_builder_;
+  Node** parameters_;
+  Label exit_label_;
+  BasicBlock* current_block_;
+
+  DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
new file mode 100644 (file)
index 0000000..342615e
--- /dev/null
@@ -0,0 +1,2166 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/register-allocator.h"
+
+#include "src/compiler/linkage.h"
+#include "src/hydrogen.h"
+#include "src/string-stream.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
+  return a.Value() < b.Value() ? a : b;
+}
+
+
+static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
+  return a.Value() > b.Value() ? a : b;
+}
+
+
+UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
+                         InstructionOperand* hint)
+    : operand_(operand),
+      hint_(hint),
+      pos_(pos),
+      next_(NULL),
+      requires_reg_(false),
+      register_beneficial_(true) {
+  if (operand_ != NULL && operand_->IsUnallocated()) {
+    const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
+    requires_reg_ = unalloc->HasRegisterPolicy();
+    register_beneficial_ = !unalloc->HasAnyPolicy();
+  }
+  ASSERT(pos_.IsValid());
+}
+
+
+bool UsePosition::HasHint() const {
+  return hint_ != NULL && !hint_->IsUnallocated();
+}
+
+
+bool UsePosition::RequiresRegister() const { return requires_reg_; }
+
+
+bool UsePosition::RegisterIsBeneficial() const { return register_beneficial_; }
+
+
+void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
+  ASSERT(Contains(pos) && pos.Value() != start().Value());
+  UseInterval* after = new (zone) UseInterval(pos, end_);
+  after->next_ = next_;
+  next_ = after;
+  end_ = pos;
+}
+
+
+#ifdef DEBUG
+
+
+void LiveRange::Verify() const {
+  UsePosition* cur = first_pos_;
+  while (cur != NULL) {
+    ASSERT(Start().Value() <= cur->pos().Value() &&
+           cur->pos().Value() <= End().Value());
+    cur = cur->next();
+  }
+}
+
+
+bool LiveRange::HasOverlap(UseInterval* target) const {
+  UseInterval* current_interval = first_interval_;
+  while (current_interval != NULL) {
+    // Intervals overlap if the start of one is contained in the other.
+    if (current_interval->Contains(target->start()) ||
+        target->Contains(current_interval->start())) {
+      return true;
+    }
+    current_interval = current_interval->next();
+  }
+  return false;
+}
+
+
+#endif
+
+
+LiveRange::LiveRange(int id, Zone* zone)
+    : id_(id),
+      spilled_(false),
+      is_phi_(false),
+      is_non_loop_phi_(false),
+      kind_(UNALLOCATED_REGISTERS),
+      assigned_register_(kInvalidAssignment),
+      last_interval_(NULL),
+      first_interval_(NULL),
+      first_pos_(NULL),
+      parent_(NULL),
+      next_(NULL),
+      current_interval_(NULL),
+      last_processed_use_(NULL),
+      current_hint_operand_(NULL),
+      spill_operand_(new (zone) InstructionOperand()),
+      spill_start_index_(kMaxInt) {}
+
+
+void LiveRange::set_assigned_register(int reg, Zone* zone) {
+  ASSERT(!HasRegisterAssigned() && !IsSpilled());
+  assigned_register_ = reg;
+  ConvertOperands(zone);
+}
+
+
+void LiveRange::MakeSpilled(Zone* zone) {
+  ASSERT(!IsSpilled());
+  ASSERT(TopLevel()->HasAllocatedSpillOperand());
+  spilled_ = true;
+  assigned_register_ = kInvalidAssignment;
+  ConvertOperands(zone);
+}
+
+
+bool LiveRange::HasAllocatedSpillOperand() const {
+  ASSERT(spill_operand_ != NULL);
+  return !spill_operand_->IsIgnored();
+}
+
+
+void LiveRange::SetSpillOperand(InstructionOperand* operand) {
+  ASSERT(!operand->IsUnallocated());
+  ASSERT(spill_operand_ != NULL);
+  ASSERT(spill_operand_->IsIgnored());
+  spill_operand_->ConvertTo(operand->kind(), operand->index());
+}
+
+
+UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
+  UsePosition* use_pos = last_processed_use_;
+  if (use_pos == NULL) use_pos = first_pos();
+  while (use_pos != NULL && use_pos->pos().Value() < start.Value()) {
+    use_pos = use_pos->next();
+  }
+  last_processed_use_ = use_pos;
+  return use_pos;
+}
+
+
+UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
+    LifetimePosition start) {
+  UsePosition* pos = NextUsePosition(start);
+  while (pos != NULL && !pos->RegisterIsBeneficial()) {
+    pos = pos->next();
+  }
+  return pos;
+}
+
+
+UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
+    LifetimePosition start) {
+  UsePosition* pos = first_pos();
+  UsePosition* prev = NULL;
+  while (pos != NULL && pos->pos().Value() < start.Value()) {
+    if (pos->RegisterIsBeneficial()) prev = pos;
+    pos = pos->next();
+  }
+  return prev;
+}
+
+
+UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
+  UsePosition* pos = NextUsePosition(start);
+  while (pos != NULL && !pos->RequiresRegister()) {
+    pos = pos->next();
+  }
+  return pos;
+}
+
+
+bool LiveRange::CanBeSpilled(LifetimePosition pos) {
+  // We cannot spill a live range that has a use requiring a register
+  // at the current or the immediate next position.
+  UsePosition* use_pos = NextRegisterPosition(pos);
+  if (use_pos == NULL) return true;
+  return use_pos->pos().Value() >
+         pos.NextInstruction().InstructionEnd().Value();
+}
+
+
+InstructionOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
+  InstructionOperand* op = NULL;
+  if (HasRegisterAssigned()) {
+    ASSERT(!IsSpilled());
+    switch (Kind()) {
+      case GENERAL_REGISTERS:
+        op = RegisterOperand::Create(assigned_register(), zone);
+        break;
+      case DOUBLE_REGISTERS:
+        op = DoubleRegisterOperand::Create(assigned_register(), zone);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else if (IsSpilled()) {
+    ASSERT(!HasRegisterAssigned());
+    op = TopLevel()->GetSpillOperand();
+    ASSERT(!op->IsUnallocated());
+  } else {
+    UnallocatedOperand* unalloc =
+        new (zone) UnallocatedOperand(UnallocatedOperand::NONE);
+    unalloc->set_virtual_register(id_);
+    op = unalloc;
+  }
+  return op;
+}
+
+
+UseInterval* LiveRange::FirstSearchIntervalForPosition(
+    LifetimePosition position) const {
+  if (current_interval_ == NULL) return first_interval_;
+  if (current_interval_->start().Value() > position.Value()) {
+    current_interval_ = NULL;
+    return first_interval_;
+  }
+  return current_interval_;
+}
+
+
+void LiveRange::AdvanceLastProcessedMarker(
+    UseInterval* to_start_of, LifetimePosition but_not_past) const {
+  if (to_start_of == NULL) return;
+  if (to_start_of->start().Value() > but_not_past.Value()) return;
+  LifetimePosition start = current_interval_ == NULL
+                               ? LifetimePosition::Invalid()
+                               : current_interval_->start();
+  if (to_start_of->start().Value() > start.Value()) {
+    current_interval_ = to_start_of;
+  }
+}
+
+
+void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
+                        Zone* zone) {
+  ASSERT(Start().Value() < position.Value());
+  ASSERT(result->IsEmpty());
+  // Find the last interval that ends before the position. If the
+  // position is contained in one of the intervals in the chain, we
+  // split that interval and use the first part.
+  UseInterval* current = FirstSearchIntervalForPosition(position);
+
+  // If the split position coincides with the beginning of a use interval
+  // we need to split use positions in a special way.
+  bool split_at_start = false;
+
+  if (current->start().Value() == position.Value()) {
+    // When splitting at start we need to locate the previous use interval.
+    current = first_interval_;
+  }
+
+  while (current != NULL) {
+    if (current->Contains(position)) {
+      current->SplitAt(position, zone);
+      break;
+    }
+    UseInterval* next = current->next();
+    if (next->start().Value() >= position.Value()) {
+      split_at_start = (next->start().Value() == position.Value());
+      break;
+    }
+    current = next;
+  }
+
+  // Partition original use intervals to the two live ranges.
+  UseInterval* before = current;
+  UseInterval* after = before->next();
+  result->last_interval_ =
+      (last_interval_ == before)
+          ? after            // Only interval in the range after split.
+          : last_interval_;  // Last interval of the original range.
+  result->first_interval_ = after;
+  last_interval_ = before;
+
+  // Find the last use position before the split and the first use
+  // position after it.
+  UsePosition* use_after = first_pos_;
+  UsePosition* use_before = NULL;
+  if (split_at_start) {
+    // The split position coincides with the beginning of a use interval (the
+    // end of a lifetime hole). Use at this position should be attributed to
+    // the split child because split child owns use interval covering it.
+    while (use_after != NULL && use_after->pos().Value() < position.Value()) {
+      use_before = use_after;
+      use_after = use_after->next();
+    }
+  } else {
+    while (use_after != NULL && use_after->pos().Value() <= position.Value()) {
+      use_before = use_after;
+      use_after = use_after->next();
+    }
+  }
+
+  // Partition original use positions to the two live ranges.
+  if (use_before != NULL) {
+    use_before->next_ = NULL;
+  } else {
+    first_pos_ = NULL;
+  }
+  result->first_pos_ = use_after;
+
+  // Discard cached iteration state. It might be pointing
+  // to the use that no longer belongs to this live range.
+  last_processed_use_ = NULL;
+  current_interval_ = NULL;
+
+  // Link the new live range in the chain before any of the other
+  // ranges linked from the range before the split.
+  result->parent_ = (parent_ == NULL) ? this : parent_;
+  result->kind_ = result->parent_->kind_;
+  result->next_ = next_;
+  next_ = result;
+
+#ifdef DEBUG
+  Verify();
+  result->Verify();
+#endif
+}
+
+
+// This implements an ordering on live ranges so that they are ordered by their
+// start positions.  This is needed for the correctness of the register
+// allocation algorithm.  If two live ranges start at the same offset then there
+// is a tie breaker based on where the value is first used.  This part of the
+// ordering is merely a heuristic.
+bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
+  LifetimePosition start = Start();
+  LifetimePosition other_start = other->Start();
+  if (start.Value() == other_start.Value()) {
+    UsePosition* pos = first_pos();
+    if (pos == NULL) return false;
+    UsePosition* other_pos = other->first_pos();
+    if (other_pos == NULL) return true;
+    return pos->pos().Value() < other_pos->pos().Value();
+  }
+  return start.Value() < other_start.Value();
+}
+
+
+void LiveRange::ShortenTo(LifetimePosition start) {
+  RegisterAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_,
+                                start.Value());
+  ASSERT(first_interval_ != NULL);
+  ASSERT(first_interval_->start().Value() <= start.Value());
+  ASSERT(start.Value() < first_interval_->end().Value());
+  first_interval_->set_start(start);
+}
+
+
+void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end,
+                               Zone* zone) {
+  RegisterAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n",
+                                id_, start.Value(), end.Value());
+  LifetimePosition new_end = end;
+  while (first_interval_ != NULL &&
+         first_interval_->start().Value() <= end.Value()) {
+    if (first_interval_->end().Value() > end.Value()) {
+      new_end = first_interval_->end();
+    }
+    first_interval_ = first_interval_->next();
+  }
+
+  UseInterval* new_interval = new (zone) UseInterval(start, new_end);
+  new_interval->next_ = first_interval_;
+  first_interval_ = new_interval;
+  if (new_interval->next() == NULL) {
+    last_interval_ = new_interval;
+  }
+}
+
+
+void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end,
+                               Zone* zone) {
+  RegisterAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n", id_,
+                                start.Value(), end.Value());
+  if (first_interval_ == NULL) {
+    UseInterval* interval = new (zone) UseInterval(start, end);
+    first_interval_ = interval;
+    last_interval_ = interval;
+  } else {
+    if (end.Value() == first_interval_->start().Value()) {
+      first_interval_->set_start(start);
+    } else if (end.Value() < first_interval_->start().Value()) {
+      UseInterval* interval = new (zone) UseInterval(start, end);
+      interval->set_next(first_interval_);
+      first_interval_ = interval;
+    } else {
+      // The order of instruction processing (see ProcessInstructions)
+      // guarantees that each new use interval either precedes or intersects
+      // with the last added interval.
+      ASSERT(start.Value() < first_interval_->end().Value());
+      first_interval_->start_ = Min(start, first_interval_->start_);
+      first_interval_->end_ = Max(end, first_interval_->end_);
+    }
+  }
+}
+
+
+void LiveRange::AddUsePosition(LifetimePosition pos,
+                               InstructionOperand* operand,
+                               InstructionOperand* hint, Zone* zone) {
+  RegisterAllocator::TraceAlloc("Add to live range %d use position %d\n", id_,
+                                pos.Value());
+  UsePosition* use_pos = new (zone) UsePosition(pos, operand, hint);
+  UsePosition* prev_hint = NULL;
+  UsePosition* prev = NULL;
+  UsePosition* current = first_pos_;
+  while (current != NULL && current->pos().Value() < pos.Value()) {
+    prev_hint = current->HasHint() ? current : prev_hint;
+    prev = current;
+    current = current->next();
+  }
+
+  if (prev == NULL) {
+    use_pos->set_next(first_pos_);
+    first_pos_ = use_pos;
+  } else {
+    use_pos->next_ = prev->next_;
+    prev->next_ = use_pos;
+  }
+
+  if (prev_hint == NULL && use_pos->HasHint()) {
+    current_hint_operand_ = hint;
+  }
+}
+
+
+void LiveRange::ConvertOperands(Zone* zone) {
+  InstructionOperand* op = CreateAssignedOperand(zone);
+  UsePosition* use_pos = first_pos();
+  while (use_pos != NULL) {
+    ASSERT(Start().Value() <= use_pos->pos().Value() &&
+           use_pos->pos().Value() <= End().Value());
+
+    if (use_pos->HasOperand()) {
+      ASSERT(op->IsRegister() || op->IsDoubleRegister() ||
+             !use_pos->RequiresRegister());
+      use_pos->operand()->ConvertTo(op->kind(), op->index());
+    }
+    use_pos = use_pos->next();
+  }
+}
+
+
+bool LiveRange::CanCover(LifetimePosition position) const {
+  if (IsEmpty()) return false;
+  return Start().Value() <= position.Value() &&
+         position.Value() < End().Value();
+}
+
+
+bool LiveRange::Covers(LifetimePosition position) {
+  if (!CanCover(position)) return false;
+  UseInterval* start_search = FirstSearchIntervalForPosition(position);
+  for (UseInterval* interval = start_search; interval != NULL;
+       interval = interval->next()) {
+    ASSERT(interval->next() == NULL ||
+           interval->next()->start().Value() >= interval->start().Value());
+    AdvanceLastProcessedMarker(interval, position);
+    if (interval->Contains(position)) return true;
+    if (interval->start().Value() > position.Value()) return false;
+  }
+  return false;
+}
+
+
+LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
+  UseInterval* b = other->first_interval();
+  if (b == NULL) return LifetimePosition::Invalid();
+  LifetimePosition advance_last_processed_up_to = b->start();
+  UseInterval* a = FirstSearchIntervalForPosition(b->start());
+  while (a != NULL && b != NULL) {
+    if (a->start().Value() > other->End().Value()) break;
+    if (b->start().Value() > End().Value()) break;
+    LifetimePosition cur_intersection = a->Intersect(b);
+    if (cur_intersection.IsValid()) {
+      return cur_intersection;
+    }
+    if (a->start().Value() < b->start().Value()) {
+      a = a->next();
+      if (a == NULL || a->start().Value() > other->End().Value()) break;
+      AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
+    } else {
+      b = b->next();
+    }
+  }
+  return LifetimePosition::Invalid();
+}
+
+
+RegisterAllocator::RegisterAllocator(InstructionSequence* code)
+    : zone_(code->isolate()),
+      code_(code),
+      live_in_sets_(code->BasicBlockCount(), zone()),
+      live_ranges_(code->VirtualRegisterCount() * 2, zone()),
+      fixed_live_ranges_(NULL),
+      fixed_double_live_ranges_(NULL),
+      unhandled_live_ranges_(code->VirtualRegisterCount() * 2, zone()),
+      active_live_ranges_(8, zone()),
+      inactive_live_ranges_(8, zone()),
+      reusable_slots_(8, zone()),
+      mode_(UNALLOCATED_REGISTERS),
+      num_registers_(-1),
+      allocation_ok_(true) {}
+
+
+void RegisterAllocator::InitializeLivenessAnalysis() {
+  // Initialize the live_in sets for each block to NULL.
+  int block_count = code()->BasicBlockCount();
+  live_in_sets_.Initialize(block_count, zone());
+  live_in_sets_.AddBlock(NULL, block_count, zone());
+}
+
+
+BitVector* RegisterAllocator::ComputeLiveOut(BasicBlock* block) {
+  // Compute live out for the given block, except not including backward
+  // successor edges.
+  BitVector* live_out =
+      new (zone()) BitVector(code()->VirtualRegisterCount(), zone());
+
+  // Process all successor blocks.
+  BasicBlock::Successors successors = block->successors();
+  for (BasicBlock::Successors::iterator i = successors.begin();
+       i != successors.end(); ++i) {
+    // Add values live on entry to the successor. Note the successor's
+    // live_in will not be computed yet for backwards edges.
+    BasicBlock* successor = *i;
+    BitVector* live_in = live_in_sets_[successor->rpo_number_];
+    if (live_in != NULL) live_out->Union(*live_in);
+
+    // All phi input operands corresponding to this successor edge are live
+    // out from this block.
+    int index = successor->PredecessorIndexOf(block);
+    ASSERT(index >= 0);
+    ASSERT(index < static_cast<int>(successor->PredecessorCount()));
+    for (BasicBlock::const_iterator j = successor->begin();
+         j != successor->end(); ++j) {
+      Node* phi = *j;
+      if (phi->opcode() != IrOpcode::kPhi) continue;
+      Node* input = phi->InputAt(index);
+      live_out->Add(input->id());
+    }
+  }
+
+  return live_out;
+}
+
+
+void RegisterAllocator::AddInitialIntervals(BasicBlock* block,
+                                            BitVector* live_out) {
+  // Add an interval that includes the entire block to the live range for
+  // each live_out value.
+  LifetimePosition start =
+      LifetimePosition::FromInstructionIndex(block->first_instruction_index());
+  LifetimePosition end = LifetimePosition::FromInstructionIndex(
+                             block->last_instruction_index()).NextInstruction();
+  BitVector::Iterator iterator(live_out);
+  while (!iterator.Done()) {
+    int operand_index = iterator.Current();
+    LiveRange* range = LiveRangeFor(operand_index);
+    range->AddUseInterval(start, end, zone());
+    iterator.Advance();
+  }
+}
+
+
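+// Fixed live ranges use negative ids so they can never collide with the
+// non-negative virtual register ids; fixed double registers are numbered
+// below the block of ids reserved for fixed general registers.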
+int RegisterAllocator::FixedDoubleLiveRangeID(int index) {
+  return -index - 1 - Register::kMaxNumAllocatableRegisters;
+}
+
+
+InstructionOperand* RegisterAllocator::AllocateFixed(
+    UnallocatedOperand* operand, int pos, bool is_tagged) {
+  TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
+  ASSERT(operand->HasFixedPolicy());
+  if (operand->HasFixedSlotPolicy()) {
+    operand->ConvertTo(InstructionOperand::STACK_SLOT,
+                       operand->fixed_slot_index());
+  } else if (operand->HasFixedRegisterPolicy()) {
+    int reg_index = operand->fixed_register_index();
+    operand->ConvertTo(InstructionOperand::REGISTER, reg_index);
+  } else if (operand->HasFixedDoubleRegisterPolicy()) {
+    int reg_index = operand->fixed_register_index();
+    operand->ConvertTo(InstructionOperand::DOUBLE_REGISTER, reg_index);
+  } else {
+    UNREACHABLE();
+  }
+  if (is_tagged) {
+    TraceAlloc("Fixed reg is tagged at %d\n", pos);
+    Instruction* instr = InstructionAt(pos);
+    if (instr->HasPointerMap()) {
+      instr->pointer_map()->RecordPointer(operand, code_zone());
+    }
+  }
+  return operand;
+}
+
+
+LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
+  ASSERT(index < Register::kMaxNumAllocatableRegisters);
+  LiveRange* result = fixed_live_ranges_[index];
+  if (result == NULL) {
+    // TODO(titzer): add a utility method to allocate a new LiveRange:
+    // The LiveRange object itself can go in this zone, but the
+    // InstructionOperand needs to go in the code zone, since it may survive
+    // register allocation.
+    result = new (zone()) LiveRange(FixedLiveRangeID(index), code_zone());
+    ASSERT(result->IsFixed());
+    result->kind_ = GENERAL_REGISTERS;
+    SetLiveRangeAssignedRegister(result, index);
+    fixed_live_ranges_[index] = result;
+  }
+  return result;
+}
+
+
+LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) {
+  ASSERT(index < DoubleRegister::NumAllocatableRegisters());
+  LiveRange* result = fixed_double_live_ranges_[index];
+  if (result == NULL) {
+    result = new (zone()) LiveRange(FixedDoubleLiveRangeID(index), code_zone());
+    ASSERT(result->IsFixed());
+    result->kind_ = DOUBLE_REGISTERS;
+    SetLiveRangeAssignedRegister(result, index);
+    fixed_double_live_ranges_[index] = result;
+  }
+  return result;
+}
+
+
+LiveRange* RegisterAllocator::LiveRangeFor(int index) {
+  if (index >= live_ranges_.length()) {
+    live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1, zone());
+  }
+  LiveRange* result = live_ranges_[index];
+  if (result == NULL) {
+    result = new (zone()) LiveRange(index, code_zone());
+    live_ranges_[index] = result;
+  }
+  return result;
+}
+
+
+GapInstruction* RegisterAllocator::GetLastGap(BasicBlock* block) {
+  int last_instruction = block->last_instruction_index();
+  return code()->GapAt(last_instruction - 1);
+}
+
+
+LiveRange* RegisterAllocator::LiveRangeFor(InstructionOperand* operand) {
+  if (operand->IsUnallocated()) {
+    return LiveRangeFor(UnallocatedOperand::cast(operand)->virtual_register());
+  } else if (operand->IsRegister()) {
+    return FixedLiveRangeFor(operand->index());
+  } else if (operand->IsDoubleRegister()) {
+    return FixedDoubleLiveRangeFor(operand->index());
+  } else {
+    return NULL;
+  }
+}
+
+
+void RegisterAllocator::Define(LifetimePosition position,
+                               InstructionOperand* operand,
+                               InstructionOperand* hint) {
+  LiveRange* range = LiveRangeFor(operand);
+  if (range == NULL) return;
+
+  if (range->IsEmpty() || range->Start().Value() > position.Value()) {
+    // Can happen if there is a definition without use.
+    range->AddUseInterval(position, position.NextInstruction(), zone());
+    range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone());
+  } else {
+    range->ShortenTo(position);
+  }
+
+  if (operand->IsUnallocated()) {
+    UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
+    range->AddUsePosition(position, unalloc_operand, hint, zone());
+  }
+}
+
+
+void RegisterAllocator::Use(LifetimePosition block_start,
+                            LifetimePosition position,
+                            InstructionOperand* operand,
+                            InstructionOperand* hint) {
+  LiveRange* range = LiveRangeFor(operand);
+  if (range == NULL) return;
+  if (operand->IsUnallocated()) {
+    UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
+    range->AddUsePosition(position, unalloc_operand, hint, zone());
+  }
+  range->AddUseInterval(block_start, position, zone());
+}
+
+
+void RegisterAllocator::AddConstraintsGapMove(int index,
+                                              InstructionOperand* from,
+                                              InstructionOperand* to) {
+  GapInstruction* gap = code()->GapAt(index);
+  ParallelMove* move =
+      gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
+  if (from->IsUnallocated()) {
+    const ZoneList<MoveOperands>* move_operands = move->move_operands();
+    for (int i = 0; i < move_operands->length(); ++i) {
+      MoveOperands cur = move_operands->at(i);
+      InstructionOperand* cur_to = cur.destination();
+      if (cur_to->IsUnallocated()) {
+        if (UnallocatedOperand::cast(cur_to)->virtual_register() ==
+            UnallocatedOperand::cast(from)->virtual_register()) {
+          move->AddMove(cur.source(), to, code_zone());
+          return;
+        }
+      }
+    }
+  }
+  move->AddMove(from, to, code_zone());
+}
+
+
+void RegisterAllocator::MeetRegisterConstraints(BasicBlock* block) {
+  int start = block->first_instruction_index();
+  int end = block->last_instruction_index();
+  ASSERT_NE(-1, start);
+  for (int i = start; i <= end; ++i) {
+    if (code()->IsGapAt(i)) {
+      Instruction* instr = NULL;
+      Instruction* prev_instr = NULL;
+      if (i < end) instr = InstructionAt(i + 1);
+      if (i > start) prev_instr = InstructionAt(i - 1);
+      MeetConstraintsBetween(prev_instr, instr, i);
+      if (!AllocationOk()) return;
+    }
+  }
+}
+
+
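+// Satisfies operand constraints around the gap at {gap_index}: fixed temps
+// and outputs of {first} and fixed inputs of {second} are pinned to their
+// required locations (with compensating gap moves), spilled outputs get a
+// move to their spill operand, and "same as input" outputs of {second} are
+// tied to their first input.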
+void RegisterAllocator::MeetConstraintsBetween(Instruction* first,
+                                               Instruction* second,
+                                               int gap_index) {
+  if (first != NULL) {
+    // Handle fixed temporaries.
+    for (size_t i = 0; i < first->TempCount(); i++) {
+      UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
+      if (temp->HasFixedPolicy()) {
+        AllocateFixed(temp, gap_index - 1, false);
+      }
+    }
+
+    // Handle constant/fixed output operands.
+    for (size_t i = 0; i < first->OutputCount(); i++) {
+      InstructionOperand* output = first->OutputAt(i);
+      if (output->IsConstant()) {
+        int output_vreg = output->index();
+        LiveRange* range = LiveRangeFor(output_vreg);
+        range->SetSpillStartIndex(gap_index - 1);
+        range->SetSpillOperand(output);
+      } else {
+        UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
+        LiveRange* range = LiveRangeFor(first_output->virtual_register());
+        bool assigned = false;
+        if (first_output->HasFixedPolicy()) {
+          UnallocatedOperand* output_copy =
+              first_output->CopyUnconstrained(code_zone());
+          bool is_tagged = HasTaggedValue(first_output->virtual_register());
+          AllocateFixed(first_output, gap_index, is_tagged);
+
+          // This value is produced on the stack; we never need to spill it.
+          if (first_output->IsStackSlot()) {
+            range->SetSpillOperand(first_output);
+            range->SetSpillStartIndex(gap_index - 1);
+            assigned = true;
+          }
+          code()->AddGapMove(gap_index, first_output, output_copy);
+        }
+
+        if (!assigned) {
+          range->SetSpillStartIndex(gap_index);
+
+          // This move to spill operand is not a real use. Liveness analysis
+          // and splitting of live ranges do not account for it.
+          // Thus it should be inserted to a lifetime position corresponding to
+          // the instruction end.
+          GapInstruction* gap = code()->GapAt(gap_index);
+          ParallelMove* move =
+              gap->GetOrCreateParallelMove(GapInstruction::BEFORE, code_zone());
+          move->AddMove(first_output, range->GetSpillOperand(), code_zone());
+        }
+      }
+    }
+  }
+
+  if (second != NULL) {
+    // Handle fixed input operands of second instruction.
+    for (size_t i = 0; i < second->InputCount(); i++) {
+      InstructionOperand* input = second->InputAt(i);
+      if (input->IsImmediate()) continue;  // Ignore immediates.
+      UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
+      if (cur_input->HasFixedPolicy()) {
+        UnallocatedOperand* input_copy =
+            cur_input->CopyUnconstrained(code_zone());
+        bool is_tagged = HasTaggedValue(cur_input->virtual_register());
+        AllocateFixed(cur_input, gap_index + 1, is_tagged);
+        AddConstraintsGapMove(gap_index, input_copy, cur_input);
+      }
+    }
+
+    // Handle "output same as input" for second instruction.
+    for (size_t i = 0; i < second->OutputCount(); i++) {
+      InstructionOperand* output = second->OutputAt(i);
+      if (!output->IsUnallocated()) continue;
+      UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
+      if (second_output->HasSameAsInputPolicy()) {
+        ASSERT(second->OutputCount() == 1);  // Only valid for one output.
+        UnallocatedOperand* cur_input =
+            UnallocatedOperand::cast(second->InputAt(0));
+        int output_vreg = second_output->virtual_register();
+        int input_vreg = cur_input->virtual_register();
+
+        UnallocatedOperand* input_copy =
+            cur_input->CopyUnconstrained(code_zone());
+        cur_input->set_virtual_register(second_output->virtual_register());
+        AddConstraintsGapMove(gap_index, input_copy, cur_input);
+
+        if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
+          int index = gap_index + 1;
+          Instruction* instr = InstructionAt(index);
+          if (instr->HasPointerMap()) {
+            instr->pointer_map()->RecordPointer(input_copy, code_zone());
+          }
+        } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
+          // The input is assumed to immediately have a tagged representation,
+          // before the pointer map can be used. I.e. the pointer map at the
+          // instruction will include the output operand (whose value at the
+          // beginning of the instruction is equal to the input operand). If
+          // this is not desired, then the pointer map at this instruction needs
+          // to be adjusted manually.
+        }
+      }
+    }
+  }
+}
+
+
+bool RegisterAllocator::IsOutputRegisterOf(Instruction* instr, int index) {
+  for (size_t i = 0; i < instr->OutputCount(); i++) {
+    InstructionOperand* output = instr->OutputAt(i);
+    if (output->IsRegister() && output->index() == index) return true;
+  }
+  return false;
+}
+
+
+bool RegisterAllocator::IsOutputDoubleRegisterOf(Instruction* instr,
+                                                 int index) {
+  for (size_t i = 0; i < instr->OutputCount(); i++) {
+    InstructionOperand* output = instr->OutputAt(i);
+    if (output->IsDoubleRegister() && output->index() == index) return true;
+  }
+  return false;
+}
+
+
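+// Walks the block's instructions in reverse order, updating {live} and the
+// live ranges: definitions shorten or start ranges, uses extend them back
+// towards the block start, and register-clobbering instructions add
+// one-position intervals to the affected fixed ranges.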
+void RegisterAllocator::ProcessInstructions(BasicBlock* block,
+                                            BitVector* live) {
+  int block_start = block->first_instruction_index();
+
+  LifetimePosition block_start_position =
+      LifetimePosition::FromInstructionIndex(block_start);
+
+  for (int index = block->last_instruction_index(); index >= block_start;
+       index--) {
+    LifetimePosition curr_position =
+        LifetimePosition::FromInstructionIndex(index);
+
+    Instruction* instr = InstructionAt(index);
+    ASSERT(instr != NULL);
+    if (instr->IsGapMoves()) {
+      // Process the moves of the gap instruction, making their sources live.
+      GapInstruction* gap = code()->GapAt(index);
+
+      // TODO(titzer): no need to create the parallel move if it doesn't exist.
+      ParallelMove* move =
+          gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
+      const ZoneList<MoveOperands>* move_operands = move->move_operands();
+      for (int i = 0; i < move_operands->length(); ++i) {
+        MoveOperands* cur = &move_operands->at(i);
+        if (cur->IsIgnored()) continue;
+        InstructionOperand* from = cur->source();
+        InstructionOperand* to = cur->destination();
+        InstructionOperand* hint = to;
+        if (to->IsUnallocated()) {
+          int to_vreg = UnallocatedOperand::cast(to)->virtual_register();
+          LiveRange* to_range = LiveRangeFor(to_vreg);
+          if (to_range->is_phi()) {
+            if (to_range->is_non_loop_phi()) {
+              hint = to_range->current_hint_operand();
+            }
+          } else {
+            if (live->Contains(to_vreg)) {
+              Define(curr_position, to, from);
+              live->Remove(to_vreg);
+            } else {
+              cur->Eliminate();
+              continue;
+            }
+          }
+        } else {
+          Define(curr_position, to, from);
+        }
+        Use(block_start_position, curr_position, from, hint);
+        if (from->IsUnallocated()) {
+          live->Add(UnallocatedOperand::cast(from)->virtual_register());
+        }
+      }
+    } else {
+      // Process output, inputs, and temps of this non-gap instruction.
+      for (size_t i = 0; i < instr->OutputCount(); i++) {
+        InstructionOperand* output = instr->OutputAt(i);
+        if (output->IsUnallocated()) {
+          int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
+          live->Remove(out_vreg);
+        } else if (output->IsConstant()) {
+          int out_vreg = output->index();
+          live->Remove(out_vreg);
+        }
+        Define(curr_position, output, NULL);
+      }
+
+      if (instr->ClobbersRegisters()) {
+        for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
+          if (!IsOutputRegisterOf(instr, i)) {
+            LiveRange* range = FixedLiveRangeFor(i);
+            range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
+                                  zone());
+          }
+        }
+      }
+
+      if (instr->ClobbersDoubleRegisters()) {
+        for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+          if (!IsOutputDoubleRegisterOf(instr, i)) {
+            LiveRange* range = FixedDoubleLiveRangeFor(i);
+            range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
+                                  zone());
+          }
+        }
+      }
+
+      for (size_t i = 0; i < instr->InputCount(); i++) {
+        InstructionOperand* input = instr->InputAt(i);
+        if (input->IsImmediate()) continue;  // Ignore immediates.
+        LifetimePosition use_pos;
+        if (input->IsUnallocated() &&
+            UnallocatedOperand::cast(input)->IsUsedAtStart()) {
+          use_pos = curr_position;
+        } else {
+          use_pos = curr_position.InstructionEnd();
+        }
+
+        Use(block_start_position, use_pos, input, NULL);
+        if (input->IsUnallocated()) {
+          live->Add(UnallocatedOperand::cast(input)->virtual_register());
+        }
+      }
+
+      for (size_t i = 0; i < instr->TempCount(); i++) {
+        InstructionOperand* temp = instr->TempAt(i);
+        if (instr->ClobbersTemps()) {
+          if (temp->IsRegister()) continue;
+          if (temp->IsUnallocated()) {
+            UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
+            if (temp_unalloc->HasFixedPolicy()) {
+              continue;
+            }
+          }
+        }
+        Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
+        Define(curr_position, temp, NULL);
+      }
+    }
+  }
+}
+
+
+void RegisterAllocator::ResolvePhis(BasicBlock* block) {
+  for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
+    Node* phi = *i;
+    if (phi->opcode() != IrOpcode::kPhi) continue;
+
+    UnallocatedOperand* phi_operand =
+        new (code_zone()) UnallocatedOperand(UnallocatedOperand::NONE);
+    phi_operand->set_virtual_register(phi->id());
+
+    int j = 0;
+    Node::Inputs inputs = phi->inputs();
+    for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+         ++iter, ++j) {
+      Node* op = *iter;
+      // TODO(mstarzinger): Use a ValueInputIterator instead.
+      if (j >= block->PredecessorCount()) continue;
+      UnallocatedOperand* operand =
+          new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
+      operand->set_virtual_register(op->id());
+      BasicBlock* cur_block = block->PredecessorAt(j);
+      // The gap move must be added without any special processing as in
+      // the AddConstraintsGapMove.
+      code()->AddGapMove(cur_block->last_instruction_index() - 1, operand,
+                         phi_operand);
+
+      Instruction* branch = InstructionAt(cur_block->last_instruction_index());
+      ASSERT(!branch->HasPointerMap());
+      USE(branch);
+    }
+
+    LiveRange* live_range = LiveRangeFor(phi->id());
+    BlockStartInstruction* block_start = code()->GetBlockStart(block);
+    block_start->GetOrCreateParallelMove(GapInstruction::START, code_zone())
+        ->AddMove(phi_operand, live_range->GetSpillOperand(), code_zone());
+    live_range->SetSpillStartIndex(block->first_instruction_index());
+
+    // We use the phi-ness of some nodes in some later heuristics.
+    live_range->set_is_phi(true);
+    if (!block->IsLoopHeader()) {
+      live_range->set_is_non_loop_phi(true);
+    }
+  }
+}
+
+
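+// Runs the allocation phases in order: meet register constraints, resolve
+// phis, build live ranges, allocate general then double registers, populate
+// pointer maps, connect split ranges, and resolve control flow. Returns
+// false as soon as a phase fails.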
+bool RegisterAllocator::Allocate() {
+  assigned_registers_ = new (code_zone())
+      BitVector(Register::NumAllocatableRegisters(), code_zone());
+  assigned_double_registers_ = new (code_zone())
+      BitVector(DoubleRegister::NumAllocatableRegisters(), code_zone());
+  MeetRegisterConstraints();
+  if (!AllocationOk()) return false;
+  ResolvePhis();
+  BuildLiveRanges();
+  AllocateGeneralRegisters();
+  if (!AllocationOk()) return false;
+  AllocateDoubleRegisters();
+  if (!AllocationOk()) return false;
+  PopulatePointerMaps();
+  ConnectRanges();
+  ResolveControlFlow();
+  code()->frame()->SetAllocatedRegisters(assigned_registers_);
+  code()->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
+  return true;
+}
+
+
+void RegisterAllocator::MeetRegisterConstraints() {
+  RegisterAllocatorPhase phase("L_Register constraints", this);
+  for (int i = 0; i < code()->BasicBlockCount(); ++i) {
+    MeetRegisterConstraints(code()->BlockAt(i));
+    if (!AllocationOk()) return;
+  }
+}
+
+
+void RegisterAllocator::ResolvePhis() {
+  RegisterAllocatorPhase phase("L_Resolve phis", this);
+
+  // Process the blocks in reverse order.
+  for (int i = code()->BasicBlockCount() - 1; i >= 0; --i) {
+    ResolvePhis(code()->BlockAt(i));
+  }
+}
+
+
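+// If the pieces of {range} covering the end of {pred} and the start of
+// {block} were assigned different locations, inserts a gap move on the
+// edge (in {block}'s first gap for a single predecessor, otherwise in
+// {pred}'s last gap).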
+void RegisterAllocator::ResolveControlFlow(LiveRange* range, BasicBlock* block,
+                                           BasicBlock* pred) {
+  LifetimePosition pred_end =
+      LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
+  LifetimePosition cur_start =
+      LifetimePosition::FromInstructionIndex(block->first_instruction_index());
+  LiveRange* pred_cover = NULL;
+  LiveRange* cur_cover = NULL;
+  LiveRange* cur_range = range;
+  while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) {
+    if (cur_range->CanCover(cur_start)) {
+      ASSERT(cur_cover == NULL);
+      cur_cover = cur_range;
+    }
+    if (cur_range->CanCover(pred_end)) {
+      ASSERT(pred_cover == NULL);
+      pred_cover = cur_range;
+    }
+    cur_range = cur_range->next();
+  }
+
+  if (cur_cover->IsSpilled()) return;
+  ASSERT(pred_cover != NULL && cur_cover != NULL);
+  if (pred_cover != cur_cover) {
+    InstructionOperand* pred_op =
+        pred_cover->CreateAssignedOperand(code_zone());
+    InstructionOperand* cur_op = cur_cover->CreateAssignedOperand(code_zone());
+    if (!pred_op->Equals(cur_op)) {
+      GapInstruction* gap = NULL;
+      if (block->PredecessorCount() == 1) {
+        gap = code()->GapAt(block->first_instruction_index());
+      } else {
+        ASSERT(pred->SuccessorCount() == 1);
+        gap = GetLastGap(pred);
+
+        Instruction* branch = InstructionAt(pred->last_instruction_index());
+        ASSERT(!branch->HasPointerMap());
+        USE(branch);
+      }
+      gap->GetOrCreateParallelMove(GapInstruction::START, code_zone())
+          ->AddMove(pred_op, cur_op, code_zone());
+    }
+  }
+}
+
+
+ParallelMove* RegisterAllocator::GetConnectingParallelMove(
+    LifetimePosition pos) {
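+  // Split points fall either on a gap or immediately next to one. If the
+  // position is itself a gap, use its START or END parallel move depending
+  // on whether the position is an instruction start; otherwise borrow the
+  // AFTER move of the previous gap or the BEFORE move of the next one.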
+  int index = pos.InstructionIndex();
+  if (code()->IsGapAt(index)) {
+    GapInstruction* gap = code()->GapAt(index);
+    return gap->GetOrCreateParallelMove(
+        pos.IsInstructionStart() ? GapInstruction::START : GapInstruction::END,
+        code_zone());
+  }
+  int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
+  return code()->GapAt(gap_pos)->GetOrCreateParallelMove(
+      (gap_pos < index) ? GapInstruction::AFTER : GapInstruction::BEFORE,
+      code_zone());
+}
+
+
+BasicBlock* RegisterAllocator::GetBlock(LifetimePosition pos) {
+  return code()->GetBasicBlock(pos.InstructionIndex());
+}
+
+
+void RegisterAllocator::ConnectRanges() {
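+  // A live range that was split is a chain of child ranges. Wherever two
+  // adjacent children touch but ended up in different locations, insert a
+  // move at the connecting gap so the value flows into its next location.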
+  RegisterAllocatorPhase phase("L_Connect ranges", this);
+  for (int i = 0; i < live_ranges()->length(); ++i) {
+    LiveRange* first_range = live_ranges()->at(i);
+    if (first_range == NULL || first_range->parent() != NULL) continue;
+
+    LiveRange* second_range = first_range->next();
+    while (second_range != NULL) {
+      LifetimePosition pos = second_range->Start();
+
+      if (!second_range->IsSpilled()) {
+        // Add gap move if the two live ranges touch and there is no block
+        // boundary.
+        if (first_range->End().Value() == pos.Value()) {
+          bool should_insert = true;
+          if (IsBlockBoundary(pos)) {
+            should_insert = CanEagerlyResolveControlFlow(GetBlock(pos));
+          }
+          if (should_insert) {
+            ParallelMove* move = GetConnectingParallelMove(pos);
+            InstructionOperand* prev_operand =
+                first_range->CreateAssignedOperand(code_zone());
+            InstructionOperand* cur_operand =
+                second_range->CreateAssignedOperand(code_zone());
+            move->AddMove(prev_operand, cur_operand, code_zone());
+          }
+        }
+      }
+
+      first_range = second_range;
+      second_range = second_range->next();
+    }
+  }
+}
+
+
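+// An edge can be resolved eagerly (while connecting ranges) when the block
+// has exactly one predecessor and that predecessor immediately precedes it
+// in reverse post-order; such edges need no separate resolution pass.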
+bool RegisterAllocator::CanEagerlyResolveControlFlow(BasicBlock* block) const {
+  if (block->PredecessorCount() != 1) return false;
+  return block->PredecessorAt(0)->rpo_number_ == block->rpo_number_ - 1;
+}
+
+
+void RegisterAllocator::ResolveControlFlow() {
+  RegisterAllocatorPhase phase("L_Resolve control flow", this);
+  for (int block_id = 1; block_id < code()->BasicBlockCount(); ++block_id) {
+    BasicBlock* block = code()->BlockAt(block_id);
+    if (CanEagerlyResolveControlFlow(block)) continue;
+    BitVector* live = live_in_sets_[block->rpo_number_];
+    BitVector::Iterator iterator(live);
+    while (!iterator.Done()) {
+      int operand_index = iterator.Current();
+      BasicBlock::Predecessors predecessors = block->predecessors();
+      for (BasicBlock::Predecessors::iterator i = predecessors.begin();
+           i != predecessors.end(); ++i) {
+        BasicBlock* cur = *i;
+        LiveRange* cur_range = LiveRangeFor(operand_index);
+        ResolveControlFlow(cur_range, block, cur);
+      }
+      iterator.Advance();
+    }
+  }
+}
+
+
+void RegisterAllocator::BuildLiveRanges() {
+  RegisterAllocatorPhase phase("L_Build live ranges", this);
+  InitializeLivenessAnalysis();
+  // Process the blocks in reverse order.
+  for (int block_id = code()->BasicBlockCount() - 1; block_id >= 0;
+       --block_id) {
+    BasicBlock* block = code()->BlockAt(block_id);
+    BitVector* live = ComputeLiveOut(block);
+    // Initially consider all live_out values live for the entire block. We
+    // will shorten these intervals if necessary.
+    AddInitialIntervals(block, live);
+
+    // Process the instructions in reverse order, generating and killing
+    // live values.
+    ProcessInstructions(block, live);
+    // All phi output operands are killed by this block.
+    for (BasicBlock::const_iterator i = block->begin(); i != block->end();
+         ++i) {
+      Node* phi = *i;
+      if (phi->opcode() != IrOpcode::kPhi) continue;
+
+      // The live range interval already ends at the first instruction of the
+      // block.
+      live->Remove(phi->id());
+
+      InstructionOperand* hint = NULL;
+      InstructionOperand* phi_operand = NULL;
+      GapInstruction* gap = GetLastGap(block->PredecessorAt(0));
+
+      // TODO(titzer): no need to create the parallel move if it doesn't exist.
+      ParallelMove* move =
+          gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
+      for (int j = 0; j < move->move_operands()->length(); ++j) {
+        InstructionOperand* to = move->move_operands()->at(j).destination();
+        if (to->IsUnallocated() &&
+            UnallocatedOperand::cast(to)->virtual_register() == phi->id()) {
+          hint = move->move_operands()->at(j).source();
+          phi_operand = to;
+          break;
+        }
+      }
+      ASSERT(hint != NULL);
+
+      LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
+          block->first_instruction_index());
+      Define(block_start, phi_operand, hint);
+    }
+
+    // Now live contains the live_in set for this block, except for values
+    // live out on backward successor edges.
+    live_in_sets_[block_id] = live;
+
+    if (block->IsLoopHeader()) {
+      // Add a live range stretching from the first loop instruction to the last
+      // for each value live on entry to the header.
+      BitVector::Iterator iterator(live);
+      LifetimePosition start = LifetimePosition::FromInstructionIndex(
+          block->first_instruction_index());
+      int end_index =
+          code()->BlockAt(block->loop_end_)->last_instruction_index();
+      LifetimePosition end =
+          LifetimePosition::FromInstructionIndex(end_index).NextInstruction();
+      while (!iterator.Done()) {
+        int operand_index = iterator.Current();
+        LiveRange* range = LiveRangeFor(operand_index);
+        range->EnsureInterval(start, end, zone());
+        iterator.Advance();
+      }
+
+      // Insert all values into the live in sets of all blocks in the loop.
+      for (int i = block->rpo_number_ + 1; i < block->loop_end_; ++i) {
+        live_in_sets_[i]->Union(*live);
+      }
+    }
+
+#ifdef DEBUG
+    if (block_id == 0) {
+      BitVector::Iterator iterator(live);
+      bool found = false;
+      while (!iterator.Done()) {
+        found = true;
+        int operand_index = iterator.Current();
+        PrintF("Register allocator error: live v%d reached first block.\n",
+               operand_index);
+        LiveRange* range = LiveRangeFor(operand_index);
+        PrintF("  (first use is at %d)\n", range->first_pos()->pos().Value());
+        CompilationInfo* info = code()->linkage()->info();
+        if (info->IsStub()) {
+          if (info->code_stub() == NULL) {
+            PrintF("\n");
+          } else {
+            CodeStub::Major major_key = info->code_stub()->MajorKey();
+            PrintF("  (function: %s)\n", CodeStub::MajorName(major_key, false));
+          }
+        } else {
+          ASSERT(info->IsOptimizing());
+          AllowHandleDereference allow_deref;
+          PrintF("  (function: %s)\n",
+                 info->function()->debug_name()->ToCString().get());
+        }
+        iterator.Advance();
+      }
+      ASSERT(!found);
+    }
+#endif
+  }
+
+  for (int i = 0; i < live_ranges_.length(); ++i) {
+    if (live_ranges_[i] != NULL) {
+      live_ranges_[i]->kind_ = RequiredRegisterKind(live_ranges_[i]->id());
+
+      // TODO(bmeurer): This is a horrible hack to make sure that for constant
+      // live ranges, every use requires the constant to be in a register.
+      // Without this hack, all uses with "any" policy would get the constant
+      // operand assigned.
+      LiveRange* range = live_ranges_[i];
+      if (range->HasAllocatedSpillOperand() &&
+          range->GetSpillOperand()->IsConstant()) {
+        for (UsePosition* pos = range->first_pos(); pos != NULL;
+             pos = pos->next_) {
+          pos->register_beneficial_ = true;
+          pos->requires_reg_ = true;
+        }
+      }
+    }
+  }
+}
+
+
+bool RegisterAllocator::SafePointsAreInOrder() const {
+  int safe_point = 0;
+  const PointerMapDeque* pointer_maps = code()->pointer_maps();
+  for (PointerMapDeque::const_iterator it = pointer_maps->begin();
+       it != pointer_maps->end(); ++it) {
+    PointerMap* map = *it;
+    if (safe_point > map->instruction_position()) return false;
+    safe_point = map->instruction_position();
+  }
+  return true;
+}
+
+
+void RegisterAllocator::PopulatePointerMaps() {
+  RegisterAllocatorPhase phase("L_Populate pointer maps", this);
+
+  ASSERT(SafePointsAreInOrder());
+
+  // Iterate over all safe point positions and record a pointer
+  // for all spilled live ranges at this point.
+  int last_range_start = 0;
+  const PointerMapDeque* pointer_maps = code()->pointer_maps();
+  PointerMapDeque::const_iterator first_it = pointer_maps->begin();
+  for (int range_idx = 0; range_idx < live_ranges()->length(); ++range_idx) {
+    LiveRange* range = live_ranges()->at(range_idx);
+    if (range == NULL) continue;
+    // Iterate over the first parts of multi-part live ranges.
+    if (range->parent() != NULL) continue;
+    // Skip non-reference values.
+    if (!HasTaggedValue(range->id())) continue;
+    // Skip empty live ranges.
+    if (range->IsEmpty()) continue;
+
+    // Find the extent of the range and its children.
+    int start = range->Start().InstructionIndex();
+    int end = 0;
+    for (LiveRange* cur = range; cur != NULL; cur = cur->next()) {
+      LifetimePosition this_end = cur->End();
+      if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
+      ASSERT(cur->Start().InstructionIndex() >= start);
+    }
+
+    // Most of the ranges are in order, but not all.  Keep an eye on when they
+    // step backwards and reset the first_it so we don't miss any safe points.
+    if (start < last_range_start) first_it = pointer_maps->begin();
+    last_range_start = start;
+
+    // Step across all the safe points that are before the start of this range,
+    // recording how far we step in order to save doing this for the next range.
+    for (; first_it != pointer_maps->end(); ++first_it) {
+      PointerMap* map = *first_it;
+      if (map->instruction_position() >= start) break;
+    }
+
+    // Step through the safe points to see whether they are in the range.
+    for (PointerMapDeque::const_iterator it = first_it;
+         it != pointer_maps->end(); ++it) {
+      PointerMap* map = *it;
+      int safe_point = map->instruction_position();
+
+      // The safe points are sorted so we can stop searching here.
+      if (safe_point - 1 > end) break;
+
+      // Advance to the next active range that covers the current
+      // safe point position.
+      LifetimePosition safe_point_pos =
+          LifetimePosition::FromInstructionIndex(safe_point);
+      LiveRange* cur = range;
+      while (cur != NULL && !cur->Covers(safe_point_pos)) {
+        cur = cur->next();
+      }
+      if (cur == NULL) continue;
+
+      // Check if the live range is spilled and the safe point is after
+      // the spill position.
+      if (range->HasAllocatedSpillOperand() &&
+          safe_point >= range->spill_start_index() &&
+          !range->GetSpillOperand()->IsConstant()) {
+        TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
+                   range->id(), range->spill_start_index(), safe_point);
+        map->RecordPointer(range->GetSpillOperand(), code_zone());
+      }
+
+      if (!cur->IsSpilled()) {
+        TraceAlloc(
+            "Pointer in register for range %d (start at %d) "
+            "at safe point %d\n",
+            cur->id(), cur->Start().Value(), safe_point);
+        InstructionOperand* operand = cur->CreateAssignedOperand(code_zone());
+        ASSERT(!operand->IsStackSlot());
+        map->RecordPointer(operand, code_zone());
+      }
+    }
+  }
+}
+
+
+void RegisterAllocator::AllocateGeneralRegisters() {
+  RegisterAllocatorPhase phase("L_Allocate general registers", this);
+  num_registers_ = Register::NumAllocatableRegisters();
+  mode_ = GENERAL_REGISTERS;
+  AllocateRegisters();
+}
+
+
+void RegisterAllocator::AllocateDoubleRegisters() {
+  RegisterAllocatorPhase phase("L_Allocate double registers", this);
+  num_registers_ = DoubleRegister::NumAllocatableRegisters();
+  mode_ = DOUBLE_REGISTERS;
+  AllocateRegisters();
+}
+
+
+void RegisterAllocator::AllocateRegisters() {
+  ASSERT(unhandled_live_ranges_.is_empty());
+
+  for (int i = 0; i < live_ranges_.length(); ++i) {
+    if (live_ranges_[i] != NULL) {
+      if (live_ranges_[i]->Kind() == mode_) {
+        AddToUnhandledUnsorted(live_ranges_[i]);
+      }
+    }
+  }
+  SortUnhandled();
+  ASSERT(UnhandledIsSorted());
+
+  ASSERT(reusable_slots_.is_empty());
+  ASSERT(active_live_ranges_.is_empty());
+  ASSERT(inactive_live_ranges_.is_empty());
+
+  if (mode_ == DOUBLE_REGISTERS) {
+    for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+      LiveRange* current = fixed_double_live_ranges_.at(i);
+      if (current != NULL) {
+        AddToInactive(current);
+      }
+    }
+  } else {
+    ASSERT(mode_ == GENERAL_REGISTERS);
+    for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
+      LiveRange* current = fixed_live_ranges_.at(i);
+      if (current != NULL) {
+        AddToInactive(current);
+      }
+    }
+  }
+
+  while (!unhandled_live_ranges_.is_empty()) {
+    ASSERT(UnhandledIsSorted());
+    LiveRange* current = unhandled_live_ranges_.RemoveLast();
+    ASSERT(UnhandledIsSorted());
+    LifetimePosition position = current->Start();
+#ifdef DEBUG
+    allocation_finger_ = position;
+#endif
+    TraceAlloc("Processing interval %d start=%d\n", current->id(),
+               position.Value());
+
+    if (current->HasAllocatedSpillOperand()) {
+      TraceAlloc("Live range %d already has a spill operand\n", current->id());
+      LifetimePosition next_pos = position;
+      if (code()->IsGapAt(next_pos.InstructionIndex())) {
+        next_pos = next_pos.NextInstruction();
+      }
+      UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
+      // If the range already has a spill operand and it doesn't need a
+      // register immediately, split it and spill the first part of the range.
+      if (pos == NULL) {
+        Spill(current);
+        continue;
+      } else if (pos->pos().Value() >
+                 current->Start().NextInstruction().Value()) {
+        // Do not spill the live range eagerly if a use position that can
+        // benefit from the register is too close to the start of the range.
+        SpillBetween(current, current->Start(), pos->pos());
+        if (!AllocationOk()) return;
+        ASSERT(UnhandledIsSorted());
+        continue;
+      }
+    }
+
+    for (int i = 0; i < active_live_ranges_.length(); ++i) {
+      LiveRange* cur_active = active_live_ranges_.at(i);
+      if (cur_active->End().Value() <= position.Value()) {
+        ActiveToHandled(cur_active);
+        --i;  // The live range was removed from the list of active live ranges.
+      } else if (!cur_active->Covers(position)) {
+        ActiveToInactive(cur_active);
+        --i;  // The live range was removed from the list of active live ranges.
+      }
+    }
+
+    for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+      LiveRange* cur_inactive = inactive_live_ranges_.at(i);
+      if (cur_inactive->End().Value() <= position.Value()) {
+        InactiveToHandled(cur_inactive);
+        --i;  // Live range was removed from the list of inactive live ranges.
+      } else if (cur_inactive->Covers(position)) {
+        InactiveToActive(cur_inactive);
+        --i;  // Live range was removed from the list of inactive live ranges.
+      }
+    }
+
+    ASSERT(!current->HasRegisterAssigned() && !current->IsSpilled());
+
+    bool result = TryAllocateFreeReg(current);
+    if (!AllocationOk()) return;
+
+    if (!result) AllocateBlockedReg(current);
+    if (!AllocationOk()) return;
+
+    if (current->HasRegisterAssigned()) {
+      AddToActive(current);
+    }
+  }
+
+  reusable_slots_.Rewind(0);
+  active_live_ranges_.Rewind(0);
+  inactive_live_ranges_.Rewind(0);
+}
+
+
+const char* RegisterAllocator::RegisterName(int allocation_index) {
+  if (mode_ == GENERAL_REGISTERS) {
+    return Register::AllocationIndexToString(allocation_index);
+  } else {
+    return DoubleRegister::AllocationIndexToString(allocation_index);
+  }
+}
+
+
+void RegisterAllocator::TraceAlloc(const char* msg, ...) {
+  if (FLAG_trace_alloc) {
+    va_list arguments;
+    va_start(arguments, msg);
+    base::OS::VPrint(msg, arguments);
+    va_end(arguments);
+  }
+}
+
+
+bool RegisterAllocator::HasTaggedValue(int virtual_register) const {
+  return code()->IsReference(virtual_register);
+}
+
+
+RegisterKind RegisterAllocator::RequiredRegisterKind(
+    int virtual_register) const {
+  return (code()->IsDouble(virtual_register)) ? DOUBLE_REGISTERS
+                                              : GENERAL_REGISTERS;
+}
+
+
+void RegisterAllocator::AddToActive(LiveRange* range) {
+  TraceAlloc("Add live range %d to active\n", range->id());
+  active_live_ranges_.Add(range, zone());
+}
+
+
+void RegisterAllocator::AddToInactive(LiveRange* range) {
+  TraceAlloc("Add live range %d to inactive\n", range->id());
+  inactive_live_ranges_.Add(range, zone());
+}
+
+
+void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) {
+  if (range == NULL || range->IsEmpty()) return;
+  ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
+  ASSERT(allocation_finger_.Value() <= range->Start().Value());
+  for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
+    LiveRange* cur_range = unhandled_live_ranges_.at(i);
+    if (range->ShouldBeAllocatedBefore(cur_range)) {
+      TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
+      unhandled_live_ranges_.InsertAt(i + 1, range, zone());
+      ASSERT(UnhandledIsSorted());
+      return;
+    }
+  }
+  TraceAlloc("Add live range %d to unhandled at start\n", range->id());
+  unhandled_live_ranges_.InsertAt(0, range, zone());
+  ASSERT(UnhandledIsSorted());
+}
+
+
+void RegisterAllocator::AddToUnhandledUnsorted(LiveRange* range) {
+  if (range == NULL || range->IsEmpty()) return;
+  ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
+  TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
+  unhandled_live_ranges_.Add(range, zone());
+}
+
+
+static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) {
+  ASSERT(!(*a)->ShouldBeAllocatedBefore(*b) ||
+         !(*b)->ShouldBeAllocatedBefore(*a));
+  if ((*a)->ShouldBeAllocatedBefore(*b)) return 1;
+  if ((*b)->ShouldBeAllocatedBefore(*a)) return -1;
+  return (*a)->id() - (*b)->id();
+}
+
+
+// Sort the unhandled live ranges so that the ranges to be processed first are
+// at the end of the array list.  This is convenient for the register allocation
+// algorithm because it is efficient to remove elements from the end.
+void RegisterAllocator::SortUnhandled() {
+  TraceAlloc("Sort unhandled\n");
+  unhandled_live_ranges_.Sort(&UnhandledSortHelper);
+}
+
+
+bool RegisterAllocator::UnhandledIsSorted() {
+  int len = unhandled_live_ranges_.length();
+  for (int i = 1; i < len; i++) {
+    LiveRange* a = unhandled_live_ranges_.at(i - 1);
+    LiveRange* b = unhandled_live_ranges_.at(i);
+    if (a->Start().Value() < b->Start().Value()) return false;
+  }
+  return true;
+}
+
+
+void RegisterAllocator::FreeSpillSlot(LiveRange* range) {
+  // Check that we are the last range.
+  if (range->next() != NULL) return;
+
+  if (!range->TopLevel()->HasAllocatedSpillOperand()) return;
+
+  InstructionOperand* spill_operand = range->TopLevel()->GetSpillOperand();
+  if (spill_operand->IsConstant()) return;
+  if (spill_operand->index() >= 0) {
+    reusable_slots_.Add(range, zone());
+  }
+}
+
+
+InstructionOperand* RegisterAllocator::TryReuseSpillSlot(LiveRange* range) {
+  if (reusable_slots_.is_empty()) return NULL;
+  if (reusable_slots_.first()->End().Value() >
+      range->TopLevel()->Start().Value()) {
+    return NULL;
+  }
+  InstructionOperand* result =
+      reusable_slots_.first()->TopLevel()->GetSpillOperand();
+  reusable_slots_.Remove(0);
+  return result;
+}
+
+
+void RegisterAllocator::ActiveToHandled(LiveRange* range) {
+  ASSERT(active_live_ranges_.Contains(range));
+  active_live_ranges_.RemoveElement(range);
+  TraceAlloc("Moving live range %d from active to handled\n", range->id());
+  FreeSpillSlot(range);
+}
+
+
+void RegisterAllocator::ActiveToInactive(LiveRange* range) {
+  ASSERT(active_live_ranges_.Contains(range));
+  active_live_ranges_.RemoveElement(range);
+  inactive_live_ranges_.Add(range, zone());
+  TraceAlloc("Moving live range %d from active to inactive\n", range->id());
+}
+
+
+void RegisterAllocator::InactiveToHandled(LiveRange* range) {
+  ASSERT(inactive_live_ranges_.Contains(range));
+  inactive_live_ranges_.RemoveElement(range);
+  TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
+  FreeSpillSlot(range);
+}
+
+
+void RegisterAllocator::InactiveToActive(LiveRange* range) {
+  ASSERT(inactive_live_ranges_.Contains(range));
+  inactive_live_ranges_.RemoveElement(range);
+  active_live_ranges_.Add(range, zone());
+  TraceAlloc("Moving live range %d from inactive to active\n", range->id());
+}
+
+
+// TryAllocateFreeReg and AllocateBlockedReg assume this
+// when allocating local arrays.
+STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
+              Register::kMaxNumAllocatableRegisters);
+
+
+bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
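+  // Compute, for every register, the position up to which it stays free:
+  // registers held by active ranges are not free at all, while registers
+  // held by inactive ranges are free until their first intersection with
+  // current. Prefer the hinted register if it is free for the whole range;
+  // otherwise take the register that stays free the longest, splitting
+  // current at the point where that register becomes blocked.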
+  LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+
+  for (int i = 0; i < num_registers_; i++) {
+    free_until_pos[i] = LifetimePosition::MaxPosition();
+  }
+
+  for (int i = 0; i < active_live_ranges_.length(); ++i) {
+    LiveRange* cur_active = active_live_ranges_.at(i);
+    free_until_pos[cur_active->assigned_register()] =
+        LifetimePosition::FromInstructionIndex(0);
+  }
+
+  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+    LiveRange* cur_inactive = inactive_live_ranges_.at(i);
+    ASSERT(cur_inactive->End().Value() > current->Start().Value());
+    LifetimePosition next_intersection =
+        cur_inactive->FirstIntersection(current);
+    if (!next_intersection.IsValid()) continue;
+    int cur_reg = cur_inactive->assigned_register();
+    free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
+  }
+
+  InstructionOperand* hint = current->FirstHint();
+  if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) {
+    int register_index = hint->index();
+    TraceAlloc(
+        "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
+        RegisterName(register_index), free_until_pos[register_index].Value(),
+        current->id(), current->End().Value());
+
+    // The desired register is free until the end of the current live range.
+    if (free_until_pos[register_index].Value() >= current->End().Value()) {
+      TraceAlloc("Assigning preferred reg %s to live range %d\n",
+                 RegisterName(register_index), current->id());
+      SetLiveRangeAssignedRegister(current, register_index);
+      return true;
+    }
+  }
+
+  // Find the register which stays free for the longest time.
+  int reg = 0;
+  for (int i = 1; i < RegisterCount(); ++i) {
+    if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
+      reg = i;
+    }
+  }
+
+  LifetimePosition pos = free_until_pos[reg];
+
+  if (pos.Value() <= current->Start().Value()) {
+    // All registers are blocked.
+    return false;
+  }
+
+  if (pos.Value() < current->End().Value()) {
+    // Register reg is available at the range start but becomes blocked before
+    // the range end. Split current at position where it becomes blocked.
+    LiveRange* tail = SplitRangeAt(current, pos);
+    if (!AllocationOk()) return false;
+    AddToUnhandledSorted(tail);
+  }
+
+  // Register reg is available at the range start and is free until
+  // the range end.
+  ASSERT(pos.Value() >= current->End().Value());
+  TraceAlloc("Assigning free reg %s to live range %d\n", RegisterName(reg),
+             current->id());
+  SetLiveRangeAssignedRegister(current, reg);
+
+  return true;
+}
+
+
+void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
+  UsePosition* register_use = current->NextRegisterPosition(current->Start());
+  if (register_use == NULL) {
+    // There is no use in the current live range that requires a register.
+    // We can just spill it.
+    Spill(current);
+    return;
+  }
+
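+  // For every register, use_pos tracks the next position at which the
+  // register's current holder actually needs or benefits from it, while
+  // block_pos tracks where the register becomes unconditionally unavailable
+  // because of a fixed range. The register with the farthest use is the
+  // cheapest one to take away from its holder.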
+  LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+  LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+
+  for (int i = 0; i < num_registers_; i++) {
+    use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
+  }
+
+  for (int i = 0; i < active_live_ranges_.length(); ++i) {
+    LiveRange* range = active_live_ranges_[i];
+    int cur_reg = range->assigned_register();
+    if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
+      block_pos[cur_reg] = use_pos[cur_reg] =
+          LifetimePosition::FromInstructionIndex(0);
+    } else {
+      UsePosition* next_use =
+          range->NextUsePositionRegisterIsBeneficial(current->Start());
+      if (next_use == NULL) {
+        use_pos[cur_reg] = range->End();
+      } else {
+        use_pos[cur_reg] = next_use->pos();
+      }
+    }
+  }
+
+  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+    LiveRange* range = inactive_live_ranges_.at(i);
+    ASSERT(range->End().Value() > current->Start().Value());
+    LifetimePosition next_intersection = range->FirstIntersection(current);
+    if (!next_intersection.IsValid()) continue;
+    int cur_reg = range->assigned_register();
+    if (range->IsFixed()) {
+      block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
+      use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
+    } else {
+      use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+    }
+  }
+
+  int reg = 0;
+  for (int i = 1; i < RegisterCount(); ++i) {
+    if (use_pos[i].Value() > use_pos[reg].Value()) {
+      reg = i;
+    }
+  }
+
+  LifetimePosition pos = use_pos[reg];
+
+  if (pos.Value() < register_use->pos().Value()) {
+    // All registers are blocked before the first use that requires a register.
+    // Spill starting part of live range up to that use.
+    SpillBetween(current, current->Start(), register_use->pos());
+    return;
+  }
+
+  if (block_pos[reg].Value() < current->End().Value()) {
+    // Register becomes blocked before the current range end. Split before that
+    // position.
+    LiveRange* tail = SplitBetween(current, current->Start(),
+                                   block_pos[reg].InstructionStart());
+    if (!AllocationOk()) return;
+    AddToUnhandledSorted(tail);
+  }
+
+  // Register reg is not blocked for the whole range.
+  ASSERT(block_pos[reg].Value() >= current->End().Value());
+  TraceAlloc("Assigning blocked reg %s to live range %d\n", RegisterName(reg),
+             current->id());
+  SetLiveRangeAssignedRegister(current, reg);
+
+  // This register was not free. Thus we need to find and spill
+  // parts of active and inactive live regions that use the same register
+  // at the same lifetime positions as current.
+  SplitAndSpillIntersecting(current);
+}
+
+
+LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
+    LiveRange* range, LifetimePosition pos) {
+  BasicBlock* block = GetBlock(pos.InstructionStart());
+  BasicBlock* loop_header =
+      block->IsLoopHeader() ? block : code()->GetContainingLoop(block);
+
+  if (loop_header == NULL) return pos;
+
+  UsePosition* prev_use = range->PreviousUsePositionRegisterIsBeneficial(pos);
+
+  while (loop_header != NULL) {
+    // We are going to spill live range inside the loop.
+    // If possible try to move spilling position backwards to loop header.
+    // This will reduce number of memory moves on the back edge.
+    LifetimePosition loop_start = LifetimePosition::FromInstructionIndex(
+        loop_header->first_instruction_index());
+
+    if (range->Covers(loop_start)) {
+      if (prev_use == NULL || prev_use->pos().Value() < loop_start.Value()) {
+        // No register beneficial use inside the loop before the pos.
+        pos = loop_start;
+      }
+    }
+
+    // Try hoisting out to an outer loop.
+    loop_header = code()->GetContainingLoop(loop_header);
+  }
+
+  return pos;
+}
+
+
+void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) {
+  ASSERT(current->HasRegisterAssigned());
+  int reg = current->assigned_register();
+  LifetimePosition split_pos = current->Start();
+  for (int i = 0; i < active_live_ranges_.length(); ++i) {
+    LiveRange* range = active_live_ranges_[i];
+    if (range->assigned_register() == reg) {
+      UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+      LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
+      if (next_pos == NULL) {
+        SpillAfter(range, spill_pos);
+      } else {
+        // When spilling between spill_pos and next_pos, ensure that the range
+        // remains spilled at least until the start of the current live range.
+        // This guarantees that we will not introduce new unhandled ranges that
+        // start before the current range, as that would violate the allocation
+        // invariant and leave the active and inactive sets in an inconsistent
+        // state: ranges are allocated in order of their start positions and
+        // are retired from active/inactive once the start of the current live
+        // range is larger than their end.
+        SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
+      }
+      if (!AllocationOk()) return;
+      ActiveToHandled(range);
+      --i;
+    }
+  }
+
+  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+    LiveRange* range = inactive_live_ranges_[i];
+    ASSERT(range->End().Value() > current->Start().Value());
+    if (range->assigned_register() == reg && !range->IsFixed()) {
+      LifetimePosition next_intersection = range->FirstIntersection(current);
+      if (next_intersection.IsValid()) {
+        UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+        if (next_pos == NULL) {
+          SpillAfter(range, split_pos);
+        } else {
+          next_intersection = Min(next_intersection, next_pos->pos());
+          SpillBetween(range, split_pos, next_intersection);
+        }
+        if (!AllocationOk()) return;
+        InactiveToHandled(range);
+        --i;
+      }
+    }
+  }
+}
+
+
+bool RegisterAllocator::IsBlockBoundary(LifetimePosition pos) {
+  return pos.IsInstructionStart() &&
+         InstructionAt(pos.InstructionIndex())->IsBlockStart();
+}
+
+
+LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
+                                           LifetimePosition pos) {
+  ASSERT(!range->IsFixed());
+  TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
+
+  if (pos.Value() <= range->Start().Value()) return range;
+
+  // We can't properly connect live ranges if the split occurred at the end
+  // of a control instruction.
+  ASSERT(pos.IsInstructionStart() ||
+         !InstructionAt(pos.InstructionIndex())->IsControl());
+
+  int vreg = GetVirtualRegister();
+  if (!AllocationOk()) return NULL;
+  LiveRange* result = LiveRangeFor(vreg);
+  range->SplitAt(pos, result, zone());
+  return result;
+}
+
+
+LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
+                                           LifetimePosition start,
+                                           LifetimePosition end) {
+  ASSERT(!range->IsFixed());
+  TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
+             range->id(), start.Value(), end.Value());
+
+  LifetimePosition split_pos = FindOptimalSplitPos(start, end);
+  ASSERT(split_pos.Value() >= start.Value());
+  return SplitRangeAt(range, split_pos);
+}
+
+
+LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
+                                                        LifetimePosition end) {
+  int start_instr = start.InstructionIndex();
+  int end_instr = end.InstructionIndex();
+  ASSERT(start_instr <= end_instr);
+
+  // We have no choice.
+  if (start_instr == end_instr) return end;
+
+  BasicBlock* start_block = GetBlock(start);
+  BasicBlock* end_block = GetBlock(end);
+
+  if (end_block == start_block) {
+    // The interval is split in the same basic block. Split at the latest
+    // possible position.
+    return end;
+  }
+
+  BasicBlock* block = end_block;
+  // Find header of outermost loop.
+  // TODO(titzer): fix redundancy below.
+  while (code()->GetContainingLoop(block) != NULL &&
+         code()->GetContainingLoop(block)->rpo_number_ >
+             start_block->rpo_number_) {
+    block = code()->GetContainingLoop(block);
+  }
+
+  // We did not find any suitable outer loop. Split at the latest possible
+  // position unless end_block is a loop header itself.
+  if (block == end_block && !end_block->IsLoopHeader()) return end;
+
+  return LifetimePosition::FromInstructionIndex(
+      block->first_instruction_index());
+}
+
+
+void RegisterAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
+  LiveRange* second_part = SplitRangeAt(range, pos);
+  if (!AllocationOk()) return;
+  Spill(second_part);
+}
+
+
+void RegisterAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
+                                     LifetimePosition end) {
+  SpillBetweenUntil(range, start, start, end);
+}
+
+
+void RegisterAllocator::SpillBetweenUntil(LiveRange* range,
+                                          LifetimePosition start,
+                                          LifetimePosition until,
+                                          LifetimePosition end) {
+  CHECK(start.Value() < end.Value());
+  LiveRange* second_part = SplitRangeAt(range, start);
+  if (!AllocationOk()) return;
+
+  if (second_part->Start().Value() < end.Value()) {
+    // The split result intersects with [start, end[.
+    // Split it at a position between ]start+1, end[, spill the middle part,
+    // and add the rest to the unhandled list.
+    LiveRange* third_part = SplitBetween(
+        second_part, Max(second_part->Start().InstructionEnd(), until),
+        end.PrevInstruction().InstructionEnd());
+    if (!AllocationOk()) return;
+
+    ASSERT(third_part != second_part);
+
+    Spill(second_part);
+    AddToUnhandledSorted(third_part);
+  } else {
+    // The split result does not intersect with [start, end[.
+    // Nothing to spill; just add it to the unhandled list as a whole.
+    AddToUnhandledSorted(second_part);
+  }
+}
+
+
+void RegisterAllocator::Spill(LiveRange* range) {
+  ASSERT(!range->IsSpilled());
+  TraceAlloc("Spilling live range %d\n", range->id());
+  LiveRange* first = range->TopLevel();
+
+  if (!first->HasAllocatedSpillOperand()) {
+    InstructionOperand* op = TryReuseSpillSlot(range);
+    if (op == NULL) {
+      // Allocate a new operand referring to the spill slot.
+      RegisterKind kind = range->Kind();
+      int index = code()->frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
+      if (kind == DOUBLE_REGISTERS) {
+        op = DoubleStackSlotOperand::Create(index, zone());
+      } else {
+        ASSERT(kind == GENERAL_REGISTERS);
+        op = StackSlotOperand::Create(index, zone());
+      }
+    }
+    first->SetSpillOperand(op);
+  }
+  range->MakeSpilled(code_zone());
+}
+
+
+int RegisterAllocator::RegisterCount() const { return num_registers_; }
+
+
+#ifdef DEBUG
+
+
+void RegisterAllocator::Verify() const {
+  for (int i = 0; i < live_ranges()->length(); ++i) {
+    LiveRange* current = live_ranges()->at(i);
+    if (current != NULL) current->Verify();
+  }
+}
+
+
+#endif
+
+
+void RegisterAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
+                                                     int reg) {
+  if (range->Kind() == DOUBLE_REGISTERS) {
+    assigned_double_registers_->Add(reg);
+  } else {
+    ASSERT(range->Kind() == GENERAL_REGISTERS);
+    assigned_registers_->Add(reg);
+  }
+  range->set_assigned_register(reg, code_zone());
+}
+
+
+RegisterAllocatorPhase::RegisterAllocatorPhase(const char* name,
+                                               RegisterAllocator* allocator)
+    : CompilationPhase(name, allocator->code()->linkage()->info()),
+      allocator_(allocator) {
+  if (FLAG_turbo_stats) {
+    allocator_zone_start_allocation_size_ =
+        allocator->zone()->allocation_size();
+  }
+}
+
+
+RegisterAllocatorPhase::~RegisterAllocatorPhase() {
+  if (FLAG_turbo_stats) {
+    unsigned size = allocator_->zone()->allocation_size() -
+                    allocator_zone_start_allocation_size_;
+    isolate()->GetTStatistics()->SaveTiming(name(), base::TimeDelta(), size);
+  }
+#ifdef DEBUG
+  if (allocator_ != NULL) allocator_->Verify();
+#endif
+}
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
new file mode 100644 (file)
index 0000000..1d50aec
--- /dev/null
@@ -0,0 +1,547 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGISTER_ALLOCATOR_H_
+#define V8_REGISTER_ALLOCATOR_H_
+
+#include "src/allocation.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/node.h"
+#include "src/compiler/schedule.h"
+#include "src/macro-assembler.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class BitVector;
+class InstructionOperand;
+class UnallocatedOperand;
+class ParallelMove;
+class PointerMap;
+
+namespace compiler {
+
+enum RegisterKind {
+  UNALLOCATED_REGISTERS,
+  GENERAL_REGISTERS,
+  DOUBLE_REGISTERS
+};
+
+
+// This class represents a single point of a InstructionOperand's lifetime. For
+// each instruction there are exactly two lifetime positions: the beginning and
+// the end of the instruction. Lifetime positions for different instructions are
+// disjoint.
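+//
+// For example, with kStep == 2 (see below), the instruction with index 3
+// is covered by the positions with values 6 (its start) and 7 (its end).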
+class LifetimePosition {
+ public:
+  // Return the lifetime position that corresponds to the beginning of
+  // the instruction with the given index.
+  static LifetimePosition FromInstructionIndex(int index) {
+    return LifetimePosition(index * kStep);
+  }
+
+  // Returns a numeric representation of this lifetime position.
+  int Value() const { return value_; }
+
+  // Returns the index of the instruction to which this lifetime position
+  // corresponds.
+  int InstructionIndex() const {
+    ASSERT(IsValid());
+    return value_ / kStep;
+  }
+
+  // Returns true if this lifetime position corresponds to the instruction
+  // start.
+  bool IsInstructionStart() const { return (value_ & (kStep - 1)) == 0; }
+
+  // Returns the lifetime position for the start of the instruction which
+  // corresponds to this lifetime position.
+  LifetimePosition InstructionStart() const {
+    ASSERT(IsValid());
+    return LifetimePosition(value_ & ~(kStep - 1));
+  }
+
+  // Returns the lifetime position for the end of the instruction which
+  // corresponds to this lifetime position.
+  LifetimePosition InstructionEnd() const {
+    ASSERT(IsValid());
+    return LifetimePosition(InstructionStart().Value() + kStep / 2);
+  }
+
+  // Returns the lifetime position for the beginning of the next instruction.
+  LifetimePosition NextInstruction() const {
+    ASSERT(IsValid());
+    return LifetimePosition(InstructionStart().Value() + kStep);
+  }
+
+  // Returns the lifetime position for the beginning of the previous
+  // instruction.
+  LifetimePosition PrevInstruction() const {
+    ASSERT(IsValid());
+    ASSERT(value_ > 1);
+    return LifetimePosition(InstructionStart().Value() - kStep);
+  }
+
+  // Constructs the lifetime position which does not correspond to any
+  // instruction.
+  LifetimePosition() : value_(-1) {}
+
+  // Returns true if this lifetime position corresponds to some
+  // instruction.
+  bool IsValid() const { return value_ != -1; }
+
+  static inline LifetimePosition Invalid() { return LifetimePosition(); }
+
+  static inline LifetimePosition MaxPosition() {
+    // We have to use this kind of getter instead of a static member due to
+    // a crash bug in GDB.
+    return LifetimePosition(kMaxInt);
+  }
+
+ private:
+  static const int kStep = 2;
+
+  // Code relies on kStep being a power of two.
+  STATIC_ASSERT(IS_POWER_OF_TWO(kStep));
+
+  explicit LifetimePosition(int value) : value_(value) {}
+
+  int value_;
+};
+
+
+// Representation of the non-empty interval [start,end[.
+class UseInterval : public ZoneObject {
+ public:
+  UseInterval(LifetimePosition start, LifetimePosition end)
+      : start_(start), end_(end), next_(NULL) {
+    ASSERT(start.Value() < end.Value());
+  }
+
+  LifetimePosition start() const { return start_; }
+  LifetimePosition end() const { return end_; }
+  UseInterval* next() const { return next_; }
+
+  // Split this interval at the given position without affecting the
+  // live range that owns it. The interval must contain the position.
+  void SplitAt(LifetimePosition pos, Zone* zone);
+
+  // If this interval intersects with the other, return the smallest
+  // position that belongs to both of them.
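+  // For example, [2,6[ and [4,10[ intersect at position 4, while [2,4[
+  // and [6,8[ do not intersect and Invalid() is returned.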
+  LifetimePosition Intersect(const UseInterval* other) const {
+    if (other->start().Value() < start_.Value()) return other->Intersect(this);
+    if (other->start().Value() < end_.Value()) return other->start();
+    return LifetimePosition::Invalid();
+  }
+
+  bool Contains(LifetimePosition point) const {
+    return start_.Value() <= point.Value() && point.Value() < end_.Value();
+  }
+
+  void set_start(LifetimePosition start) { start_ = start; }
+  void set_next(UseInterval* next) { next_ = next; }
+
+  LifetimePosition start_;
+  LifetimePosition end_;
+  UseInterval* next_;
+};
+
+
+// Representation of a use position.
+class UsePosition : public ZoneObject {
+ public:
+  UsePosition(LifetimePosition pos, InstructionOperand* operand,
+              InstructionOperand* hint);
+
+  InstructionOperand* operand() const { return operand_; }
+  bool HasOperand() const { return operand_ != NULL; }
+
+  InstructionOperand* hint() const { return hint_; }
+  bool HasHint() const;
+  bool RequiresRegister() const;
+  bool RegisterIsBeneficial() const;
+
+  LifetimePosition pos() const { return pos_; }
+  UsePosition* next() const { return next_; }
+
+  void set_next(UsePosition* next) { next_ = next; }
+
+  InstructionOperand* const operand_;
+  InstructionOperand* const hint_;
+  LifetimePosition const pos_;
+  UsePosition* next_;
+  bool requires_reg_;
+  bool register_beneficial_;
+};
+
+
+// Representation of SSA values' live ranges as a collection of (continuous)
+// intervals over the instruction ordering.
+class LiveRange : public ZoneObject {
+ public:
+  static const int kInvalidAssignment = 0x7fffffff;
+
+  LiveRange(int id, Zone* zone);
+
+  UseInterval* first_interval() const { return first_interval_; }
+  UsePosition* first_pos() const { return first_pos_; }
+  LiveRange* parent() const { return parent_; }
+  LiveRange* TopLevel() { return (parent_ == NULL) ? this : parent_; }
+  LiveRange* next() const { return next_; }
+  bool IsChild() const { return parent() != NULL; }
+  int id() const { return id_; }
+  bool IsFixed() const { return id_ < 0; }
+  bool IsEmpty() const { return first_interval() == NULL; }
+  InstructionOperand* CreateAssignedOperand(Zone* zone);
+  int assigned_register() const { return assigned_register_; }
+  int spill_start_index() const { return spill_start_index_; }
+  void set_assigned_register(int reg, Zone* zone);
+  void MakeSpilled(Zone* zone);
+  bool is_phi() const { return is_phi_; }
+  void set_is_phi(bool is_phi) { is_phi_ = is_phi; }
+  bool is_non_loop_phi() const { return is_non_loop_phi_; }
+  void set_is_non_loop_phi(bool is_non_loop_phi) {
+    is_non_loop_phi_ = is_non_loop_phi;
+  }
+
+  // Returns use position in this live range that follows both start
+  // and last processed use position.
+  // Modifies internal state of live range!
+  UsePosition* NextUsePosition(LifetimePosition start);
+
+  // Returns use position for which register is required in this live
+  // range and which follows both start and the last processed use position.
+  // Modifies internal state of live range!
+  UsePosition* NextRegisterPosition(LifetimePosition start);
+
+  // Returns use position for which register is beneficial in this live
+  // range and which follows both start and the last processed use position.
+  // Modifies internal state of live range!
+  UsePosition* NextUsePositionRegisterIsBeneficial(LifetimePosition start);
+
+  // Returns use position for which register is beneficial in this live
+  // range and which precedes start.
+  UsePosition* PreviousUsePositionRegisterIsBeneficial(LifetimePosition start);
+
+  // Can this live range be spilled at this position?
+  bool CanBeSpilled(LifetimePosition pos);
+
+  // Split this live range at the given position which must follow the start of
+  // the range.
+  // All uses following the given position will be moved from this
+  // live range to the result live range.
+  void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);
+
+  RegisterKind Kind() const { return kind_; }
+  bool HasRegisterAssigned() const {
+    return assigned_register_ != kInvalidAssignment;
+  }
+  bool IsSpilled() const { return spilled_; }
+
+  InstructionOperand* current_hint_operand() const {
+    ASSERT(current_hint_operand_ == FirstHint());
+    return current_hint_operand_;
+  }
+  InstructionOperand* FirstHint() const {
+    UsePosition* pos = first_pos_;
+    while (pos != NULL && !pos->HasHint()) pos = pos->next();
+    if (pos != NULL) return pos->hint();
+    return NULL;
+  }
+
+  LifetimePosition Start() const {
+    ASSERT(!IsEmpty());
+    return first_interval()->start();
+  }
+
+  LifetimePosition End() const {
+    ASSERT(!IsEmpty());
+    return last_interval_->end();
+  }
+
+  bool HasAllocatedSpillOperand() const;
+  InstructionOperand* GetSpillOperand() const { return spill_operand_; }
+  void SetSpillOperand(InstructionOperand* operand);
+
+  void SetSpillStartIndex(int start) {
+    spill_start_index_ = Min(start, spill_start_index_);
+  }
+
+  bool ShouldBeAllocatedBefore(const LiveRange* other) const;
+  bool CanCover(LifetimePosition position) const;
+  bool Covers(LifetimePosition position);
+  LifetimePosition FirstIntersection(LiveRange* other);
+
+  // Add a new interval or a new use position to this live range.
+  void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
+  void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
+  void AddUsePosition(LifetimePosition pos, InstructionOperand* operand,
+                      InstructionOperand* hint, Zone* zone);
+
+  // Shorten the most recently added interval by setting a new start.
+  void ShortenTo(LifetimePosition start);
+
+#ifdef DEBUG
+  // True if target overlaps an existing interval.
+  bool HasOverlap(UseInterval* target) const;
+  void Verify() const;
+#endif
+
+ private:
+  void ConvertOperands(Zone* zone);
+  UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
+  void AdvanceLastProcessedMarker(UseInterval* to_start_of,
+                                  LifetimePosition but_not_past) const;
+
+  int id_;
+  bool spilled_;
+  bool is_phi_;
+  bool is_non_loop_phi_;
+  RegisterKind kind_;
+  int assigned_register_;
+  UseInterval* last_interval_;
+  UseInterval* first_interval_;
+  UsePosition* first_pos_;
+  LiveRange* parent_;
+  LiveRange* next_;
+  // This is used as a cache; it doesn't affect correctness.
+  mutable UseInterval* current_interval_;
+  UsePosition* last_processed_use_;
+  // This is used as a cache; it's invalid outside of BuildLiveRanges.
+  InstructionOperand* current_hint_operand_;
+  InstructionOperand* spill_operand_;
+  int spill_start_index_;
+
+  friend class RegisterAllocator;  // Assigns to kind_.
+};
+
+
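+// Typical usage, as an illustrative sketch (the variable names are not
+// prescribed by this header):
+//
+//   RegisterAllocator allocator(sequence);
+//   if (!allocator.Allocate()) {
+//     // Bailed out, e.g. because too many virtual registers were needed.
+//   }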
+class RegisterAllocator BASE_EMBEDDED {
+ public:
+  explicit RegisterAllocator(InstructionSequence* code);
+
+  static void TraceAlloc(const char* msg, ...);
+
+  // Checks whether the value of a given virtual register is a reference.
+  // TODO(titzer): rename this to IsReference.
+  bool HasTaggedValue(int virtual_register) const;
+
+  // Returns the register kind required by the given virtual register.
+  RegisterKind RequiredRegisterKind(int virtual_register) const;
+
+  bool Allocate();
+
+  const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
+  const Vector<LiveRange*>* fixed_live_ranges() const {
+    return &fixed_live_ranges_;
+  }
+  const Vector<LiveRange*>* fixed_double_live_ranges() const {
+    return &fixed_double_live_ranges_;
+  }
+
+  inline InstructionSequence* code() const { return code_; }
+
+  // This zone is for data structures only needed during register allocation.
+  inline Zone* zone() { return &zone_; }
+
+  // This zone is for InstructionOperands and moves that live beyond register
+  // allocation.
+  inline Zone* code_zone() { return code()->zone(); }
+
+  int GetVirtualRegister() {
+    int vreg = code()->NextVirtualRegister();
+    if (vreg >= UnallocatedOperand::kMaxVirtualRegisters) {
+      allocation_ok_ = false;
+      // Maintain the invariant that we return something below the maximum.
+      return 0;
+    }
+    return vreg;
+  }
+
+  bool AllocationOk() { return allocation_ok_; }
+
+#ifdef DEBUG
+  void Verify() const;
+#endif
+
+  BitVector* assigned_registers() { return assigned_registers_; }
+  BitVector* assigned_double_registers() { return assigned_double_registers_; }
+
+ private:
+  void MeetRegisterConstraints();
+  void ResolvePhis();
+  void BuildLiveRanges();
+  void AllocateGeneralRegisters();
+  void AllocateDoubleRegisters();
+  void ConnectRanges();
+  void ResolveControlFlow();
+  void PopulatePointerMaps();  // TODO(titzer): rename to PopulateReferenceMaps.
+  void AllocateRegisters();
+  bool CanEagerlyResolveControlFlow(BasicBlock* block) const;
+  inline bool SafePointsAreInOrder() const;
+
+  // Liveness analysis support.
+  void InitializeLivenessAnalysis();
+  BitVector* ComputeLiveOut(BasicBlock* block);
+  void AddInitialIntervals(BasicBlock* block, BitVector* live_out);
+  bool IsOutputRegisterOf(Instruction* instr, int index);
+  bool IsOutputDoubleRegisterOf(Instruction* instr, int index);
+  void ProcessInstructions(BasicBlock* block, BitVector* live);
+  void MeetRegisterConstraints(BasicBlock* block);
+  void MeetConstraintsBetween(Instruction* first, Instruction* second,
+                              int gap_index);
+  void ResolvePhis(BasicBlock* block);
+
+  // Helper methods for building intervals.
+  InstructionOperand* AllocateFixed(UnallocatedOperand* operand, int pos,
+                                    bool is_tagged);
+  LiveRange* LiveRangeFor(InstructionOperand* operand);
+  void Define(LifetimePosition position, InstructionOperand* operand,
+              InstructionOperand* hint);
+  void Use(LifetimePosition block_start, LifetimePosition position,
+           InstructionOperand* operand, InstructionOperand* hint);
+  void AddConstraintsGapMove(int index, InstructionOperand* from,
+                             InstructionOperand* to);
+
+  // Helper methods for updating the life range lists.
+  void AddToActive(LiveRange* range);
+  void AddToInactive(LiveRange* range);
+  void AddToUnhandledSorted(LiveRange* range);
+  void AddToUnhandledUnsorted(LiveRange* range);
+  void SortUnhandled();
+  bool UnhandledIsSorted();
+  void ActiveToHandled(LiveRange* range);
+  void ActiveToInactive(LiveRange* range);
+  void InactiveToHandled(LiveRange* range);
+  void InactiveToActive(LiveRange* range);
+  void FreeSpillSlot(LiveRange* range);
+  InstructionOperand* TryReuseSpillSlot(LiveRange* range);
+
+  // Helper methods for allocating registers.
+  bool TryAllocateFreeReg(LiveRange* range);
+  void AllocateBlockedReg(LiveRange* range);
+
+  // Live range splitting helpers.
+
+  // Split the given range at the given position.
+  // If range starts at or after the given position then the
+  // original range is returned.
+  // Otherwise returns the live range that starts at pos and contains
+  // all uses from the original range that follow pos. Uses at pos will
+  // still be owned by the original range after splitting.
+  LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos);
+
+  // Split the given range at a position from the interval [start, end].
+  LiveRange* SplitBetween(LiveRange* range, LifetimePosition start,
+                          LifetimePosition end);
+
+  // Find a lifetime position in the interval [start, end] which
+  // is optimal for splitting: it is either the header of the outermost
+  // loop covered by this interval or the latest possible position.
+  LifetimePosition FindOptimalSplitPos(LifetimePosition start,
+                                       LifetimePosition end);
+
+  // Spill the given live range after position pos.
+  void SpillAfter(LiveRange* range, LifetimePosition pos);
+
+  // Spill the given live range after position [start] and up to
+  // position [end].
+  void SpillBetween(LiveRange* range, LifetimePosition start,
+                    LifetimePosition end);
+
+  // Spill the given live range after position [start] and up to
+  // position [end]. The range is guaranteed to be spilled at least
+  // until position [until].
+  void SpillBetweenUntil(LiveRange* range, LifetimePosition start,
+                         LifetimePosition until, LifetimePosition end);
+
+  void SplitAndSpillIntersecting(LiveRange* range);
+
+  // If we are trying to spill a range inside a loop, try to hoist the
+  // spill position out to the point just before the loop.
+  LifetimePosition FindOptimalSpillingPos(LiveRange* range,
+                                          LifetimePosition pos);
+
+  void Spill(LiveRange* range);
+  bool IsBlockBoundary(LifetimePosition pos);
+
+  // Helper methods for resolving control flow.
+  void ResolveControlFlow(LiveRange* range, BasicBlock* block,
+                          BasicBlock* pred);
+
+  inline void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
+
+  // Return the parallel move that should be used to connect ranges split
+  // at the given position.
+  ParallelMove* GetConnectingParallelMove(LifetimePosition pos);
+
+  // Return the block which contains the given lifetime position.
+  BasicBlock* GetBlock(LifetimePosition pos);
+
+  // Helper methods for the fixed registers.
+  int RegisterCount() const;
+  static int FixedLiveRangeID(int index) { return -index - 1; }
+  static int FixedDoubleLiveRangeID(int index);
+  LiveRange* FixedLiveRangeFor(int index);
+  LiveRange* FixedDoubleLiveRangeFor(int index);
+  LiveRange* LiveRangeFor(int index);
+  GapInstruction* GetLastGap(BasicBlock* block);
+
+  const char* RegisterName(int allocation_index);
+
+  inline Instruction* InstructionAt(int index) {
+    return code()->InstructionAt(index);
+  }
+
+  Zone zone_;
+  InstructionSequence* code_;
+
+  // During liveness analysis keep a mapping from block id to live_in sets
+  // for blocks already analyzed.
+  ZoneList<BitVector*> live_in_sets_;
+
+  // Liveness analysis results.
+  ZoneList<LiveRange*> live_ranges_;
+
+  // Lists of live ranges.
+  EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
+      fixed_live_ranges_;
+  EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
+      fixed_double_live_ranges_;
+  ZoneList<LiveRange*> unhandled_live_ranges_;
+  ZoneList<LiveRange*> active_live_ranges_;
+  ZoneList<LiveRange*> inactive_live_ranges_;
+  ZoneList<LiveRange*> reusable_slots_;
+
+  RegisterKind mode_;
+  int num_registers_;
+
+  BitVector* assigned_registers_;
+  BitVector* assigned_double_registers_;
+
+  // Indicates success or failure during register allocation.
+  bool allocation_ok_;
+
+#ifdef DEBUG
+  LifetimePosition allocation_finger_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
+};
+
+
+class RegisterAllocatorPhase : public CompilationPhase {
+ public:
+  RegisterAllocatorPhase(const char* name, RegisterAllocator* allocator);
+  ~RegisterAllocatorPhase();
+
+ private:
+  RegisterAllocator* allocator_;
+  unsigned allocator_zone_start_allocation_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorPhase);
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_REGISTER_ALLOCATOR_H_
diff --git a/src/compiler/representation-change.h b/src/compiler/representation-change.h
new file mode 100644 (file)
index 0000000..824e22b
--- /dev/null
@@ -0,0 +1,383 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_REPRESENTATION_CHANGE_H_
+#define V8_COMPILER_REPRESENTATION_CHANGE_H_
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// The types and representations tracked during representation inference
+// and change insertion.
+// TODO(titzer): First, merge MachineRepresentation and RepType.
+// TODO(titzer): Second, use the real type system instead of RepType.
+enum RepType {
+  // Representations.
+  rBit = 1 << 0,
+  rWord32 = 1 << 1,
+  rWord64 = 1 << 2,
+  rFloat64 = 1 << 3,
+  rTagged = 1 << 4,
+
+  // Types.
+  tBool = 1 << 5,
+  tInt32 = 1 << 6,
+  tUint32 = 1 << 7,
+  tInt64 = 1 << 8,
+  tUint64 = 1 << 9,
+  tNumber = 1 << 10,
+  tAny = 1 << 11
+};
+
+typedef uint16_t RepTypeUnion;
+
+const RepTypeUnion rMask = rBit | rWord32 | rWord64 | rFloat64 | rTagged;
+const RepTypeUnion tMask =
+    tBool | tInt32 | tUint32 | tInt64 | tUint64 | tNumber | tAny;
+const RepType rPtr = kPointerSize == 4 ? rWord32 : rWord64;
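+
+// For instance, a signed 32-bit integer held in a machine word is described
+// by the union (tInt32 | rWord32), while an arbitrary JavaScript value is
+// (tAny | rTagged). A minimal sketch using only the names declared above:
+//
+//   RepTypeUnion int32_in_word = tInt32 | rWord32;
+//   ASSERT(IsPowerOf2(int32_in_word & rMask));  // Exactly one representation.
+//   ASSERT((int32_in_word & tMask) == tInt32);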
+
+
+// Contains logic related to changing the representation of values for
+// constants and other nodes, as well as lowering Simplified->Machine
+// operators. Eagerly folds any representation changes for constants.
+class RepresentationChanger {
+ public:
+  RepresentationChanger(JSGraph* jsgraph, SimplifiedOperatorBuilder* simplified,
+                        MachineOperatorBuilder* machine, Isolate* isolate)
+      : jsgraph_(jsgraph),
+        simplified_(simplified),
+        machine_(machine),
+        isolate_(isolate),
+        testing_type_errors_(false),
+        type_error_(false) {}
+
+  Node* GetRepresentationFor(Node* node, RepTypeUnion output_type,
+                             RepTypeUnion use_type) {
+    if (!IsPowerOf2(output_type & rMask)) {
+      // There should be only one output representation.
+      return TypeError(node, output_type, use_type);
+    }
+    if ((use_type & rMask) == (output_type & rMask)) {
+      // Representations are the same. That's a no-op.
+      return node;
+    }
+    if (use_type & rTagged) {
+      return GetTaggedRepresentationFor(node, output_type);
+    } else if (use_type & rFloat64) {
+      return GetFloat64RepresentationFor(node, output_type);
+    } else if (use_type & rWord32) {
+      return GetWord32RepresentationFor(node, output_type);
+    } else if (use_type & rBit) {
+      return GetBitRepresentationFor(node, output_type);
+    } else if (use_type & rWord64) {
+      return GetWord64RepresentationFor(node, output_type);
+    } else {
+      return node;
+    }
+  }
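+
+  // A minimal usage sketch (illustrative only; {tagged_int32} stands for any
+  // node whose output is a tagged int32, i.e. (tInt32 | rTagged)):
+  //
+  //   // A tagged int32 flowing into a word32 use gets a ChangeTaggedToInt32
+  //   // node inserted in front of the use:
+  //   Node* w32 = GetRepresentationFor(tagged_int32, tInt32 | rTagged,
+  //                                    tInt32 | rWord32);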
+
+  Node* GetTaggedRepresentationFor(Node* node, RepTypeUnion output_type) {
+    // Eagerly fold representation changes for constants.
+    switch (node->opcode()) {
+      case IrOpcode::kNumberConstant:
+      case IrOpcode::kHeapConstant:
+        return node;  // No change necessary.
+      case IrOpcode::kInt32Constant:
+        if (output_type & tUint32) {
+          uint32_t value = ValueOf<uint32_t>(node->op());
+          return jsgraph()->Constant(static_cast<double>(value));
+        } else if (output_type & tInt32) {
+          int32_t value = ValueOf<int32_t>(node->op());
+          return jsgraph()->Constant(value);
+        } else if (output_type & rBit) {
+          return ValueOf<int32_t>(node->op()) == 0 ? jsgraph()->FalseConstant()
+                                                   : jsgraph()->TrueConstant();
+        } else {
+          return TypeError(node, output_type, rTagged);
+        }
+      case IrOpcode::kFloat64Constant:
+        return jsgraph()->Constant(ValueOf<double>(node->op()));
+      default:
+        break;
+    }
+    // Select the correct X -> Tagged operator.
+    Operator* op;
+    if (output_type & rBit) {
+      op = simplified()->ChangeBitToBool();
+    } else if (output_type & rWord32) {
+      if (output_type & tUint32) {
+        op = simplified()->ChangeUint32ToTagged();
+      } else if (output_type & tInt32) {
+        op = simplified()->ChangeInt32ToTagged();
+      } else {
+        return TypeError(node, output_type, rTagged);
+      }
+    } else if (output_type & rFloat64) {
+      op = simplified()->ChangeFloat64ToTagged();
+    } else {
+      return TypeError(node, output_type, rTagged);
+    }
+    return jsgraph()->graph()->NewNode(op, node);
+  }
+
+  Node* GetFloat64RepresentationFor(Node* node, RepTypeUnion output_type) {
+    // Eagerly fold representation changes for constants.
+    switch (node->opcode()) {
+      case IrOpcode::kNumberConstant:
+        return jsgraph()->Float64Constant(ValueOf<double>(node->op()));
+      case IrOpcode::kInt32Constant:
+        if (output_type & tUint32) {
+          uint32_t value = ValueOf<uint32_t>(node->op());
+          return jsgraph()->Float64Constant(static_cast<double>(value));
+        } else {
+          int32_t value = ValueOf<int32_t>(node->op());
+          return jsgraph()->Float64Constant(value);
+        }
+      case IrOpcode::kFloat64Constant:
+        return node;  // No change necessary.
+      default:
+        break;
+    }
+    // Select the correct X -> Float64 operator.
+    Operator* op;
+    if (output_type & rWord32) {
+      if (output_type & tUint32) {
+        op = machine()->ConvertUint32ToFloat64();
+      } else if (output_type & tInt32) {
+        op = machine()->ConvertInt32ToFloat64();
+      } else {
+        return TypeError(node, output_type, rFloat64);
+      }
+    } else if (output_type & rTagged) {
+      op = simplified()->ChangeTaggedToFloat64();
+    } else {
+      return TypeError(node, output_type, rFloat64);
+    }
+    return jsgraph()->graph()->NewNode(op, node);
+  }
+
+  Node* GetWord32RepresentationFor(Node* node, RepTypeUnion output_type) {
+    // Eagerly fold representation changes for constants.
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant:
+        return node;  // No change necessary.
+      case IrOpcode::kNumberConstant:
+      case IrOpcode::kFloat64Constant: {
+        if (output_type & tUint32) {
+          int32_t value = static_cast<int32_t>(
+              static_cast<uint32_t>(ValueOf<double>(node->op())));
+          return jsgraph()->Int32Constant(value);
+        } else if (output_type & tInt32) {
+          int32_t value = FastD2I(ValueOf<double>(node->op()));
+          return jsgraph()->Int32Constant(value);
+        } else {
+          return TypeError(node, output_type, rWord32);
+        }
+      }
+      default:
+        break;
+    }
+    // Select the correct X -> Word32 operator.
+    Operator* op = NULL;
+    if (output_type & rFloat64) {
+      // TODO(turbofan): could have cheaper float64 conversions that don't do
+      // the full JavaScript truncation here.
+      if (output_type & tUint32) {
+        op = machine()->ConvertFloat64ToUint32();
+      } else if (output_type & tInt32) {
+        op = machine()->ConvertFloat64ToInt32();
+      } else {
+        return TypeError(node, output_type, rWord32);
+      }
+    } else if (output_type & rTagged) {
+      if (output_type & tUint32) {
+        op = simplified()->ChangeTaggedToUint32();
+      } else if (output_type & tInt32) {
+        op = simplified()->ChangeTaggedToInt32();
+      } else {
+        return TypeError(node, output_type, rWord32);
+      }
+    } else if (output_type & rBit) {
+      return node;  // Sloppy comparison -> word32.
+    } else {
+      return TypeError(node, output_type, rWord32);
+    }
+    return jsgraph()->graph()->NewNode(op, node);
+  }
+
+  Node* GetBitRepresentationFor(Node* node, RepTypeUnion output_type) {
+    // Eagerly fold representation changes for constants.
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant: {
+        int32_t value = ValueOf<int32_t>(node->op());
+        if (value == 0 || value == 1) return node;
+        return jsgraph()->OneConstant();  // value != 0
+      }
+      case IrOpcode::kHeapConstant: {
+        Handle<Object> handle = ValueOf<Handle<Object> >(node->op());
+        ASSERT(*handle == isolate()->heap()->true_value() ||
+               *handle == isolate()->heap()->false_value());
+        return jsgraph()->Int32Constant(
+            *handle == isolate()->heap()->true_value() ? 1 : 0);
+      }
+      default:
+        break;
+    }
+    // Select the correct X -> Bit operator.
+    Operator* op;
+    if (output_type & rWord32) {
+      return node;  // No change necessary.
+    } else if (output_type & rWord64) {
+      return node;  // TODO(titzer): No change necessary on 64-bit.
+    } else if (output_type & rTagged) {
+      op = simplified()->ChangeBoolToBit();
+    } else {
+      return TypeError(node, output_type, rBit);
+    }
+    return jsgraph()->graph()->NewNode(op, node);
+  }
+
+  Node* GetWord64RepresentationFor(Node* node, RepTypeUnion output_type) {
+    if (output_type & rBit) {
+      return node;  // Sloppy comparison -> word64.
+    }
+    // A Word64 can't really be converted to anything else; it is intended
+    // for internal use only.
+  }
+
+  static RepType TypeForMachineRepresentation(MachineRepresentation rep) {
+    // TODO(titzer): merge MachineRepresentation and RepType.
+    switch (rep) {
+      case kMachineWord8:
+        return rWord32;
+      case kMachineWord16:
+        return rWord32;
+      case kMachineWord32:
+        return rWord32;
+      case kMachineWord64:
+        return rWord64;
+      case kMachineFloat64:
+        return rFloat64;
+      case kMachineTagged:
+        return rTagged;
+      default:
+        UNREACHABLE();
+        return static_cast<RepType>(0);
+    }
+  }
+
+  Operator* Int32OperatorFor(IrOpcode::Value opcode) {
+    switch (opcode) {
+      case IrOpcode::kNumberAdd:
+        return machine()->Int32Add();
+      case IrOpcode::kNumberSubtract:
+        return machine()->Int32Sub();
+      case IrOpcode::kNumberEqual:
+        return machine()->Word32Equal();
+      case IrOpcode::kNumberLessThan:
+        return machine()->Int32LessThan();
+      case IrOpcode::kNumberLessThanOrEqual:
+        return machine()->Int32LessThanOrEqual();
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+
+  Operator* Uint32OperatorFor(IrOpcode::Value opcode) {
+    switch (opcode) {
+      case IrOpcode::kNumberAdd:
+        return machine()->Int32Add();
+      case IrOpcode::kNumberSubtract:
+        return machine()->Int32Sub();
+      case IrOpcode::kNumberEqual:
+        return machine()->Word32Equal();
+      case IrOpcode::kNumberLessThan:
+        return machine()->Uint32LessThan();
+      case IrOpcode::kNumberLessThanOrEqual:
+        return machine()->Uint32LessThanOrEqual();
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+
+  Operator* Float64OperatorFor(IrOpcode::Value opcode) {
+    switch (opcode) {
+      case IrOpcode::kNumberAdd:
+        return machine()->Float64Add();
+      case IrOpcode::kNumberSubtract:
+        return machine()->Float64Sub();
+      case IrOpcode::kNumberMultiply:
+        return machine()->Float64Mul();
+      case IrOpcode::kNumberDivide:
+        return machine()->Float64Div();
+      case IrOpcode::kNumberModulus:
+        return machine()->Float64Mod();
+      case IrOpcode::kNumberEqual:
+        return machine()->Float64Equal();
+      case IrOpcode::kNumberLessThan:
+        return machine()->Float64LessThan();
+      case IrOpcode::kNumberLessThanOrEqual:
+        return machine()->Float64LessThanOrEqual();
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+
+  RepType TypeForField(const FieldAccess& access) {
+    RepType tElement = static_cast<RepType>(0);  // TODO(titzer)
+    RepType rElement = TypeForMachineRepresentation(access.representation);
+    return static_cast<RepType>(tElement | rElement);
+  }
+
+  RepType TypeForElement(const ElementAccess& access) {
+    RepType tElement = static_cast<RepType>(0);  // TODO(titzer)
+    RepType rElement = TypeForMachineRepresentation(access.representation);
+    return static_cast<RepType>(tElement | rElement);
+  }
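+
+  // For example (a sketch; {tagged_field} and {float64_element} are
+  // hypothetical accesses, and the type half is still the TODO above):
+  //
+  //   TypeForField(tagged_field);       // == rTagged
+  //   TypeForElement(float64_element);  // == rFloat64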
+
+  RepType TypeForBasePointer(Node* node) {
+    Type* upper = NodeProperties::GetBounds(node).upper;
+    if (upper->Is(Type::UntaggedPtr())) return rPtr;
+    return static_cast<RepType>(tAny | rTagged);
+  }
+
+ private:
+  JSGraph* jsgraph_;
+  SimplifiedOperatorBuilder* simplified_;
+  MachineOperatorBuilder* machine_;
+  Isolate* isolate_;
+
+  friend class RepresentationChangerTester;  // accesses the fields below.
+
+  bool testing_type_errors_;  // If {true}, don't abort on a type error.
+  bool type_error_;           // Set when a type error is detected.
+
+  Node* TypeError(Node* node, RepTypeUnion output_type, RepTypeUnion use) {
+    type_error_ = true;
+    if (!testing_type_errors_) {
+      UNREACHABLE();  // TODO(titzer): report a nicer type error.
+    }
+    return node;
+  }
+
+  JSGraph* jsgraph() { return jsgraph_; }
+  Isolate* isolate() { return isolate_; }
+  SimplifiedOperatorBuilder* simplified() { return simplified_; }
+  MachineOperatorBuilder* machine() { return machine_; }
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_REPRESENTATION_CHANGE_H_
diff --git a/src/compiler/schedule.cc b/src/compiler/schedule.cc
new file mode 100644 (file)
index 0000000..6476676
--- /dev/null
@@ -0,0 +1,92 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/schedule.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OStream& operator<<(OStream& os, const BasicBlockData::Control& c) {
+  switch (c) {
+    case BasicBlockData::kNone:
+      return os << "none";
+    case BasicBlockData::kGoto:
+      return os << "goto";
+    case BasicBlockData::kBranch:
+      return os << "branch";
+    case BasicBlockData::kReturn:
+      return os << "return";
+    case BasicBlockData::kThrow:
+      return os << "throw";
+    case BasicBlockData::kCall:
+      return os << "call";
+    case BasicBlockData::kDeoptimize:
+      return os << "deoptimize";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const Schedule& s) {
+  // TODO(svenpanne) Const-correct the RPO stuff/iterators.
+  BasicBlockVector* rpo = const_cast<Schedule*>(&s)->rpo_order();
+  for (BasicBlockVectorIter i = rpo->begin(); i != rpo->end(); ++i) {
+    BasicBlock* block = *i;
+    os << "--- BLOCK B" << block->id();
+    if (block->PredecessorCount() != 0) os << " <- ";
+    BasicBlock::Predecessors predecessors = block->predecessors();
+    bool comma = false;
+    for (BasicBlock::Predecessors::iterator j = predecessors.begin();
+         j != predecessors.end(); ++j) {
+      if (comma) os << ", ";
+      comma = true;
+      os << "B" << (*j)->id();
+    }
+    os << " ---\n";
+    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+         ++j) {
+      Node* node = *j;
+      os << "  " << *node;
+      if (!NodeProperties::IsControl(node)) {
+        Bounds bounds = NodeProperties::GetBounds(node);
+        os << " : ";
+        bounds.lower->PrintTo(os);
+        if (!bounds.upper->Is(bounds.lower)) {
+          os << "..";
+          bounds.upper->PrintTo(os);
+        }
+      }
+      os << "\n";
+    }
+    BasicBlock::Control control = block->control_;
+    if (control != BasicBlock::kNone) {
+      os << "  ";
+      if (block->control_input_ != NULL) {
+        os << *block->control_input_;
+      } else {
+        os << "Goto";
+      }
+      os << " -> ";
+      BasicBlock::Successors successors = block->successors();
+      comma = false;
+      for (BasicBlock::Successors::iterator j = successors.begin();
+           j != successors.end(); ++j) {
+        if (comma) os << ", ";
+        comma = true;
+        os << "B" << (*j)->id();
+      }
+      os << "\n";
+    }
+  }
+  return os;
+}
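+
+// The printed form looks roughly like the sketch below (node output depends
+// on Node's operator<< and is abbreviated here):
+//
+//   --- BLOCK B0 ---
+//   <branch node> -> B1, B2
+//   --- BLOCK B1 <- B0 ---
+//   Goto -> B3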
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/schedule.h b/src/compiler/schedule.h
new file mode 100644 (file)
index 0000000..6b148a1
--- /dev/null
@@ -0,0 +1,335 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SCHEDULE_H_
+#define V8_COMPILER_SCHEDULE_H_
+
+#include <vector>
+
+#include "src/v8.h"
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class BasicBlock;
+class Graph;
+class ConstructScheduleData;
+class CodeGenerator;  // Because of a namespace bug in clang.
+
+class BasicBlockData {
+ public:
+  // Possible control nodes that can end a block.
+  enum Control {
+    kNone,       // Control not initialized yet.
+    kGoto,       // Goto a single successor block.
+    kBranch,     // Branch if true to first successor, otherwise second.
+    kReturn,     // Return a value from this method.
+    kThrow,      // Throw an exception.
+    kCall,       // Call to a possibly deoptimizing or throwing function.
+    kDeoptimize  // Deoptimize.
+  };
+
+  int32_t rpo_number_;       // Special RPO number of the block.
+  BasicBlock* loop_header_;  // Pointer to dominating loop header basic block,
+                             // NULL if none. For loop headers, this points to
+                             // the enclosing loop header.
+  int32_t loop_depth_;       // Loop nesting level; 0 is top-level.
+  int32_t loop_end_;         // End of the loop, if this block is a loop
+                             // header.
+  int32_t code_start_;       // Start index of arch-specific code.
+  int32_t code_end_;         // End index of arch-specific code.
+  bool deferred_;            // {true} if this block is considered the slow
+                             // path.
+  Control control_;          // Control at the end of the block.
+  Node* control_input_;      // Input value for control.
+  NodeVector nodes_;         // Nodes of this block in forward order.
+
+  explicit BasicBlockData(Zone* zone)
+      : rpo_number_(-1),
+        loop_header_(NULL),
+        loop_depth_(0),
+        loop_end_(-1),
+        code_start_(-1),
+        code_end_(-1),
+        deferred_(false),
+        control_(kNone),
+        control_input_(NULL),
+        nodes_(NodeVector::allocator_type(zone)) {}
+
+  inline bool IsLoopHeader() const { return loop_end_ >= 0; }
+  inline bool LoopContains(BasicBlockData* block) const {
+    // RPO numbers must be initialized.
+    ASSERT(rpo_number_ >= 0);
+    ASSERT(block->rpo_number_ >= 0);
+    if (loop_end_ < 0) return false;  // This is not a loop.
+    return block->rpo_number_ >= rpo_number_ && block->rpo_number_ < loop_end_;
+  }
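+  // Example for LoopContains above: a loop header with rpo_number_ == 3 and
+  // loop_end_ == 7 contains exactly the blocks whose RPO numbers lie in the
+  // half-open interval [3, 7); a block that is not a loop header has
+  // loop_end_ == -1.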
+  int first_instruction_index() {
+    ASSERT(code_start_ >= 0);
+    ASSERT(code_end_ > 0);
+    ASSERT(code_end_ >= code_start_);
+    return code_start_;
+  }
+  int last_instruction_index() {
+    ASSERT(code_start_ >= 0);
+    ASSERT(code_end_ > 0);
+    ASSERT(code_end_ >= code_start_);
+    return code_end_ - 1;
+  }
+};
+
+OStream& operator<<(OStream& os, const BasicBlockData::Control& c);
+
+// A basic block contains an ordered list of nodes and ends with a control
+// node. Note that if a basic block has phis, then all phis must appear as the
+// first nodes in the block.
+class BasicBlock V8_FINAL : public GenericNode<BasicBlockData, BasicBlock> {
+ public:
+  BasicBlock(GenericGraphBase* graph, int input_count)
+      : GenericNode<BasicBlockData, BasicBlock>(graph, input_count) {}
+
+  typedef Uses Successors;
+  typedef Inputs Predecessors;
+
+  Successors successors() { return static_cast<Successors>(uses()); }
+  Predecessors predecessors() { return static_cast<Predecessors>(inputs()); }
+
+  int PredecessorCount() { return InputCount(); }
+  BasicBlock* PredecessorAt(int index) { return InputAt(index); }
+
+  int SuccessorCount() { return UseCount(); }
+  BasicBlock* SuccessorAt(int index) { return UseAt(index); }
+
+  int PredecessorIndexOf(BasicBlock* predecessor) {
+    BasicBlock::Predecessors predecessors = this->predecessors();
+    for (BasicBlock::Predecessors::iterator i = predecessors.begin();
+         i != predecessors.end(); ++i) {
+      if (*i == predecessor) return i.index();
+    }
+    return -1;
+  }
+
+  inline BasicBlock* loop_header() {
+    return static_cast<BasicBlock*>(loop_header_);
+  }
+  inline BasicBlock* ContainingLoop() {
+    if (IsLoopHeader()) return this;
+    return static_cast<BasicBlock*>(loop_header_);
+  }
+
+  typedef NodeVector::iterator iterator;
+  iterator begin() { return nodes_.begin(); }
+  iterator end() { return nodes_.end(); }
+
+  typedef NodeVector::const_iterator const_iterator;
+  const_iterator begin() const { return nodes_.begin(); }
+  const_iterator end() const { return nodes_.end(); }
+
+  typedef NodeVector::reverse_iterator reverse_iterator;
+  reverse_iterator rbegin() { return nodes_.rbegin(); }
+  reverse_iterator rend() { return nodes_.rend(); }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BasicBlock);
+};
+
+typedef GenericGraphVisit::NullNodeVisitor<BasicBlockData, BasicBlock>
+    NullBasicBlockVisitor;
+
+typedef zone_allocator<BasicBlock*> BasicBlockPtrZoneAllocator;
+typedef std::vector<BasicBlock*, BasicBlockPtrZoneAllocator> BasicBlockVector;
+typedef BasicBlockVector::iterator BasicBlockVectorIter;
+typedef BasicBlockVector::reverse_iterator BasicBlockVectorRIter;
+
+// A schedule represents the result of assigning nodes to basic blocks
+// and ordering them within basic blocks. Prior to computing a schedule,
+// a graph has no notion of control flow ordering other than that induced
+// by the graph's dependencies. A schedule is required to generate code.
+class Schedule : public GenericGraph<BasicBlock> {
+ public:
+  explicit Schedule(Zone* zone)
+      : GenericGraph<BasicBlock>(zone),
+        zone_(zone),
+        all_blocks_(BasicBlockVector::allocator_type(zone)),
+        nodeid_to_block_(BasicBlockVector::allocator_type(zone)),
+        rpo_order_(BasicBlockVector::allocator_type(zone)),
+        immediate_dominator_(BasicBlockVector::allocator_type(zone)) {
+    NewBasicBlock();  // entry.
+    NewBasicBlock();  // exit.
+    SetStart(entry());
+    SetEnd(exit());
+  }
+
+  // TODO(titzer): rewrite users of these methods to use start() and end().
+  BasicBlock* entry() const { return all_blocks_[0]; }  // Return entry block.
+  BasicBlock* exit() const { return all_blocks_[1]; }   // Return exit block.
+
+  // Return the block which contains {node}, if any.
+  BasicBlock* block(Node* node) const {
+    if (node->id() < static_cast<NodeId>(nodeid_to_block_.size())) {
+      return nodeid_to_block_[node->id()];
+    }
+    return NULL;
+  }
+
+  BasicBlock* dominator(BasicBlock* block) {
+    return immediate_dominator_[block->id()];
+  }
+
+  bool IsScheduled(Node* node) {
+    int length = static_cast<int>(nodeid_to_block_.size());
+    if (node->id() >= length) return false;
+    return nodeid_to_block_[node->id()] != NULL;
+  }
+
+  BasicBlock* GetBlockById(int block_id) { return all_blocks_[block_id]; }
+
+  int BasicBlockCount() const { return NodeCount(); }
+  int RpoBlockCount() const { return static_cast<int>(rpo_order_.size()); }
+
+  typedef ContainerPointerWrapper<BasicBlockVector> BasicBlocks;
+
+  // Return a list of all the blocks in the schedule, in arbitrary order.
+  BasicBlocks all_blocks() { return BasicBlocks(&all_blocks_); }
+
+  // Check if nodes {a} and {b} are in the same block.
+  inline bool SameBasicBlock(Node* a, Node* b) const {
+    BasicBlock* block = this->block(a);
+    return block != NULL && block == this->block(b);
+  }
+
+  // BasicBlock building: create a new block.
+  inline BasicBlock* NewBasicBlock() {
+    BasicBlock* block =
+        BasicBlock::New(this, 0, static_cast<BasicBlock**>(NULL));
+    all_blocks_.push_back(block);
+    return block;
+  }
+
+  // BasicBlock building: records that a node will later be added to a block but
+  // doesn't actually add the node to the block.
+  inline void PlanNode(BasicBlock* block, Node* node) {
+    if (FLAG_trace_turbo_scheduler) {
+      PrintF("Planning node %d for future add to block %d\n", node->id(),
+             block->id());
+    }
+    ASSERT(this->block(node) == NULL);
+    SetBlockForNode(block, node);
+  }
+
+  // BasicBlock building: add a node to the end of the block.
+  inline void AddNode(BasicBlock* block, Node* node) {
+    if (FLAG_trace_turbo_scheduler) {
+      PrintF("Adding node %d to block %d\n", node->id(), block->id());
+    }
+    ASSERT(this->block(node) == NULL || this->block(node) == block);
+    block->nodes_.push_back(node);
+    SetBlockForNode(block, node);
+  }
+
+  // BasicBlock building: add a goto to the end of {block}.
+  void AddGoto(BasicBlock* block, BasicBlock* succ) {
+    ASSERT(block->control_ == BasicBlock::kNone);
+    block->control_ = BasicBlock::kGoto;
+    AddSuccessor(block, succ);
+  }
+
+  // BasicBlock building: add a (branching) call at the end of {block}.
+  void AddCall(BasicBlock* block, Node* call, BasicBlock* cont_block,
+               BasicBlock* deopt_block) {
+    ASSERT(block->control_ == BasicBlock::kNone);
+    ASSERT(call->opcode() == IrOpcode::kCall);
+    block->control_ = BasicBlock::kCall;
+    // Insert the deopt block first so that the RPO order builder picks
+    // it first (and thus it ends up late in the RPO order).
+    AddSuccessor(block, deopt_block);
+    AddSuccessor(block, cont_block);
+    SetControlInput(block, call);
+  }
+
+  // BasicBlock building: add a branch at the end of {block}.
+  void AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
+                 BasicBlock* fblock) {
+    ASSERT(block->control_ == BasicBlock::kNone);
+    ASSERT(branch->opcode() == IrOpcode::kBranch);
+    block->control_ = BasicBlock::kBranch;
+    AddSuccessor(block, tblock);
+    AddSuccessor(block, fblock);
+    SetControlInput(block, branch);
+  }
+
+  // BasicBlock building: add a return at the end of {block}.
+  void AddReturn(BasicBlock* block, Node* input) {
+    // TODO(titzer): require a Return node here.
+    ASSERT(block->control_ == BasicBlock::kNone);
+    block->control_ = BasicBlock::kReturn;
+    SetControlInput(block, input);
+    if (block != exit()) AddSuccessor(block, exit());
+  }
+
+  // BasicBlock building: add a throw at the end of {block}.
+  void AddThrow(BasicBlock* block, Node* input) {
+    ASSERT(block->control_ == BasicBlock::kNone);
+    block->control_ = BasicBlock::kThrow;
+    SetControlInput(block, input);
+    if (block != exit()) AddSuccessor(block, exit());
+  }
+
+  // BasicBlock building: add a deopt at the end of {block}.
+  void AddDeoptimize(BasicBlock* block, Node* state) {
+    ASSERT(block->control_ == BasicBlock::kNone);
+    block->control_ = BasicBlock::kDeoptimize;
+    SetControlInput(block, state);
+    block->deferred_ = true;  // By default, consider deopts the slow path.
+    if (block != exit()) AddSuccessor(block, exit());
+  }
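+
+  // A minimal construction sketch (illustrative; {branch} is assumed to be an
+  // existing kBranch node):
+  //
+  //   Schedule schedule(zone);
+  //   BasicBlock* t = schedule.NewBasicBlock();
+  //   BasicBlock* f = schedule.NewBasicBlock();
+  //   schedule.AddBranch(schedule.entry(), branch, t, f);
+  //   schedule.AddGoto(t, schedule.exit());
+  //   schedule.AddGoto(f, schedule.exit());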
+
+  friend class Scheduler;
+  friend class CodeGenerator;
+
+  void AddSuccessor(BasicBlock* block, BasicBlock* succ) {
+    succ->AppendInput(zone_, block);
+  }
+
+  BasicBlockVector* rpo_order() { return &rpo_order_; }
+
+ private:
+  friend class ScheduleVisualizer;
+
+  void SetControlInput(BasicBlock* block, Node* node) {
+    block->control_input_ = node;
+    SetBlockForNode(block, node);
+  }
+
+  void SetBlockForNode(BasicBlock* block, Node* node) {
+    int length = static_cast<int>(nodeid_to_block_.size());
+    if (node->id() >= length) {
+      nodeid_to_block_.resize(node->id() + 1);
+    }
+    nodeid_to_block_[node->id()] = block;
+  }
+
+  Zone* zone_;
+  BasicBlockVector all_blocks_;           // All basic blocks in the schedule.
+  BasicBlockVector nodeid_to_block_;      // Map from node to containing block.
+  BasicBlockVector rpo_order_;            // Reverse-post-order block list.
+  BasicBlockVector immediate_dominator_;  // Maps to a block's immediate
+                                          // dominator, indexed by block
+                                          // id.
+};
+
+OStream& operator<<(OStream& os, const Schedule& s);
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_SCHEDULE_H_
diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc
new file mode 100644 (file)
index 0000000..ee8b226
--- /dev/null
@@ -0,0 +1,1065 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/scheduler.h"
+
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/data-flow.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Scheduler::Scheduler(Zone* zone)
+    : zone_(zone),
+      graph_(NULL),
+      schedule_(NULL),
+      branches_(NodeVector::allocator_type(zone)),
+      calls_(NodeVector::allocator_type(zone)),
+      deopts_(NodeVector::allocator_type(zone)),
+      returns_(NodeVector::allocator_type(zone)),
+      loops_and_merges_(NodeVector::allocator_type(zone)),
+      node_block_placement_(BasicBlockVector::allocator_type(zone)),
+      unscheduled_uses_(IntVector::allocator_type(zone)),
+      scheduled_nodes_(NodeVectorVector::allocator_type(zone)),
+      schedule_root_nodes_(NodeVector::allocator_type(zone)),
+      schedule_early_rpo_index_(IntVector::allocator_type(zone)) {}
+
+
+Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
+    : zone_(zone),
+      graph_(graph),
+      schedule_(schedule),
+      branches_(NodeVector::allocator_type(zone)),
+      calls_(NodeVector::allocator_type(zone)),
+      deopts_(NodeVector::allocator_type(zone)),
+      returns_(NodeVector::allocator_type(zone)),
+      loops_and_merges_(NodeVector::allocator_type(zone)),
+      node_block_placement_(BasicBlockVector::allocator_type(zone)),
+      unscheduled_uses_(IntVector::allocator_type(zone)),
+      scheduled_nodes_(NodeVectorVector::allocator_type(zone)),
+      schedule_root_nodes_(NodeVector::allocator_type(zone)),
+      schedule_early_rpo_index_(IntVector::allocator_type(zone)) {}
+
+
+Schedule* Scheduler::NewSchedule(Graph* graph) {
+  graph_ = graph;
+  schedule_ = new (zone_) Schedule(zone_);
+
+  schedule_->AddNode(schedule_->end(), graph_->end());
+
+  PrepareAuxiliaryNodeData();
+
+  // Create basic blocks for each block and merge node in the graph.
+  CreateBlocks();
+
+  // Wire the basic blocks together.
+  WireBlocks();
+
+  PrepareAuxiliaryBlockData();
+
+  ComputeSpecialRPO();
+  GenerateImmediateDominatorTree();
+
+  PrepareUses();
+  ScheduleEarly();
+  ScheduleLate();
+
+  return schedule_;
+}
+
+
+class CreateBlockVisitor : public NullNodeVisitor {
+ public:
+  explicit CreateBlockVisitor(Scheduler* scheduler) : scheduler_(scheduler) {}
+
+  GenericGraphVisit::Control Post(Node* node) {
+    Schedule* schedule = scheduler_->schedule_;
+    switch (node->opcode()) {
+      case IrOpcode::kIfTrue:
+      case IrOpcode::kIfFalse:
+      case IrOpcode::kContinuation:
+      case IrOpcode::kLazyDeoptimization: {
+        BasicBlock* block = schedule->NewBasicBlock();
+        schedule->AddNode(block, node);
+        break;
+      }
+      case IrOpcode::kLoop:
+      case IrOpcode::kMerge: {
+        BasicBlock* block = schedule->NewBasicBlock();
+        schedule->AddNode(block, node);
+        scheduler_->loops_and_merges_.push_back(node);
+        break;
+      }
+      case IrOpcode::kBranch: {
+        scheduler_->branches_.push_back(node);
+        break;
+      }
+      case IrOpcode::kDeoptimize: {
+        scheduler_->deopts_.push_back(node);
+        break;
+      }
+      case IrOpcode::kCall: {
+        if (NodeProperties::CanLazilyDeoptimize(node)) {
+          scheduler_->calls_.push_back(node);
+        }
+        break;
+      }
+      case IrOpcode::kReturn:
+        scheduler_->returns_.push_back(node);
+        break;
+      default:
+        break;
+    }
+
+    return GenericGraphVisit::CONTINUE;
+  }
+
+ private:
+  Scheduler* scheduler_;
+};
+
+
+void Scheduler::CreateBlocks() {
+  CreateBlockVisitor create_blocks(this);
+  if (FLAG_trace_turbo_scheduler) {
+    PrintF("---------------- CREATING BLOCKS ------------------\n");
+  }
+  schedule_->AddNode(schedule_->entry(), graph_->start());
+  graph_->VisitNodeInputsFromEnd(&create_blocks);
+}
+
+
+void Scheduler::WireBlocks() {
+  if (FLAG_trace_turbo_scheduler) {
+    PrintF("----------------- WIRING BLOCKS -------------------\n");
+  }
+  AddSuccessorsForBranches();
+  AddSuccessorsForReturns();
+  AddSuccessorsForCalls();
+  AddSuccessorsForDeopts();
+  AddPredecessorsForLoopsAndMerges();
+  // TODO(danno): Handle Throw, et al.
+}
+
+
+void Scheduler::PrepareAuxiliaryNodeData() {
+  unscheduled_uses_.resize(graph_->NodeCount(), 0);
+  schedule_early_rpo_index_.resize(graph_->NodeCount(), 0);
+}
+
+
+void Scheduler::PrepareAuxiliaryBlockData() {
+  Zone* zone = schedule_->zone();
+  scheduled_nodes_.resize(schedule_->BasicBlockCount(),
+                          NodeVector(NodeVector::allocator_type(zone)));
+  schedule_->immediate_dominator_.resize(schedule_->BasicBlockCount(), NULL);
+}
+
+
+void Scheduler::AddPredecessorsForLoopsAndMerges() {
+  for (NodeVectorIter i = loops_and_merges_.begin();
+       i != loops_and_merges_.end(); ++i) {
+    Node* merge_or_loop = *i;
+    BasicBlock* block = schedule_->block(merge_or_loop);
+    ASSERT(block != NULL);
+    // For each of the merge's control inputs, add a goto at the end of the
+    // input's block to the merge's basic block.
+    for (InputIter j = (*i)->inputs().begin(); j != (*i)->inputs().end(); ++j) {
+      if (NodeProperties::IsBasicBlockBegin(*i)) {
+        BasicBlock* predecessor_block = schedule_->block(*j);
+        if ((*j)->opcode() != IrOpcode::kReturn &&
+            (*j)->opcode() != IrOpcode::kDeoptimize) {
+          ASSERT(predecessor_block != NULL);
+          if (FLAG_trace_turbo_scheduler) {
+            IrOpcode::Value opcode = (*i)->opcode();
+            PrintF("node %d (%s) in block %d -> block %d\n", (*i)->id(),
+                   IrOpcode::Mnemonic(opcode), predecessor_block->id(),
+                   block->id());
+          }
+          schedule_->AddGoto(predecessor_block, block);
+        }
+      }
+    }
+  }
+}
+
+
+void Scheduler::AddSuccessorsForCalls() {
+  for (NodeVectorIter i = calls_.begin(); i != calls_.end(); ++i) {
+    Node* call = *i;
+    ASSERT(call->opcode() == IrOpcode::kCall);
+    ASSERT(NodeProperties::CanLazilyDeoptimize(call));
+
+    Node* lazy_deopt_node = NULL;
+    Node* cont_node = NULL;
+    // Find the continuation and lazy-deopt nodes among the uses.
+    for (UseIter use_iter = call->uses().begin();
+         use_iter != call->uses().end(); ++use_iter) {
+      switch ((*use_iter)->opcode()) {
+        case IrOpcode::kContinuation: {
+          ASSERT(cont_node == NULL);
+          cont_node = *use_iter;
+          break;
+        }
+        case IrOpcode::kLazyDeoptimization: {
+          ASSERT(lazy_deopt_node == NULL);
+          lazy_deopt_node = *use_iter;
+          break;
+        }
+        default:
+          break;
+      }
+    }
+    ASSERT(lazy_deopt_node != NULL);
+    ASSERT(cont_node != NULL);
+    BasicBlock* cont_successor_block = schedule_->block(cont_node);
+    BasicBlock* deopt_successor_block = schedule_->block(lazy_deopt_node);
+    Node* call_block_node = NodeProperties::GetControlInput(call);
+    BasicBlock* call_block = schedule_->block(call_block_node);
+    if (FLAG_trace_turbo_scheduler) {
+      IrOpcode::Value opcode = call->opcode();
+      PrintF("node %d (%s) in block %d -> block %d\n", call->id(),
+             IrOpcode::Mnemonic(opcode), call_block->id(),
+             cont_successor_block->id());
+      PrintF("node %d (%s) in block %d -> block %d\n", call->id(),
+             IrOpcode::Mnemonic(opcode), call_block->id(),
+             deopt_successor_block->id());
+    }
+    schedule_->AddCall(call_block, call, cont_successor_block,
+                       deopt_successor_block);
+  }
+}
+
+
+void Scheduler::AddSuccessorsForDeopts() {
+  for (NodeVectorIter i = deopts_.begin(); i != deopts_.end(); ++i) {
+    Node* deopt_block_node = NodeProperties::GetControlInput(*i);
+    BasicBlock* deopt_block = schedule_->block(deopt_block_node);
+    ASSERT(deopt_block != NULL);
+    if (FLAG_trace_turbo_scheduler) {
+      IrOpcode::Value opcode = (*i)->opcode();
+      PrintF("node %d (%s) in block %d -> end\n", (*i)->id(),
+             IrOpcode::Mnemonic(opcode), deopt_block->id());
+    }
+    schedule_->AddDeoptimize(deopt_block, *i);
+  }
+}
+
+
+void Scheduler::AddSuccessorsForBranches() {
+  for (NodeVectorIter i = branches_.begin(); i != branches_.end(); ++i) {
+    Node* branch = *i;
+    ASSERT(branch->opcode() == IrOpcode::kBranch);
+    Node* branch_block_node = NodeProperties::GetControlInput(branch);
+    BasicBlock* branch_block = schedule_->block(branch_block_node);
+    ASSERT(branch_block != NULL);
+    UseIter use_iter = branch->uses().begin();
+    Node* first_successor = *use_iter;
+    ++use_iter;
+    ASSERT(use_iter != branch->uses().end());
+    Node* second_successor = *use_iter;
+    ASSERT(++use_iter == branch->uses().end());
+    Node* true_successor_node = first_successor->opcode() == IrOpcode::kIfTrue
+                                    ? first_successor
+                                    : second_successor;
+    Node* false_successor_node = first_successor->opcode() == IrOpcode::kIfTrue
+                                     ? second_successor
+                                     : first_successor;
+    ASSERT(true_successor_node->opcode() == IrOpcode::kIfTrue);
+    ASSERT(false_successor_node->opcode() == IrOpcode::kIfFalse);
+    BasicBlock* true_successor_block = schedule_->block(true_successor_node);
+    BasicBlock* false_successor_block = schedule_->block(false_successor_node);
+    ASSERT(true_successor_block != NULL);
+    ASSERT(false_successor_block != NULL);
+    if (FLAG_trace_turbo_scheduler) {
+      IrOpcode::Value opcode = branch->opcode();
+      PrintF("node %d (%s) in block %d -> block %d\n", branch->id(),
+             IrOpcode::Mnemonic(opcode), branch_block->id(),
+             true_successor_block->id());
+      PrintF("node %d (%s) in block %d -> block %d\n", branch->id(),
+             IrOpcode::Mnemonic(opcode), branch_block->id(),
+             false_successor_block->id());
+    }
+    schedule_->AddBranch(branch_block, branch, true_successor_block,
+                         false_successor_block);
+  }
+}
+
+
+void Scheduler::AddSuccessorsForReturns() {
+  for (NodeVectorIter i = returns_.begin(); i != returns_.end(); ++i) {
+    Node* return_block_node = NodeProperties::GetControlInput(*i);
+    BasicBlock* return_block = schedule_->block(return_block_node);
+    ASSERT(return_block != NULL);
+    if (FLAG_trace_turbo_scheduler) {
+      IrOpcode::Value opcode = (*i)->opcode();
+      PrintF("node %d (%s) in block %d -> end\n", (*i)->id(),
+             IrOpcode::Mnemonic(opcode), return_block->id());
+    }
+    schedule_->AddReturn(return_block, *i);
+  }
+}
+
+
+BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
+  while (b1 != b2) {
+    int b1_rpo = GetRPONumber(b1);
+    int b2_rpo = GetRPONumber(b2);
+    ASSERT(b1_rpo != b2_rpo);
+    if (b1_rpo < b2_rpo) {
+      b2 = schedule_->immediate_dominator_[b2->id()];
+    } else {
+      b1 = schedule_->immediate_dominator_[b1->id()];
+    }
+  }
+  return b1;
+}
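+
+// For example, with immediate dominator chains B5 -> B3 -> B1 and B4 -> B1,
+// GetCommonDominator(B5, B4) repeatedly walks whichever block has the larger
+// RPO number up to its immediate dominator until both sides meet, here at B1.
+// (Block numbers are illustrative.)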
+
+
+void Scheduler::GenerateImmediateDominatorTree() {
+  // Build the dominator tree. TODO(danno): consider using Lengauer & Tarjan's
+  // algorithm if this becomes really slow.
+  if (FLAG_trace_turbo_scheduler) {
+    PrintF("------------ IMMEDIATE BLOCK DOMINATORS -----------\n");
+  }
+  for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) {
+    BasicBlock* current_rpo = schedule_->rpo_order_[i];
+    if (current_rpo != schedule_->entry()) {
+      BasicBlock::Predecessors::iterator current_pred =
+          current_rpo->predecessors().begin();
+      BasicBlock::Predecessors::iterator end =
+          current_rpo->predecessors().end();
+      ASSERT(current_pred != end);
+      BasicBlock* dominator = *current_pred;
+      ++current_pred;
+      // For multiple predecessors, walk up the rpo ordering until a common
+      // dominator is found.
+      int current_rpo_pos = GetRPONumber(current_rpo);
+      while (current_pred != end) {
+        // Don't examine backward edges.
+        BasicBlock* pred = *current_pred;
+        if (GetRPONumber(pred) < current_rpo_pos) {
+          dominator = GetCommonDominator(dominator, *current_pred);
+        }
+        ++current_pred;
+      }
+      schedule_->immediate_dominator_[current_rpo->id()] = dominator;
+      if (FLAG_trace_turbo_scheduler) {
+        PrintF("Block %d's idom is %d\n", current_rpo->id(), dominator->id());
+      }
+    }
+  }
+}
+
+
+class ScheduleEarlyNodeVisitor : public NullNodeVisitor {
+ public:
+  explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler)
+      : has_changed_rpo_constraints_(true),
+        scheduler_(scheduler),
+        schedule_(scheduler->schedule_) {}
+
+  GenericGraphVisit::Control Pre(Node* node) {
+    int id = node->id();
+    int max_rpo = 0;
+    // Fixed nodes already know their schedule early position.
+    if (IsFixedNode(node)) {
+      BasicBlock* block = schedule_->block(node);
+      ASSERT(block != NULL);
+      max_rpo = block->rpo_number_;
+      if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) {
+        has_changed_rpo_constraints_ = true;
+      }
+      scheduler_->schedule_early_rpo_index_[id] = max_rpo;
+      if (FLAG_trace_turbo_scheduler) {
+        PrintF("Node %d pre-scheduled early at rpo limit %d\n", id, max_rpo);
+      }
+    }
+    return GenericGraphVisit::CONTINUE;
+  }
+
+  GenericGraphVisit::Control Post(Node* node) {
+    int id = node->id();
+    int max_rpo = 0;
+    // Otherwise, the minimum RPO for the node is the maximum RPO of all of
+    // its inputs.
+    if (!IsFixedNode(node)) {
+      ASSERT(!NodeProperties::IsBasicBlockBegin(node));
+      for (InputIter i = node->inputs().begin(); i != node->inputs().end();
+           ++i) {
+        int control_rpo = scheduler_->schedule_early_rpo_index_[(*i)->id()];
+        if (control_rpo > max_rpo) {
+          max_rpo = control_rpo;
+        }
+      }
+      if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) {
+        has_changed_rpo_constraints_ = true;
+      }
+      scheduler_->schedule_early_rpo_index_[id] = max_rpo;
+      if (FLAG_trace_turbo_scheduler) {
+        PrintF("Node %d post-scheduled early at rpo limit %d\n", id, max_rpo);
+      }
+    }
+    return GenericGraphVisit::CONTINUE;
+  }
+
+  static bool IsFixedNode(Node* node) {
+    return NodeProperties::HasFixedSchedulePosition(node) ||
+           !NodeProperties::CanBeScheduled(node);
+  }
+
+  // TODO(mstarzinger): Dirty hack to unblock others; schedule early should be
+  // rewritten to use a pre-order traversal from the start instead.
+  bool has_changed_rpo_constraints_;
+
+ private:
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+};
+
+
+void Scheduler::ScheduleEarly() {
+  if (FLAG_trace_turbo_scheduler) {
+    PrintF("------------------- SCHEDULE EARLY ----------------\n");
+  }
+
+  int fixpoint_count = 0;
+  ScheduleEarlyNodeVisitor visitor(this);
+  while (visitor.has_changed_rpo_constraints_) {
+    visitor.has_changed_rpo_constraints_ = false;
+    graph_->VisitNodeInputsFromEnd(&visitor);
+    fixpoint_count++;
+  }
+
+  if (FLAG_trace_turbo_scheduler) {
+    PrintF("It took %d iterations to determine fixpoint\n", fixpoint_count);
+  }
+}
+
+
+class PrepareUsesVisitor : public NullNodeVisitor {
+ public:
+  explicit PrepareUsesVisitor(Scheduler* scheduler)
+      : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
+
+  GenericGraphVisit::Control Pre(Node* node) {
+    // Some nodes must be scheduled explicitly to ensure they are in exactly
+    // the right place; the preparation of use counts is a convenient point at
+    // which to schedule them.
+    if (!schedule_->IsScheduled(node) &&
+        NodeProperties::HasFixedSchedulePosition(node)) {
+      if (FLAG_trace_turbo_scheduler) {
+        PrintF("Fixed position node %d is unscheduled, scheduling now\n",
+               node->id());
+      }
+      IrOpcode::Value opcode = node->opcode();
+      BasicBlock* block =
+          opcode == IrOpcode::kParameter
+              ? schedule_->entry()
+              : schedule_->block(NodeProperties::GetControlInput(node));
+      ASSERT(block != NULL);
+      schedule_->AddNode(block, node);
+    }
+
+    if (NodeProperties::IsScheduleRoot(node)) {
+      scheduler_->schedule_root_nodes_.push_back(node);
+    }
+
+    return GenericGraphVisit::CONTINUE;
+  }
+
+  void PostEdge(Node* from, int index, Node* to) {
+    // If the edge is from an unscheduled node, then tally it in the use count
+    // for all of its inputs. The same criterion will be used in ScheduleLate
+    // for decrementing use counts.
+    if (!schedule_->IsScheduled(from) && NodeProperties::CanBeScheduled(from)) {
+      ASSERT(!NodeProperties::HasFixedSchedulePosition(from));
+      ++scheduler_->unscheduled_uses_[to->id()];
+      if (FLAG_trace_turbo_scheduler) {
+        PrintF("Incrementing uses of node %d from %d to %d\n", to->id(),
+               from->id(), scheduler_->unscheduled_uses_[to->id()]);
+      }
+    }
+  }
+
+ private:
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+};
+
+
+void Scheduler::PrepareUses() {
+  if (FLAG_trace_turbo_scheduler) {
+    PrintF("------------------- PREPARE USES ------------------\n");
+  }
+  // Count the uses of every node; this is used to ensure that all of a
+  // node's uses are scheduled before the node itself.
+  PrepareUsesVisitor prepare_uses(this);
+  graph_->VisitNodeInputsFromEnd(&prepare_uses);
+}
+
+
+class ScheduleLateNodeVisitor : public NullNodeVisitor {
+ public:
+  explicit ScheduleLateNodeVisitor(Scheduler* scheduler)
+      : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}
+
+  GenericGraphVisit::Control Pre(Node* node) {
+    // Don't schedule nodes that cannot be scheduled or are already scheduled.
+    if (!NodeProperties::CanBeScheduled(node) || schedule_->IsScheduled(node)) {
+      return GenericGraphVisit::CONTINUE;
+    }
+    ASSERT(!NodeProperties::HasFixedSchedulePosition(node));
+
+    // If all the uses of a node have been scheduled, then the node itself can
+    // be scheduled.
+    bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0;
+    if (FLAG_trace_turbo_scheduler) {
+      PrintF("Testing for schedule eligibility for node %d -> %s\n", node->id(),
+             eligible ? "true" : "false");
+    }
+    if (!eligible) return GenericGraphVisit::DEFER;
+
+    // Determine the dominating block for all of the uses of this node. It is
+    // the latest block that this node can be scheduled in.
+    BasicBlock* block = NULL;
+    for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end();
+         ++i) {
+      BasicBlock* use_block = GetBlockForUse(i.edge());
+      block = block == NULL ? use_block : use_block == NULL
+                                              ? block
+                                              : scheduler_->GetCommonDominator(
+                                                    block, use_block);
+    }
+    ASSERT(block != NULL);
+
+    int min_rpo = scheduler_->schedule_early_rpo_index_[node->id()];
+    if (FLAG_trace_turbo_scheduler) {
+      PrintF(
+          "Schedule late conservative for node %d is block %d at "
+          "loop depth %d, min rpo = %d\n",
+          node->id(), block->id(), block->loop_depth_, min_rpo);
+    }
+    // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
+    // into enclosing loop pre-headers until they would precede their
+    // ScheduleEarly position.
+    BasicBlock* hoist_block = block;
+    while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) {
+      if (hoist_block->loop_depth_ < block->loop_depth_) {
+        block = hoist_block;
+        if (FLAG_trace_turbo_scheduler) {
+          PrintF("Hoisting node %d to block %d\n", node->id(), block->id());
+        }
+      }
+      // Try to hoist to the pre-header of the loop header.
+      hoist_block = hoist_block->loop_header();
+      if (hoist_block != NULL) {
+        BasicBlock* pre_header = schedule_->dominator(hoist_block);
+        ASSERT(pre_header == NULL ||
+               *hoist_block->predecessors().begin() == pre_header);
+        if (FLAG_trace_turbo_scheduler) {
+          PrintF(
+              "Try hoist to pre-header block %d of loop header block %d,"
+              " depth would be %d\n",
+              pre_header->id(), hoist_block->id(), pre_header->loop_depth_);
+        }
+        hoist_block = pre_header;
+      }
+    }
+
+    ScheduleNode(block, node);
+
+    return GenericGraphVisit::CONTINUE;
+  }
+
+ private:
+  BasicBlock* GetBlockForUse(Node::Edge edge) {
+    Node* use = edge.from();
+    IrOpcode::Value opcode = use->opcode();
+    // If the use is a phi, forward through the phi to the basic block
+    // corresponding to the phi's input.
+    if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
+      int index = edge.index();
+      if (FLAG_trace_turbo_scheduler) {
+        PrintF("Use %d is input %d to a phi\n", use->id(), index);
+      }
+      use = NodeProperties::GetControlInput(use, 0);
+      opcode = use->opcode();
+      ASSERT(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
+      use = NodeProperties::GetControlInput(use, index);
+    }
+    BasicBlock* result = schedule_->block(use);
+    if (result == NULL) return NULL;
+    if (FLAG_trace_turbo_scheduler) {
+      PrintF("Must dominate use %d in block %d\n", use->id(), result->id());
+    }
+    return result;
+  }
+
+  bool IsNodeEligible(Node* node) {
+    bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0;
+    return eligible;
+  }
+
+  void ScheduleNode(BasicBlock* block, Node* node) {
+    schedule_->PlanNode(block, node);
+    scheduler_->scheduled_nodes_[block->id()].push_back(node);
+
+    // Reduce the use count of the node's inputs to potentially make them
+    // schedulable.
+    for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
+      ASSERT(scheduler_->unscheduled_uses_[(*i)->id()] > 0);
+      --scheduler_->unscheduled_uses_[(*i)->id()];
+      if (FLAG_trace_turbo_scheduler) {
+        PrintF("Decrementing use count for node %d from node %d (now %d)\n",
+               (*i)->id(), i.edge().from()->id(),
+               scheduler_->unscheduled_uses_[(*i)->id()]);
+        if (scheduler_->unscheduled_uses_[(*i)->id()] == 0) {
+          PrintF("node %d is now eligible for scheduling\n", (*i)->id());
+        }
+      }
+    }
+  }
+
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+};
+
+
+void Scheduler::ScheduleLate() {
+  if (FLAG_trace_turbo_scheduler) {
+    PrintF("------------------- SCHEDULE LATE -----------------\n");
+  }
+
+  // Schedule: place each node in the dominator block of all its uses.
+  ScheduleLateNodeVisitor schedule_late_visitor(this);
+
+  for (NodeVectorIter i = schedule_root_nodes_.begin();
+       i != schedule_root_nodes_.end(); ++i) {
+    GenericGraphVisit::Visit<ScheduleLateNodeVisitor,
+                             NodeInputIterationTraits<Node> >(
+        graph_, *i, &schedule_late_visitor);
+  }
+
+  // Add the collected nodes to their basic blocks in the right order.
+  int block_num = 0;
+  for (NodeVectorVectorIter i = scheduled_nodes_.begin();
+       i != scheduled_nodes_.end(); ++i) {
+    for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) {
+      schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j);
+    }
+    block_num++;
+  }
+}
+
+
+// Numbering for BasicBlockData.rpo_number_ for this block traversal:
+static const int kBlockOnStack = -2;
+static const int kBlockVisited1 = -3;
+static const int kBlockVisited2 = -4;
+static const int kBlockUnvisited1 = -1;
+static const int kBlockUnvisited2 = kBlockVisited1;
+
+struct SpecialRPOStackFrame {
+  BasicBlock* block;
+  int index;
+};
+
+struct BlockList {
+  BasicBlock* block;
+  BlockList* next;
+
+  BlockList* Add(Zone* zone, BasicBlock* b) {
+    BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList)));
+    list->block = b;
+    list->next = this;
+    return list;
+  }
+
+  void Serialize(BasicBlockVector* final_order) {
+    for (BlockList* l = this; l != NULL; l = l->next) {
+      l->block->rpo_number_ = static_cast<int>(final_order->size());
+      final_order->push_back(l->block);
+    }
+  }
+};
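+
+// Note that BlockList::Add prepends: it returns the new list head, so call
+// sites must rebind, e.g. (a sketch): list = list->Add(zone, block);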
+
+struct LoopInfo {
+  BasicBlock* header;
+  ZoneList<BasicBlock*>* outgoing;
+  BitVector* members;
+  LoopInfo* prev;
+  BlockList* end;
+  BlockList* start;
+
+  void AddOutgoing(Zone* zone, BasicBlock* block) {
+    if (outgoing == NULL) outgoing = new (zone) ZoneList<BasicBlock*>(2, zone);
+    outgoing->Add(block, zone);
+  }
+};
+
+
+static int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child,
+                int unvisited) {
+  if (child->rpo_number_ == unvisited) {
+    stack[depth].block = child;
+    stack[depth].index = 0;
+    child->rpo_number_ = kBlockOnStack;
+    return depth + 1;
+  }
+  return depth;
+}
+
+
+// Computes loop membership from the backedges of the control flow graph.
+static LoopInfo* ComputeLoopInfo(
+    Zone* zone, SpecialRPOStackFrame* queue, int num_loops, int num_blocks,
+    ZoneList<std::pair<BasicBlock*, int> >* backedges) {
+  LoopInfo* loops = zone->NewArray<LoopInfo>(num_loops);
+  memset(loops, 0, num_loops * sizeof(LoopInfo));
+
+  // Compute loop membership starting from backedges.
+  // O(max(loop_depth) * max(|loop|))
+  for (int i = 0; i < backedges->length(); i++) {
+    BasicBlock* member = backedges->at(i).first;
+    BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
+    int loop_num = header->loop_end_;
+    if (loops[loop_num].header == NULL) {
+      loops[loop_num].header = header;
+      loops[loop_num].members = new (zone) BitVector(num_blocks, zone);
+    }
+
+    int queue_length = 0;
+    if (member != header) {
+      // As long as the header doesn't have a backedge to itself,
+      // push the member onto the queue and process its predecessors.
+      if (!loops[loop_num].members->Contains(member->id())) {
+        loops[loop_num].members->Add(member->id());
+      }
+      queue[queue_length++].block = member;
+    }
+
+    // Propagate loop membership backwards. All predecessors of the member M
+    // up to the loop header H are members of the loop too.
+    // O(|blocks between M and H|).
+    while (queue_length > 0) {
+      BasicBlock* block = queue[--queue_length].block;
+      for (int j = 0; j < block->PredecessorCount(); j++) {
+        BasicBlock* pred = block->PredecessorAt(j);
+        if (pred != header) {
+          if (!loops[loop_num].members->Contains(pred->id())) {
+            loops[loop_num].members->Add(pred->id());
+            queue[queue_length++].block = pred;
+          }
+        }
+      }
+    }
+  }
+  return loops;
+}
+
+
+#if DEBUG
+static void PrintRPO(int num_loops, LoopInfo* loops, BasicBlockVector* order) {
+  PrintF("-- RPO with %d loops ", num_loops);
+  if (num_loops > 0) {
+    PrintF("(");
+    for (int i = 0; i < num_loops; i++) {
+      if (i > 0) PrintF(" ");
+      PrintF("B%d", loops[i].header->id());
+    }
+    PrintF(") ");
+  }
+  PrintF("-- \n");
+
+  for (int i = 0; i < static_cast<int>(order->size()); i++) {
+    BasicBlock* block = (*order)[i];
+    int bid = block->id();
+    PrintF("%5d:", i);
+    for (int j = 0; j < num_loops; j++) {
+      bool membership = loops[j].members->Contains(bid);
+      bool range = loops[j].header->LoopContains(block);
+      PrintF(membership ? " |" : "  ");
+      PrintF(range ? "x" : " ");
+    }
+    PrintF("  B%d: ", bid);
+    if (block->loop_end_ >= 0) {
+      PrintF(" range: [%d, %d)", block->rpo_number_, block->loop_end_);
+    }
+    PrintF("\n");
+  }
+}
+
+
+static void VerifySpecialRPO(int num_loops, LoopInfo* loops,
+                             BasicBlockVector* order) {
+  ASSERT(order->size() > 0);
+  ASSERT((*order)[0]->id() == 0);  // entry should be first.
+
+  for (int i = 0; i < num_loops; i++) {
+    LoopInfo* loop = &loops[i];
+    BasicBlock* header = loop->header;
+
+    ASSERT(header != NULL);
+    ASSERT(header->rpo_number_ >= 0);
+    ASSERT(header->rpo_number_ < static_cast<int>(order->size()));
+    ASSERT(header->loop_end_ >= 0);
+    ASSERT(header->loop_end_ <= static_cast<int>(order->size()));
+    ASSERT(header->loop_end_ > header->rpo_number_);
+
+    // Verify the start ... end list relationship.
+    int links = 0;
+    BlockList* l = loop->start;
+    ASSERT(l != NULL && l->block == header);
+    bool end_found;
+    while (true) {
+      if (l == NULL || l == loop->end) {
+        end_found = (loop->end == l);
+        break;
+      }
+      // The list should be in the same order as the final result.
+      ASSERT(l->block->rpo_number_ == links + loop->header->rpo_number_);
+      links++;
+      l = l->next;
+      ASSERT(links < static_cast<int>(2 * order->size()));  // cycle?
+    }
+    ASSERT(links > 0);
+    ASSERT(links == (header->loop_end_ - header->rpo_number_));
+    ASSERT(end_found);
+
+    // Check the contiguousness of loops.
+    int count = 0;
+    for (int j = 0; j < static_cast<int>(order->size()); j++) {
+      BasicBlock* block = order->at(j);
+      ASSERT(block->rpo_number_ == j);
+      if (j < header->rpo_number_ || j >= header->loop_end_) {
+        ASSERT(!loop->members->Contains(block->id()));
+      } else {
+        if (block == header) {
+          ASSERT(!loop->members->Contains(block->id()));
+        } else {
+          ASSERT(loop->members->Contains(block->id()));
+        }
+        count++;
+      }
+    }
+    ASSERT(links == count);
+  }
+}
+#endif  // DEBUG
+
+
+// Compute the special reverse-post-order block ordering, which is essentially
+// a RPO of the graph where loop bodies are contiguous. Properties:
+// 1. If block A is a predecessor of B, then A appears before B in the order,
+//    unless B is a loop header and A is in the loop headed at B
+//    (i.e. A -> B is a backedge).
+// => If block A dominates block B, then A appears before B in the order.
+// => If block A is a loop header, A appears before all blocks in the loop
+//    headed at A.
+// 2. All loops are contiguous in the order (i.e. no intervening blocks that
+//    do not belong to the loop).
+// Note a simple RPO traversal satisfies (1) but not (2).
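+//
+// For example (hypothetical), for a graph with edges B0 -> B1, B1 -> B2,
+// B1 -> B3 and the backedge B2 -> B1, a plain RPO may produce B0 B1 B3 B2,
+// interleaving B3 with the loop {B1, B2}; the special RPO B0 B1 B2 B3 keeps
+// the loop contiguous.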
+BasicBlockVector* Scheduler::ComputeSpecialRPO() {
+  if (FLAG_trace_turbo_scheduler) {
+    PrintF("------------- COMPUTING SPECIAL RPO ---------------\n");
+  }
+  // RPO should not have been computed for this schedule yet.
+  CHECK_EQ(kBlockUnvisited1, schedule_->entry()->rpo_number_);
+  CHECK_EQ(0, schedule_->rpo_order_.size());
+
+  // Perform an iterative RPO traversal using an explicit stack,
+  // recording backedges that form cycles. O(|B|).
+  ZoneList<std::pair<BasicBlock*, int> > backedges(1, zone_);
+  SpecialRPOStackFrame* stack =
+      zone_->NewArray<SpecialRPOStackFrame>(schedule_->BasicBlockCount());
+  BasicBlock* entry = schedule_->entry();
+  BlockList* order = NULL;
+  int stack_depth = Push(stack, 0, entry, kBlockUnvisited1);
+  int num_loops = 0;
+
+  while (stack_depth > 0) {
+    int current = stack_depth - 1;
+    SpecialRPOStackFrame* frame = stack + current;
+
+    if (frame->index < frame->block->SuccessorCount()) {
+      // Process the next successor.
+      BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
+      if (succ->rpo_number_ == kBlockVisited1) continue;
+      if (succ->rpo_number_ == kBlockOnStack) {
+        // The successor is on the stack, so this is a backedge (cycle).
+        backedges.Add(
+            std::pair<BasicBlock*, int>(frame->block, frame->index - 1), zone_);
+        if (succ->loop_end_ < 0) {
+          // Assign a new loop number to the header if it doesn't have one.
+          succ->loop_end_ = num_loops++;
+        }
+      } else {
+        // Push the successor onto the stack.
+        ASSERT(succ->rpo_number_ == kBlockUnvisited1);
+        stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1);
+      }
+    } else {
+      // Finished with all successors; pop the stack and add the block.
+      order = order->Add(zone_, frame->block);
+      frame->block->rpo_number_ = kBlockVisited1;
+      stack_depth--;
+    }
+  }
+
+  // If no loops were encountered, then the order we computed was correct.
+  LoopInfo* loops = NULL;
+  if (num_loops != 0) {
+    // Otherwise, compute the loop information from the backedges in order
+    // to perform a traversal that groups loop bodies together.
+    loops = ComputeLoopInfo(zone_, stack, num_loops,
+                            schedule_->BasicBlockCount(), &backedges);
+
+    // Initialize the "loop stack". Note the entry could be a loop header.
+    LoopInfo* loop = entry->IsLoopHeader() ? &loops[entry->loop_end_] : NULL;
+    order = NULL;
+
+    // Perform an iterative post-order traversal, visiting loop bodies before
+    // edges that lead out of loops. Visits each block once, but linking loop
+    // sections together is linear in the loop size, so overall is
+    // O(|B| + max(loop_depth) * max(|loop|))
+    stack_depth = Push(stack, 0, entry, kBlockUnvisited2);
+    while (stack_depth > 0) {
+      SpecialRPOStackFrame* frame = stack + (stack_depth - 1);
+      BasicBlock* block = frame->block;
+      BasicBlock* succ = NULL;
+
+      if (frame->index < block->SuccessorCount()) {
+        // Process the next normal successor.
+        succ = block->SuccessorAt(frame->index++);
+      } else if (block->IsLoopHeader()) {
+        // Process additional outgoing edges from the loop header.
+        if (block->rpo_number_ == kBlockOnStack) {
+          // Finish the loop body the first time the header is left on the
+          // stack.
+          ASSERT(loop != NULL && loop->header == block);
+          loop->start = order->Add(zone_, block);
+          order = loop->end;
+          block->rpo_number_ = kBlockVisited2;
+          // Pop the loop stack and continue visiting outgoing edges within
+          // the context of the outer loop, if any.
+          loop = loop->prev;
+          // We leave the loop header on the stack; the rest of this iteration
+          // and later iterations will go through its outgoing edges list.
+        }
+
+        // Use the next outgoing edge if there are any.
+        int outgoing_index = frame->index - block->SuccessorCount();
+        LoopInfo* info = &loops[block->loop_end_];
+        ASSERT(loop != info);
+        if (info->outgoing != NULL &&
+            outgoing_index < info->outgoing->length()) {
+          succ = info->outgoing->at(outgoing_index);
+          frame->index++;
+        }
+      }
+
+      if (succ != NULL) {
+        // Process the next successor.
+        if (succ->rpo_number_ == kBlockOnStack) continue;
+        if (succ->rpo_number_ == kBlockVisited2) continue;
+        ASSERT(succ->rpo_number_ == kBlockUnvisited2);
+        if (loop != NULL && !loop->members->Contains(succ->id())) {
+          // The successor is not in the current loop or any nested loop.
+          // Add it to the outgoing edges of this loop and visit it later.
+          loop->AddOutgoing(zone_, succ);
+        } else {
+          // Push the successor onto the stack.
+          stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2);
+          if (succ->IsLoopHeader()) {
+            // Push the inner loop onto the loop stack.
+            ASSERT(succ->loop_end_ >= 0 && succ->loop_end_ < num_loops);
+            LoopInfo* next = &loops[succ->loop_end_];
+            next->end = order;
+            next->prev = loop;
+            loop = next;
+          }
+        }
+      } else {
+        // Finished with all successors of the current block.
+        if (block->IsLoopHeader()) {
+          // If we are going to pop a loop header, then add its entire body.
+          LoopInfo* info = &loops[block->loop_end_];
+          for (BlockList* l = info->start; true; l = l->next) {
+            if (l->next == info->end) {
+              l->next = order;
+              info->end = order;
+              break;
+            }
+          }
+          order = info->start;
+        } else {
+          // Pop a single node off the stack and add it to the order.
+          order = order->Add(zone_, block);
+          block->rpo_number_ = kBlockVisited2;
+        }
+        stack_depth--;
+      }
+    }
+  }
+
+  // Construct the final order from the list.
+  BasicBlockVector* final_order = &schedule_->rpo_order_;
+  order->Serialize(final_order);
+
+  // Compute the correct loop header for every block and set the correct loop
+  // ends.
+  LoopInfo* current_loop = NULL;
+  BasicBlock* current_header = NULL;
+  int loop_depth = 0;
+  for (BasicBlockVectorIter i = final_order->begin(); i != final_order->end();
+       ++i) {
+    BasicBlock* current = *i;
+    current->loop_header_ = current_header;
+    if (current->IsLoopHeader()) {
+      loop_depth++;
+      current_loop = &loops[current->loop_end_];
+      BlockList* end = current_loop->end;
+      current->loop_end_ = end == NULL ? static_cast<int>(final_order->size())
+                                       : end->block->rpo_number_;
+      current_header = current_loop->header;
+      if (FLAG_trace_turbo_scheduler) {
+        PrintF("Block %d is a loop header, increment loop depth to %d\n",
+               current->id(), loop_depth);
+      }
+    } else {
+      while (current_header != NULL &&
+             current->rpo_number_ >= current_header->loop_end_) {
+        ASSERT(current_header->IsLoopHeader());
+        ASSERT(current_loop != NULL);
+        current_loop = current_loop->prev;
+        current_header = current_loop == NULL ? NULL : current_loop->header;
+        --loop_depth;
+      }
+    }
+    current->loop_depth_ = loop_depth;
+    if (FLAG_trace_turbo_scheduler) {
+      if (current->loop_header_ == NULL) {
+        PrintF("Block %d's loop header is NULL, loop depth %d\n", current->id(),
+               current->loop_depth_);
+      } else {
+        PrintF("Block %d's loop header is block %d, loop depth %d\n",
+               current->id(), current->loop_header_->id(),
+               current->loop_depth_);
+      }
+    }
+  }
+
+#if DEBUG
+  if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
+  VerifySpecialRPO(num_loops, loops, final_order);
+#endif
+  return final_order;
+}
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/scheduler.h b/src/compiler/scheduler.h
new file mode 100644 (file)
index 0000000..18e8f92
--- /dev/null
@@ -0,0 +1,82 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SCHEDULER_H_
+#define V8_COMPILER_SCHEDULER_H_
+
+#include <vector>
+
+#include "src/v8.h"
+
+#include "src/compiler/opcodes.h"
+#include "src/compiler/schedule.h"
+#include "src/zone-allocator.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Scheduler {
+ public:
+  explicit Scheduler(Zone* zone);
+  Scheduler(Zone* zone, Graph* graph, Schedule* schedule);
+
+  Schedule* NewSchedule(Graph* graph);
+
+  BasicBlockVector* ComputeSpecialRPO();
+
+ private:
+  Zone* zone_;
+  Graph* graph_;
+  Schedule* schedule_;
+  NodeVector branches_;
+  NodeVector calls_;
+  NodeVector deopts_;
+  NodeVector returns_;
+  NodeVector loops_and_merges_;
+  BasicBlockVector node_block_placement_;
+  IntVector unscheduled_uses_;
+  NodeVectorVector scheduled_nodes_;
+  NodeVector schedule_root_nodes_;
+  IntVector schedule_early_rpo_index_;
+
+  int GetRPONumber(BasicBlock* block) {
+    ASSERT(block->rpo_number_ >= 0 &&
+           block->rpo_number_ < static_cast<int>(schedule_->rpo_order_.size()));
+    ASSERT(schedule_->rpo_order_[block->rpo_number_] == block);
+    return block->rpo_number_;
+  }
+
+  void PrepareAuxiliaryNodeData();
+  void PrepareAuxiliaryBlockData();
+
+  friend class CreateBlockVisitor;
+  void CreateBlocks();
+
+  void WireBlocks();
+
+  void AddPredecessorsForLoopsAndMerges();
+  void AddSuccessorsForBranches();
+  void AddSuccessorsForReturns();
+  void AddSuccessorsForCalls();
+  void AddSuccessorsForDeopts();
+
+  void GenerateImmediateDominatorTree();
+  BasicBlock* GetCommonDominator(BasicBlock* b1, BasicBlock* b2);
+
+  friend class ScheduleEarlyNodeVisitor;
+  void ScheduleEarly();
+
+  friend class PrepareUsesVisitor;
+  void PrepareUses();
+
+  friend class ScheduleLateNodeVisitor;
+  void ScheduleLate();
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_SCHEDULER_H_
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
new file mode 100644 (file)
index 0000000..bf19aec
--- /dev/null
@@ -0,0 +1,217 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/simplified-lowering.h"
+
+#include "src/compiler/graph-inl.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Node* SimplifiedLowering::DoChangeTaggedToInt32(Node* node, Node* effect,
+                                                Node* control) {
+  return node;
+}
+
+
+Node* SimplifiedLowering::DoChangeTaggedToUint32(Node* node, Node* effect,
+                                                 Node* control) {
+  return node;
+}
+
+
+Node* SimplifiedLowering::DoChangeTaggedToFloat64(Node* node, Node* effect,
+                                                  Node* control) {
+  return node;
+}
+
+
+Node* SimplifiedLowering::DoChangeInt32ToTagged(Node* node, Node* effect,
+                                                Node* control) {
+  return node;
+}
+
+
+Node* SimplifiedLowering::DoChangeUint32ToTagged(Node* node, Node* effect,
+                                                 Node* control) {
+  return node;
+}
+
+
+Node* SimplifiedLowering::DoChangeFloat64ToTagged(Node* node, Node* effect,
+                                                  Node* control) {
+  return node;
+}
+
+
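+// Lowers Bool -> Bit to a word comparison against the canonical true value;
+// the comparison yields 1 iff the input is the true object.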
+Node* SimplifiedLowering::DoChangeBoolToBit(Node* node, Node* effect,
+                                            Node* control) {
+  Node* val = node->InputAt(0);
+  Operator* op = machine()->WordEqual();
+  return graph()->NewNode(op, val, jsgraph()->TrueConstant());
+}
+
+
+Node* SimplifiedLowering::DoChangeBitToBool(Node* node, Node* effect,
+                                            Node* control) {
+  return node;
+}
+
+
+static WriteBarrierKind ComputeWriteBarrierKind(
+    MachineRepresentation representation, Type* type) {
+  // TODO(turbofan): skip write barriers for Smis, etc.
+  if (representation == kMachineTagged) {
+    return kFullWriteBarrier;
+  }
+  return kNoWriteBarrier;
+}
+
+
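+// Lowers LoadField(object) to a machine Load(object, offset), where offset is
+// the field's untagged byte offset (access.offset - kHeapObjectTag).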
+Node* SimplifiedLowering::DoLoadField(Node* node, Node* effect, Node* control) {
+  const FieldAccess& access = FieldAccessOf(node->op());
+  node->set_op(machine_.Load(access.representation));
+  Node* offset =
+      graph()->NewNode(common()->Int32Constant(access.offset - kHeapObjectTag));
+  node->InsertInput(zone(), 1, offset);
+  return node;
+}
+
+
+Node* SimplifiedLowering::DoStoreField(Node* node, Node* effect,
+                                       Node* control) {
+  const FieldAccess& access = FieldAccessOf(node->op());
+  WriteBarrierKind kind =
+      ComputeWriteBarrierKind(access.representation, access.type);
+  node->set_op(machine_.Store(access.representation, kind));
+  Node* offset =
+      graph()->NewNode(common()->Int32Constant(access.offset - kHeapObjectTag));
+  node->InsertInput(zone(), 1, offset);
+  return node;
+}
+
+
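+// Computes the byte offset index * element_size + header_size - kHeapObjectTag
+// for an element access. For example (assuming a 32-bit target where
+// kPointerSize == 4 and kHeapObjectTag == 1), a tagged access with
+// header_size == 8 lowers to Int32Add(Int32Constant(7), Int32Mul(4, index)).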
+Node* SimplifiedLowering::ComputeIndex(const ElementAccess& access,
+                                       Node* index) {
+  int element_size = 0;
+  switch (access.representation) {
+    case kMachineTagged:
+      element_size = kPointerSize;
+      break;
+    case kMachineWord8:
+      element_size = 1;
+      break;
+    case kMachineWord16:
+      element_size = 2;
+      break;
+    case kMachineWord32:
+      element_size = 4;
+      break;
+    case kMachineWord64:
+    case kMachineFloat64:
+      element_size = 8;
+      break;
+    case kMachineLast:
+      UNREACHABLE();
+      break;
+  }
+  if (element_size != 1) {
+    index = graph()->NewNode(
+        machine()->Int32Mul(),
+        graph()->NewNode(common()->Int32Constant(element_size)), index);
+  }
+  int fixed_offset = access.header_size - kHeapObjectTag;
+  if (fixed_offset == 0) return index;
+  return graph()->NewNode(
+      machine()->Int32Add(),
+      graph()->NewNode(common()->Int32Constant(fixed_offset)), index);
+}
+
+
+Node* SimplifiedLowering::DoLoadElement(Node* node, Node* effect,
+                                        Node* control) {
+  const ElementAccess& access = ElementAccessOf(node->op());
+  node->set_op(machine_.Load(access.representation));
+  node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+  return node;
+}
+
+
+Node* SimplifiedLowering::DoStoreElement(Node* node, Node* effect,
+                                         Node* control) {
+  const ElementAccess& access = ElementAccessOf(node->op());
+  WriteBarrierKind kind =
+      ComputeWriteBarrierKind(access.representation, access.type);
+  node->set_op(machine_.Store(access.representation, kind));
+  node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+  return node;
+}
+
+
+void SimplifiedLowering::Lower(Node* node) {
+  Node* start = graph()->start();
+  switch (node->opcode()) {
+    case IrOpcode::kBooleanNot:
+    case IrOpcode::kNumberEqual:
+    case IrOpcode::kNumberLessThan:
+    case IrOpcode::kNumberLessThanOrEqual:
+    case IrOpcode::kNumberAdd:
+    case IrOpcode::kNumberSubtract:
+    case IrOpcode::kNumberMultiply:
+    case IrOpcode::kNumberDivide:
+    case IrOpcode::kNumberModulus:
+    case IrOpcode::kNumberToInt32:
+    case IrOpcode::kNumberToUint32:
+    case IrOpcode::kReferenceEqual:
+    case IrOpcode::kStringEqual:
+    case IrOpcode::kStringLessThan:
+    case IrOpcode::kStringLessThanOrEqual:
+    case IrOpcode::kStringAdd:
+      break;
+    case IrOpcode::kChangeTaggedToInt32:
+      DoChangeTaggedToInt32(node, start, start);
+      break;
+    case IrOpcode::kChangeTaggedToUint32:
+      DoChangeTaggedToUint32(node, start, start);
+      break;
+    case IrOpcode::kChangeTaggedToFloat64:
+      DoChangeTaggedToFloat64(node, start, start);
+      break;
+    case IrOpcode::kChangeInt32ToTagged:
+      DoChangeInt32ToTagged(node, start, start);
+      break;
+    case IrOpcode::kChangeUint32ToTagged:
+      DoChangeUint32ToTagged(node, start, start);
+      break;
+    case IrOpcode::kChangeFloat64ToTagged:
+      DoChangeFloat64ToTagged(node, start, start);
+      break;
+    case IrOpcode::kChangeBoolToBit:
+      node->ReplaceUses(DoChangeBoolToBit(node, start, start));
+      break;
+    case IrOpcode::kChangeBitToBool:
+      DoChangeBitToBool(node, start, start);
+      break;
+    case IrOpcode::kLoadField:
+      DoLoadField(node, start, start);
+      break;
+    case IrOpcode::kStoreField:
+      DoStoreField(node, start, start);
+      break;
+    case IrOpcode::kLoadElement:
+      DoLoadElement(node, start, start);
+      break;
+    case IrOpcode::kStoreElement:
+      DoStoreElement(node, start, start);
+      break;
+    default:
+      break;
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/simplified-lowering.h b/src/compiler/simplified-lowering.h
new file mode 100644 (file)
index 0000000..0e67cc6
--- /dev/null
@@ -0,0 +1,60 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SIMPLIFIED_LOWERING_H_
+#define V8_COMPILER_SIMPLIFIED_LOWERING_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/lowering-builder.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SimplifiedLowering : public LoweringBuilder {
+ public:
+  explicit SimplifiedLowering(JSGraph* jsgraph,
+                              SourcePositionTable* source_positions)
+      : LoweringBuilder(jsgraph->graph(), source_positions),
+        jsgraph_(jsgraph),
+        machine_(jsgraph->zone()) {}
+  virtual ~SimplifiedLowering() {}
+
+  virtual void Lower(Node* node);
+
+ private:
+  JSGraph* jsgraph_;
+  MachineOperatorBuilder machine_;
+
+  Node* DoChangeTaggedToInt32(Node* node, Node* effect, Node* control);
+  Node* DoChangeTaggedToUint32(Node* node, Node* effect, Node* control);
+  Node* DoChangeTaggedToFloat64(Node* node, Node* effect, Node* control);
+  Node* DoChangeInt32ToTagged(Node* node, Node* effect, Node* control);
+  Node* DoChangeUint32ToTagged(Node* node, Node* effect, Node* control);
+  Node* DoChangeFloat64ToTagged(Node* node, Node* effect, Node* control);
+  Node* DoChangeBoolToBit(Node* node, Node* effect, Node* control);
+  Node* DoChangeBitToBool(Node* node, Node* effect, Node* control);
+  Node* DoLoadField(Node* node, Node* effect, Node* control);
+  Node* DoStoreField(Node* node, Node* effect, Node* control);
+  Node* DoLoadElement(Node* node, Node* effect, Node* control);
+  Node* DoStoreElement(Node* node, Node* effect, Node* control);
+
+  Node* ComputeIndex(const ElementAccess& access, Node* index);
+
+  Zone* zone() { return jsgraph_->zone(); }
+  JSGraph* jsgraph() { return jsgraph_; }
+  Graph* graph() { return jsgraph()->graph(); }
+  CommonOperatorBuilder* common() { return jsgraph()->common(); }
+  MachineOperatorBuilder* machine() { return &machine_; }
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_SIMPLIFIED_LOWERING_H_
diff --git a/src/compiler/simplified-node-factory.h b/src/compiler/simplified-node-factory.h
new file mode 100644 (file)
index 0000000..8660ce6
--- /dev/null
@@ -0,0 +1,128 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SIMPLIFIED_NODE_FACTORY_H_
+#define V8_COMPILER_SIMPLIFIED_NODE_FACTORY_H_
+
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define SIMPLIFIED() static_cast<NodeFactory*>(this)->simplified()
+#define NEW_NODE_1(op, a) static_cast<NodeFactory*>(this)->NewNode(op, a)
+#define NEW_NODE_2(op, a, b) static_cast<NodeFactory*>(this)->NewNode(op, a, b)
+#define NEW_NODE_3(op, a, b, c) \
+  static_cast<NodeFactory*>(this)->NewNode(op, a, b, c)
+
+template <typename NodeFactory>
+class SimplifiedNodeFactory {
+ public:
+  Node* BooleanNot(Node* a) {
+    return NEW_NODE_1(SIMPLIFIED()->BooleanNot(), a);
+  }
+
+  Node* NumberEqual(Node* a, Node* b) {
+    return NEW_NODE_2(SIMPLIFIED()->NumberEqual(), a, b);
+  }
+  Node* NumberNotEqual(Node* a, Node* b) {
+    return NEW_NODE_2(SIMPLIFIED()->NumberNotEqual(), a, b);
+  }
+  Node* NumberLessThan(Node* a, Node* b) {
+    return NEW_NODE_2(SIMPLIFIED()->NumberLessThan(), a, b);
+  }
+  Node* NumberLessThanOrEqual(Node* a, Node* b) {
+    return NEW_NODE_2(SIMPLIFIED()->NumberLessThanOrEqual(), a, b);
+  }
+  Node* NumberAdd(Node* a, Node* b) {
+    return NEW_NODE_2(SIMPLIFIED()->NumberAdd(), a, b);
+  }
+  Node* NumberSubtract(Node* a, Node* b) {
+    return NEW_NODE_2(SIMPLIFIED()->NumberSubtract(), a, b);
+  }
+  Node* NumberMultiply(Node* a, Node* b) {
+    return NEW_NODE_2(SIMPLIFIED()->NumberMultiply(), a, b);
+  }
+  Node* NumberDivide(Node* a, Node* b) {
+    return NEW_NODE_2(SIMPLIFIED()->NumberDivide(), a, b);
+  }
+  Node* NumberModulus(Node* a, Node* b) {
+    return NEW_NODE_2(SIMPLIFIED()->NumberModulus(), a, b);
+  }
+  Node* NumberToInt32(Node* a) {
+    return NEW_NODE_1(SIMPLIFIED()->NumberToInt32(), a);
+  }
+  Node* NumberToUint32(Node* a) {
+    return NEW_NODE_1(SIMPLIFIED()->NumberToUint32(), a);
+  }
+
+  Node* ReferenceEqual(Type* type, Node* a, Node* b) {
+    return NEW_NODE_2(SIMPLIFIED()->ReferenceEqual(), a, b);
+  }
+
+  Node* StringEqual(Node* a, Node* b) {
+    return NEW_NODE_2(SIMPLIFIED()->StringEqual(), a, b);
+  }
+  Node* StringLessThan(Node* a, Node* b) {
+    return NEW_NODE_2(SIMPLIFIED()->StringLessThan(), a, b);
+  }
+  Node* StringLessThanOrEqual(Node* a, Node* b) {
+    return NEW_NODE_2(SIMPLIFIED()->StringLessThanOrEqual(), a, b);
+  }
+  Node* StringAdd(Node* a, Node* b) {
+    return NEW_NODE_2(SIMPLIFIED()->StringAdd(), a, b);
+  }
+
+  Node* ChangeTaggedToInt32(Node* a) {
+    return NEW_NODE_1(SIMPLIFIED()->ChangeTaggedToInt32(), a);
+  }
+  Node* ChangeTaggedToUint32(Node* a) {
+    return NEW_NODE_1(SIMPLIFIED()->ChangeTaggedToUint32(), a);
+  }
+  Node* ChangeTaggedToFloat64(Node* a) {
+    return NEW_NODE_1(SIMPLIFIED()->ChangeTaggedToFloat64(), a);
+  }
+  Node* ChangeInt32ToTagged(Node* a) {
+    return NEW_NODE_1(SIMPLIFIED()->ChangeInt32ToTagged(), a);
+  }
+  Node* ChangeUint32ToTagged(Node* a) {
+    return NEW_NODE_1(SIMPLIFIED()->ChangeUint32ToTagged(), a);
+  }
+  Node* ChangeFloat64ToTagged(Node* a) {
+    return NEW_NODE_1(SIMPLIFIED()->ChangeFloat64ToTagged(), a);
+  }
+  Node* ChangeBoolToBit(Node* a) {
+    return NEW_NODE_1(SIMPLIFIED()->ChangeBoolToBit(), a);
+  }
+  Node* ChangeBitToBool(Node* a) {
+    return NEW_NODE_1(SIMPLIFIED()->ChangeBitToBool(), a);
+  }
+
+  Node* LoadField(const FieldAccess& access, Node* object) {
+    return NEW_NODE_1(SIMPLIFIED()->LoadField(access), object);
+  }
+  Node* StoreField(const FieldAccess& access, Node* object, Node* value) {
+    return NEW_NODE_2(SIMPLIFIED()->StoreField(access), object, value);
+  }
+  Node* LoadElement(const ElementAccess& access, Node* object, Node* index) {
+    return NEW_NODE_2(SIMPLIFIED()->LoadElement(access), object, index);
+  }
+  Node* StoreElement(const ElementAccess& access, Node* object, Node* index,
+                     Node* value) {
+    return NEW_NODE_3(SIMPLIFIED()->StoreElement(access), object, index, value);
+  }
+};
+
+#undef NEW_NODE_1
+#undef NEW_NODE_2
+#undef NEW_NODE_3
+#undef SIMPLIFIED
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_SIMPLIFIED_NODE_FACTORY_H_
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
new file mode 100644 (file)
index 0000000..6410f2f
--- /dev/null
@@ -0,0 +1,177 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SIMPLIFIED_OPERATOR_H_
+#define V8_COMPILER_SIMPLIFIED_OPERATOR_H_
+
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/opcodes.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// An access descriptor for loads/stores at fixed offsets within structures,
+// such as fields of heap objects.
+struct FieldAccess {
+  int offset;
+  Handle<Name> name;  // debug only.
+  Type* type;
+  MachineRepresentation representation;
+};
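+//
+// For example (hypothetical), a load of a HeapNumber's value could be
+// described as {HeapNumber::kValueOffset, name, Type::Number(),
+// kMachineFloat64}.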
+
+
+// An access descriptor for loads/stores of indexed structures
+// like characters in strings or off-heap backing stores.
+struct ElementAccess {
+  int header_size;
+  Type* type;
+  MachineRepresentation representation;
+};
+
+
+// If the accessed object is not a heap object, add this to the header_size.
+static const int kNonHeapObjectHeaderSize = kHeapObjectTag;
+
+
+// Specialization for static parameters of type {FieldAccess}.
+template <>
+struct StaticParameterTraits<const FieldAccess> {
+  static OStream& PrintTo(OStream& os, const FieldAccess& val) {  // NOLINT
+    return os << val.offset;
+  }
+  static int HashCode(const FieldAccess& val) {
+    return (val.offset << 16) | (val.representation & 0xffff);
+  }
+  static bool Equals(const FieldAccess& a, const FieldAccess& b) {
+    return a.offset == b.offset && a.representation == b.representation &&
+           a.type->Is(b.type);
+  }
+};
+
+
+// Specialization for static parameters of type {ElementAccess}.
+template <>
+struct StaticParameterTraits<const ElementAccess> {
+  static OStream& PrintTo(OStream& os, const ElementAccess& val) {  // NOLINT
+    return os << val.header_size;
+  }
+  static int HashCode(const ElementAccess& val) {
+    return (val.header_size << 16) | (val.representation & 0xffff);
+  }
+  static bool Equals(const ElementAccess& a, const ElementAccess& b) {
+    return a.header_size == b.header_size &&
+           a.representation == b.representation && a.type->Is(b.type);
+  }
+};
+
+
+inline const FieldAccess FieldAccessOf(Operator* op) {
+  ASSERT(op->opcode() == IrOpcode::kLoadField ||
+         op->opcode() == IrOpcode::kStoreField);
+  return static_cast<Operator1<FieldAccess>*>(op)->parameter();
+}
+
+
+inline const ElementAccess ElementAccessOf(Operator* op) {
+  ASSERT(op->opcode() == IrOpcode::kLoadElement ||
+         op->opcode() == IrOpcode::kStoreElement);
+  return static_cast<Operator1<ElementAccess>*>(op)->parameter();
+}
+
+
+// Interface for building simplified operators, which represent the
+// medium-level operations of V8, including adding numbers, allocating objects,
+// indexing into objects and arrays, etc.
+// All operators are typed but many are representation independent.
+
+// Number values from JS can be in one of these representations:
+//   - Tagged: word-sized integer that is either
+//     - a signed small integer (31 or 32 bits plus a tag)
+//     - a tagged pointer to a HeapNumber object that has a float64 field
+//   - Int32: an untagged signed 32-bit integer
+//   - Uint32: an untagged unsigned 32-bit integer
+//   - Float64: an untagged float64
+
+// Additional representations for intermediate code or non-JS code:
+//   - Int64: an untagged signed 64-bit integer
+//   - Uint64: an untagged unsigned 64-bit integer
+//   - Float32: an untagged float32
+
+// Boolean values can be:
+//   - Bool: a tagged pointer to either the canonical JS #false or
+//           the canonical JS #true object
+//   - Bit: an untagged integer 0 or 1, but word-sized
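+//
+// For example (assuming 32-bit Smi tagging), the Tagged word 0x00000008
+// encodes the small integer 4 (value << 1, tag bit 0), while a word with the
+// low bit set is a tagged pointer to a heap object such as a HeapNumber.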
+class SimplifiedOperatorBuilder {
+ public:
+  explicit inline SimplifiedOperatorBuilder(Zone* zone) : zone_(zone) {}
+
+#define SIMPLE(name, properties, inputs, outputs) \
+  return new (zone_)                              \
+      SimpleOperator(IrOpcode::k##name, properties, inputs, outputs, #name);
+
+#define OP1(name, ptype, pname, properties, inputs, outputs)               \
+  return new (zone_)                                                       \
+      Operator1<ptype>(IrOpcode::k##name, properties | Operator::kNoThrow, \
+                       inputs, outputs, #name, pname)
+
+#define UNOP(name) SIMPLE(name, Operator::kPure, 1, 1)
+#define BINOP(name) SIMPLE(name, Operator::kPure, 2, 1)
+
+  Operator* BooleanNot() const { UNOP(BooleanNot); }
+
+  Operator* NumberEqual() const { BINOP(NumberEqual); }
+  Operator* NumberLessThan() const { BINOP(NumberLessThan); }
+  Operator* NumberLessThanOrEqual() const { BINOP(NumberLessThanOrEqual); }
+  Operator* NumberAdd() const { BINOP(NumberAdd); }
+  Operator* NumberSubtract() const { BINOP(NumberSubtract); }
+  Operator* NumberMultiply() const { BINOP(NumberMultiply); }
+  Operator* NumberDivide() const { BINOP(NumberDivide); }
+  Operator* NumberModulus() const { BINOP(NumberModulus); }
+  Operator* NumberToInt32() const { UNOP(NumberToInt32); }
+  Operator* NumberToUint32() const { UNOP(NumberToUint32); }
+
+  Operator* ReferenceEqual(Type* type) const { BINOP(ReferenceEqual); }
+
+  Operator* StringEqual() const { BINOP(StringEqual); }
+  Operator* StringLessThan() const { BINOP(StringLessThan); }
+  Operator* StringLessThanOrEqual() const { BINOP(StringLessThanOrEqual); }
+  Operator* StringAdd() const { BINOP(StringAdd); }
+
+  Operator* ChangeTaggedToInt32() const { UNOP(ChangeTaggedToInt32); }
+  Operator* ChangeTaggedToUint32() const { UNOP(ChangeTaggedToUint32); }
+  Operator* ChangeTaggedToFloat64() const { UNOP(ChangeTaggedToFloat64); }
+  Operator* ChangeInt32ToTagged() const { UNOP(ChangeInt32ToTagged); }
+  Operator* ChangeUint32ToTagged() const { UNOP(ChangeUint32ToTagged); }
+  Operator* ChangeFloat64ToTagged() const { UNOP(ChangeFloat64ToTagged); }
+  Operator* ChangeBoolToBit() const { UNOP(ChangeBoolToBit); }
+  Operator* ChangeBitToBool() const { UNOP(ChangeBitToBool); }
+
+  Operator* LoadField(const FieldAccess& access) const {
+    OP1(LoadField, FieldAccess, access, Operator::kNoWrite, 1, 1);
+  }
+  Operator* StoreField(const FieldAccess& access) const {
+    OP1(StoreField, FieldAccess, access, Operator::kNoRead, 2, 0);
+  }
+  Operator* LoadElement(const ElementAccess& access) const {
+    OP1(LoadElement, ElementAccess, access, Operator::kNoWrite, 2, 1);
+  }
+  Operator* StoreElement(const ElementAccess& access) const {
+    OP1(StoreElement, ElementAccess, access, Operator::kNoRead, 3, 0);
+  }
+
+#undef BINOP
+#undef UNOP
+#undef OP1
+#undef SIMPLE
+
+ private:
+  Zone* zone_;
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_SIMPLIFIED_OPERATOR_H_
diff --git a/src/compiler/source-position.cc b/src/compiler/source-position.cc
new file mode 100644 (file)
index 0000000..9f122fa
--- /dev/null
@@ -0,0 +1,55 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/source-position.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node-aux-data-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SourcePositionTable::Decorator : public GraphDecorator {
+ public:
+  explicit Decorator(SourcePositionTable* source_positions)
+      : source_positions_(source_positions) {}
+
+  virtual void Decorate(Node* node) {
+    ASSERT(!source_positions_->current_position_.IsInvalid());
+    source_positions_->table_.Set(node, source_positions_->current_position_);
+  }
+
+ private:
+  SourcePositionTable* source_positions_;
+};
+
+
+SourcePositionTable::SourcePositionTable(Graph* graph)
+    : graph_(graph),
+      decorator_(NULL),
+      current_position_(SourcePosition::Invalid()),
+      table_(graph) {}
+
+
+void SourcePositionTable::AddDecorator() {
+  ASSERT(decorator_ == NULL);
+  decorator_ = new (graph_->zone()) Decorator(this);
+  graph_->AddDecorator(decorator_);
+}
+
+
+void SourcePositionTable::RemoveDecorator() {
+  ASSERT(decorator_ != NULL);
+  graph_->RemoveDecorator(decorator_);
+  decorator_ = NULL;
+}
+
+
+SourcePosition SourcePositionTable::GetSourcePosition(Node* node) {
+  return table_.Get(node);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/source-position.h b/src/compiler/source-position.h
new file mode 100644 (file)
index 0000000..b81582f
--- /dev/null
@@ -0,0 +1,99 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SOURCE_POSITION_H_
+#define V8_COMPILER_SOURCE_POSITION_H_
+
+#include "src/assembler.h"
+#include "src/compiler/node-aux-data.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Encapsulates encoding and decoding of source positions from which Nodes
+// originated.
+class SourcePosition V8_FINAL {
+ public:
+  explicit SourcePosition(int raw = kUnknownPosition) : raw_(raw) {}
+
+  static SourcePosition Unknown() { return SourcePosition(kUnknownPosition); }
+  bool IsUnknown() const { return raw() == kUnknownPosition; }
+
+  static SourcePosition Invalid() { return SourcePosition(kInvalidPosition); }
+  bool IsInvalid() const { return raw() == kInvalidPosition; }
+
+  int raw() const { return raw_; }
+
+ private:
+  static const int kInvalidPosition = -2;
+  static const int kUnknownPosition = RelocInfo::kNoPosition;
+  STATIC_ASSERT(kInvalidPosition != kUnknownPosition);
+  int raw_;
+};
+
+
+inline bool operator==(const SourcePosition& lhs, const SourcePosition& rhs) {
+  return lhs.raw() == rhs.raw();
+}
+
+inline bool operator!=(const SourcePosition& lhs, const SourcePosition& rhs) {
+  return !(lhs == rhs);
+}
+
+
+class SourcePositionTable V8_FINAL {
+ public:
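+  // RAII helper: while a Scope is alive, every node created on the graph is
+  // decorated with the given position; the previous position is restored when
+  // the Scope is destroyed.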
+  class Scope {
+   public:
+    Scope(SourcePositionTable* source_positions, SourcePosition position)
+        : source_positions_(source_positions),
+          prev_position_(source_positions->current_position_) {
+      Init(position);
+    }
+    Scope(SourcePositionTable* source_positions, Node* node)
+        : source_positions_(source_positions),
+          prev_position_(source_positions->current_position_) {
+      Init(source_positions_->GetSourcePosition(node));
+    }
+    ~Scope() { source_positions_->current_position_ = prev_position_; }
+
+   private:
+    void Init(SourcePosition position) {
+      if (!position.IsUnknown() || prev_position_.IsInvalid()) {
+        source_positions_->current_position_ = position;
+      }
+    }
+
+    SourcePositionTable* source_positions_;
+    SourcePosition prev_position_;
+    DISALLOW_COPY_AND_ASSIGN(Scope);
+  };
+
+  explicit SourcePositionTable(Graph* graph);
+  ~SourcePositionTable() {
+    if (decorator_ != NULL) RemoveDecorator();
+  }
+
+  void AddDecorator();
+  void RemoveDecorator();
+
+  SourcePosition GetSourcePosition(Node* node);
+
+ private:
+  class Decorator;
+
+  Graph* graph_;
+  Decorator* decorator_;
+  SourcePosition current_position_;
+  NodeAuxData<SourcePosition> table_;
+
+  DISALLOW_COPY_AND_ASSIGN(SourcePositionTable);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_SOURCE_POSITION_H_
diff --git a/src/compiler/structured-machine-assembler.cc b/src/compiler/structured-machine-assembler.cc
new file mode 100644 (file)
index 0000000..3e4c343
--- /dev/null
@@ -0,0 +1,662 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/pipeline.h"
+#include "src/compiler/scheduler.h"
+#include "src/compiler/structured-machine-assembler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Node* Variable::Get() const { return smasm_->GetVariable(offset_); }
+
+
+void Variable::Set(Node* value) const { smasm_->SetVariable(offset_, value); }
+
+
+StructuredMachineAssembler::StructuredMachineAssembler(
+    Graph* graph, MachineCallDescriptorBuilder* call_descriptor_builder,
+    MachineRepresentation word)
+    : GraphBuilder(graph),
+      schedule_(new (zone()) Schedule(zone())),
+      machine_(zone(), word),
+      common_(zone()),
+      call_descriptor_builder_(call_descriptor_builder),
+      parameters_(NULL),
+      current_environment_(new (zone())
+                           Environment(zone(), schedule()->entry(), false)),
+      number_of_variables_(0) {
+  if (parameter_count() == 0) return;
+  parameters_ = zone()->NewArray<Node*>(parameter_count());
+  for (int i = 0; i < parameter_count(); ++i) {
+    parameters_[i] = NewNode(common()->Parameter(i));
+  }
+}
+
+
+Schedule* StructuredMachineAssembler::Export() {
+  // Compute the correct codegen order.
+  ASSERT(schedule_->rpo_order()->empty());
+  Scheduler scheduler(zone(), graph(), schedule_);
+  scheduler.ComputeSpecialRPO();
+  // Invalidate MachineAssembler.
+  Schedule* schedule = schedule_;
+  schedule_ = NULL;
+  return schedule;
+}
+
+
+Node* StructuredMachineAssembler::Parameter(int index) {
+  ASSERT(0 <= index && index < parameter_count());
+  return parameters_[index];
+}
+
+
+Node* StructuredMachineAssembler::MakeNode(Operator* op, int input_count,
+                                           Node** inputs) {
+  ASSERT(ScheduleValid());
+  ASSERT(current_environment_ != NULL);
+  Node* node = graph()->NewNode(op, input_count, inputs);
+  BasicBlock* block = NULL;
+  switch (op->opcode()) {
+    case IrOpcode::kParameter:
+    case IrOpcode::kInt32Constant:
+    case IrOpcode::kInt64Constant:
+    case IrOpcode::kFloat64Constant:
+    case IrOpcode::kExternalConstant:
+    case IrOpcode::kNumberConstant:
+    case IrOpcode::kHeapConstant:
+      // Parameters and constants must be in start.
+      block = schedule()->start();
+      break;
+    default:
+      // Verify all leaf nodes handled above.
+      ASSERT((op->OutputCount() == 0) == (op->opcode() == IrOpcode::kStore));
+      block = current_environment_->block_;
+      break;
+  }
+  if (block != NULL) {
+    schedule()->AddNode(block, node);
+  }
+  return node;
+}
+
+
+Variable StructuredMachineAssembler::NewVariable(Node* initial_value) {
+  CHECK(initial_value != NULL);
+  int offset = number_of_variables_++;
+  // Extend current environment to correct number of values.
+  NodeVector* variables = CurrentVars();
+  size_t to_add = number_of_variables_ - variables->size();
+  if (to_add != 0) {
+    variables->reserve(number_of_variables_);
+    variables->insert(variables->end(), to_add, NULL);
+  }
+  variables->at(offset) = initial_value;
+  return Variable(this, offset);
+}
+
+
+Node* StructuredMachineAssembler::GetVariable(int offset) {
+  ASSERT(ScheduleValid());
+  return VariableAt(current_environment_, offset);
+}
+
+
+void StructuredMachineAssembler::SetVariable(int offset, Node* value) {
+  ASSERT(ScheduleValid());
+  Node*& ref = VariableAt(current_environment_, offset);
+  ref = value;
+}
+
+
+Node*& StructuredMachineAssembler::VariableAt(Environment* environment,
+                                              int32_t offset) {
+  // Variable used out of scope.
+  CHECK(static_cast<size_t>(offset) < environment->variables_.size());
+  Node*& value = environment->variables_.at(offset);
+  CHECK(value != NULL);  // Variable used out of scope.
+  return value;
+}
+
+
+void StructuredMachineAssembler::Return(Node* value) {
+  BasicBlock* block = current_environment_->block_;
+  if (block != NULL) {
+    schedule()->AddReturn(block, value);
+  }
+  CopyCurrentAsDead();
+}
+
+
+void StructuredMachineAssembler::CopyCurrentAsDead() {
+  ASSERT(current_environment_ != NULL);
+  bool is_dead = current_environment_->is_dead_;
+  current_environment_->is_dead_ = true;
+  Environment* next = Copy(current_environment_);
+  current_environment_->is_dead_ = is_dead;
+  current_environment_ = next;
+}
+
+
+StructuredMachineAssembler::Environment* StructuredMachineAssembler::Copy(
+    Environment* env, int truncate_at) {
+  Environment* new_env = new (zone()) Environment(zone(), NULL, env->is_dead_);
+  if (!new_env->is_dead_) {
+    new_env->block_ = schedule()->NewBasicBlock();
+  }
+  new_env->variables_.reserve(truncate_at);
+  NodeVectorIter end = env->variables_.end();
+  ASSERT(truncate_at <= static_cast<int>(env->variables_.size()));
+  end -= static_cast<int>(env->variables_.size()) - truncate_at;
+  new_env->variables_.insert(new_env->variables_.begin(),
+                             env->variables_.begin(), end);
+  return new_env;
+}
+
+
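+// Copies an environment for a loop header. Every live variable gets a
+// single-input phi; MergeBackEdgesToLoopHeader later widens each phi with one
+// extra input per incoming backedge.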
+StructuredMachineAssembler::Environment*
+StructuredMachineAssembler::CopyForLoopHeader(Environment* env) {
+  Environment* new_env = new (zone()) Environment(zone(), NULL, env->is_dead_);
+  if (!new_env->is_dead_) {
+    new_env->block_ = schedule()->NewBasicBlock();
+  }
+  new_env->variables_.reserve(env->variables_.size());
+  for (NodeVectorIter i = env->variables_.begin(); i != env->variables_.end();
+       ++i) {
+    Node* phi = NULL;
+    if (*i != NULL) {
+      phi = graph()->NewNode(common()->Phi(1), *i);
+      if (new_env->block_ != NULL) {
+        schedule()->AddNode(new_env->block_, phi);
+      }
+    }
+    new_env->variables_.push_back(phi);
+  }
+  return new_env;
+}
+
+
+void StructuredMachineAssembler::MergeBackEdgesToLoopHeader(
+    Environment* header, EnvironmentVector* environments) {
+  // Only merge as many variables as were declared before this loop.
+  size_t n = header->variables_.size();
+  // TODO(dcarney): invert loop order and extend phis once.
+  for (EnvironmentVector::iterator i = environments->begin();
+       i != environments->end(); ++i) {
+    Environment* from = *i;
+    if (from->is_dead_) continue;
+    AddGoto(from, header);
+    for (size_t j = 0; j < n; ++j) {
+      Node* phi = header->variables_[j];
+      if (phi == NULL) continue;
+      phi->set_op(common()->Phi(phi->InputCount() + 1));
+      phi->AppendInput(zone(), VariableAt(from, j));
+    }
+  }
+}
+
+
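+// Merges the given environments into a fresh one. For each variable slot, if
+// all live environments agree on the value it is reused directly; otherwise a
+// phi with one input per live environment is created in the merge block.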
+void StructuredMachineAssembler::Merge(EnvironmentVector* environments,
+                                       int truncate_at) {
+  ASSERT(current_environment_ == NULL || current_environment_->is_dead_);
+  Environment* next = new (zone()) Environment(zone(), NULL, false);
+  current_environment_ = next;
+  size_t n_vars = number_of_variables_;
+  NodeVector& vars = next->variables_;
+  vars.reserve(n_vars);
+  Node** scratch = NULL;
+  size_t n_envs = environments->size();
+  Environment** live_environments = reinterpret_cast<Environment**>(
+      alloca(sizeof(environments->at(0)) * n_envs));
+  size_t n_live = 0;
+  for (size_t i = 0; i < n_envs; i++) {
+    if (environments->at(i)->is_dead_) continue;
+    live_environments[n_live++] = environments->at(i);
+  }
+  n_envs = n_live;
+  if (n_live == 0) next->is_dead_ = true;
+  if (!next->is_dead_) {
+    next->block_ = schedule()->NewBasicBlock();
+  }
+  for (size_t j = 0; j < n_vars; ++j) {
+    Node* resolved = NULL;
+    // Find first non equal variable.
+    size_t i = 0;
+    for (; i < n_envs; i++) {
+      ASSERT(live_environments[i]->variables_.size() <= n_vars);
+      Node* val = NULL;
+      if (j < static_cast<size_t>(truncate_at)) {
+        val = live_environments[i]->variables_.at(j);
+        // TODO(dcarney): record start position at time of split.
+        //                all variables after this should not be NULL.
+        if (val != NULL) {
+          val = VariableAt(live_environments[i], j);
+        }
+      }
+      if (val == resolved) continue;
+      if (i != 0) break;
+      resolved = val;
+    }
+    // Have to generate a phi.
+    if (i < n_envs) {
+      // All values thus far uninitialized, variable used out of scope.
+      CHECK(resolved != NULL);
+      // Init scratch buffer.
+      if (scratch == NULL) {
+        scratch = static_cast<Node**>(alloca(n_envs * sizeof(resolved)));
+      }
+      for (size_t k = 0; k < i; k++) {
+        scratch[k] = resolved;
+      }
+      for (; i < n_envs; i++) {
+        scratch[i] = live_environments[i]->variables_[j];
+      }
+      resolved = graph()->NewNode(common()->Phi(n_envs), n_envs, scratch);
+      if (next->block_ != NULL) {
+        schedule()->AddNode(next->block_, resolved);
+      }
+    }
+    vars.push_back(resolved);
+  }
+}
+
+
+void StructuredMachineAssembler::AddGoto(Environment* from, Environment* to) {
+  if (to->is_dead_) {
+    ASSERT(from->is_dead_);
+    return;
+  }
+  ASSERT(!from->is_dead_);
+  schedule()->AddGoto(from->block_, to->block_);
+}
+
+
+// TODO(dcarney): add a pass over the schedule, before RPO computation, to
+// compute these.
+BasicBlock* StructuredMachineAssembler::TrampolineFor(BasicBlock* block) {
+  BasicBlock* trampoline = schedule()->NewBasicBlock();
+  schedule()->AddGoto(trampoline, block);
+  return trampoline;
+}
+
+
+void StructuredMachineAssembler::AddBranch(Environment* environment,
+                                           Node* condition,
+                                           Environment* true_val,
+                                           Environment* false_val) {
+  ASSERT(environment->is_dead_ == true_val->is_dead_);
+  ASSERT(environment->is_dead_ == false_val->is_dead_);
+  if (true_val->block_ == false_val->block_) {
+    if (environment->is_dead_) return;
+    AddGoto(environment, true_val);
+    return;
+  }
+  Node* branch = graph()->NewNode(common()->Branch(), condition);
+  if (environment->is_dead_) return;
+  BasicBlock* true_block = TrampolineFor(true_val->block_);
+  BasicBlock* false_block = TrampolineFor(false_val->block_);
+  schedule()->AddBranch(environment->block_, branch, true_block, false_block);
+}
+
+
+StructuredMachineAssembler::Environment::Environment(Zone* zone,
+                                                     BasicBlock* block,
+                                                     bool is_dead)
+    : block_(block),
+      variables_(NodeVector::allocator_type(zone)),
+      is_dead_(is_dead) {}
+
+
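+// A sketch of typical usage (assuming an assembler 'm' that exposes machine
+// operators such as Word32Equal):
+//
+//   StructuredMachineAssembler::IfBuilder cond(&m);
+//   cond.If(m.Word32Equal(a, b)).Then();
+//   // ... emit then-body ...
+//   cond.Else();
+//   // ... emit else-body ...
+//   cond.End();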
+StructuredMachineAssembler::IfBuilder::IfBuilder(
+    StructuredMachineAssembler* smasm)
+    : smasm_(smasm),
+      if_clauses_(IfClauses::allocator_type(smasm_->zone())),
+      pending_exit_merges_(EnvironmentVector::allocator_type(smasm_->zone())) {
+  ASSERT(smasm_->current_environment_ != NULL);
+  PushNewIfClause();
+  ASSERT(!IsDone());
+}
+
+
+StructuredMachineAssembler::IfBuilder&
+StructuredMachineAssembler::IfBuilder::If() {
+  ASSERT(smasm_->current_environment_ != NULL);
+  IfClause* clause = CurrentClause();
+  if (clause->then_environment_ != NULL || clause->else_environment_ != NULL) {
+    PushNewIfClause();
+  }
+  return *this;
+}
+
+
+StructuredMachineAssembler::IfBuilder&
+StructuredMachineAssembler::IfBuilder::If(Node* condition) {
+  If();
+  IfClause* clause = CurrentClause();
+  // Store branch for future resolution.
+  UnresolvedBranch* next = new (smasm_->zone())
+      UnresolvedBranch(smasm_->current_environment_, condition, NULL);
+  if (clause->unresolved_list_tail_ != NULL) {
+    clause->unresolved_list_tail_->next_ = next;
+  }
+  clause->unresolved_list_tail_ = next;
+  // Push onto merge queues.
+  clause->pending_else_merges_.push_back(next);
+  clause->pending_then_merges_.push_back(next);
+  smasm_->current_environment_ = NULL;
+  return *this;
+}
+
+
+void StructuredMachineAssembler::IfBuilder::And() {
+  CurrentClause()->ResolvePendingMerges(smasm_, kCombineThen, kExpressionTerm);
+}
+
+
+void StructuredMachineAssembler::IfBuilder::Or() {
+  CurrentClause()->ResolvePendingMerges(smasm_, kCombineElse, kExpressionTerm);
+}
+
+
+void StructuredMachineAssembler::IfBuilder::Then() {
+  CurrentClause()->ResolvePendingMerges(smasm_, kCombineThen, kExpressionDone);
+}
+
+
+void StructuredMachineAssembler::IfBuilder::Else() {
+  AddCurrentToPending();
+  CurrentClause()->ResolvePendingMerges(smasm_, kCombineElse, kExpressionDone);
+}
+
+
+void StructuredMachineAssembler::IfBuilder::AddCurrentToPending() {
+  if (smasm_->current_environment_ != NULL &&
+      !smasm_->current_environment_->is_dead_) {
+    pending_exit_merges_.push_back(smasm_->current_environment_);
+  }
+  smasm_->current_environment_ = NULL;
+}
+
+
+void StructuredMachineAssembler::IfBuilder::PushNewIfClause() {
+  int curr_size =
+      static_cast<int>(smasm_->current_environment_->variables_.size());
+  IfClause* clause = new (smasm_->zone()) IfClause(smasm_->zone(), curr_size);
+  if_clauses_.push_back(clause);
+}
+
+
+StructuredMachineAssembler::IfBuilder::IfClause::IfClause(
+    Zone* zone, int initial_environment_size)
+    : unresolved_list_tail_(NULL),
+      initial_environment_size_(initial_environment_size),
+      expression_states_(ExpressionStates::allocator_type(zone)),
+      pending_then_merges_(PendingMergeStack::allocator_type(zone)),
+      pending_else_merges_(PendingMergeStack::allocator_type(zone)),
+      then_environment_(NULL),
+      else_environment_(NULL) {
+  PushNewExpressionState();
+}
+
+
+StructuredMachineAssembler::IfBuilder::PendingMergeStackRange
+StructuredMachineAssembler::IfBuilder::IfClause::ComputeRelevantMerges(
+    CombineType combine_type) {
+  ASSERT(!expression_states_.empty());
+  PendingMergeStack* stack;
+  int start;
+  if (combine_type == kCombineThen) {
+    stack = &pending_then_merges_;
+    start = expression_states_.back().pending_then_size_;
+  } else {
+    ASSERT(combine_type == kCombineElse);
+    stack = &pending_else_merges_;
+    start = expression_states_.back().pending_else_size_;
+  }
+  PendingMergeStackRange data;
+  data.merge_stack_ = stack;
+  data.start_ = start;
+  data.size_ = static_cast<int>(stack->size()) - start;
+  return data;
+}
+
+
+void StructuredMachineAssembler::IfBuilder::IfClause::ResolvePendingMerges(
+    StructuredMachineAssembler* smasm, CombineType combine_type,
+    ResolutionType resolution_type) {
+  ASSERT(smasm->current_environment_ == NULL);
+  PendingMergeStackRange data = ComputeRelevantMerges(combine_type);
+  ASSERT_EQ(data.merge_stack_->back(), unresolved_list_tail_);
+  ASSERT(data.size_ > 0);
+  // TODO(dcarney): assert no new variables created during expression building.
+  int truncate_at = initial_environment_size_;
+  if (data.size_ == 1) {
+    // Just copy environment in common case.
+    smasm->current_environment_ =
+        smasm->Copy(unresolved_list_tail_->environment_, truncate_at);
+  } else {
+    EnvironmentVector environments(
+        EnvironmentVector::allocator_type(smasm->zone()));
+    environments.reserve(data.size_);
+    CopyEnvironments(data, &environments);
+    ASSERT(static_cast<int>(environments.size()) == data.size_);
+    smasm->Merge(&environments, truncate_at);
+  }
+  Environment* then_environment = then_environment_;
+  Environment* else_environment = NULL;
+  if (resolution_type == kExpressionDone) {
+    ASSERT(expression_states_.size() == 1);
+    // Set the current then_ or else_environment_ to the new merged environment.
+    if (combine_type == kCombineThen) {
+      ASSERT(then_environment_ == NULL && else_environment_ == NULL);
+      this->then_environment_ = smasm->current_environment_;
+    } else {
+      ASSERT(else_environment_ == NULL);
+      this->else_environment_ = smasm->current_environment_;
+    }
+  } else {
+    ASSERT(resolution_type == kExpressionTerm);
+    ASSERT(then_environment_ == NULL && else_environment_ == NULL);
+  }
+  if (combine_type == kCombineThen) {
+    then_environment = smasm->current_environment_;
+  } else {
+    ASSERT(combine_type == kCombineElse);
+    else_environment = smasm->current_environment_;
+  }
+  // Finalize branches and clear the pending stack.
+  FinalizeBranches(smasm, data, combine_type, then_environment,
+                   else_environment);
+}
+
+
+void StructuredMachineAssembler::IfBuilder::IfClause::CopyEnvironments(
+    const PendingMergeStackRange& data, EnvironmentVector* environments) {
+  PendingMergeStack::iterator i = data.merge_stack_->begin();
+  PendingMergeStack::iterator end = data.merge_stack_->end();
+  for (i += data.start_; i != end; ++i) {
+    environments->push_back((*i)->environment_);
+  }
+}
+
+
+void StructuredMachineAssembler::IfBuilder::IfClause::PushNewExpressionState() {
+  ExpressionState next;
+  next.pending_then_size_ = static_cast<int>(pending_then_merges_.size());
+  next.pending_else_size_ = static_cast<int>(pending_else_merges_.size());
+  expression_states_.push_back(next);
+}
+
+
+void StructuredMachineAssembler::IfBuilder::IfClause::PopExpressionState() {
+  expression_states_.pop_back();
+  ASSERT(!expression_states_.empty());
+}
+
+
+void StructuredMachineAssembler::IfBuilder::IfClause::FinalizeBranches(
+    StructuredMachineAssembler* smasm, const PendingMergeStackRange& data,
+    CombineType combine_type, Environment* const then_environment,
+    Environment* const else_environment) {
+  ASSERT(unresolved_list_tail_ != NULL);
+  ASSERT(smasm->current_environment_ != NULL);
+  if (data.size_ == 0) return;
+  PendingMergeStack::iterator curr = data.merge_stack_->begin();
+  PendingMergeStack::iterator end = data.merge_stack_->end();
+  // Finalize everything but the head first,
+  // in the order the branches enter the merge block.
+  end -= 1;
+  Environment* true_val = then_environment;
+  Environment* false_val = else_environment;
+  Environment** next;
+  if (combine_type == kCombineThen) {
+    next = &false_val;
+  } else {
+    ASSERT(combine_type == kCombineElse);
+    next = &true_val;
+  }
+  for (curr += data.start_; curr != end; ++curr) {
+    UnresolvedBranch* branch = *curr;
+    *next = branch->next_->environment_;
+    smasm->AddBranch(branch->environment_, branch->condition_, true_val,
+                     false_val);
+  }
+  ASSERT(curr + 1 == data.merge_stack_->end());
+  // Now finalize the tail if possible.
+  if (then_environment != NULL && else_environment != NULL) {
+    UnresolvedBranch* branch = *curr;
+    smasm->AddBranch(branch->environment_, branch->condition_, then_environment,
+                     else_environment);
+  }
+  // Clear the merge stack.
+  PendingMergeStack::iterator begin = data.merge_stack_->begin();
+  begin += data.start_;
+  data.merge_stack_->erase(begin, data.merge_stack_->end());
+  ASSERT_EQ(static_cast<int>(data.merge_stack_->size()), data.start_);
+}
+
+
+void StructuredMachineAssembler::IfBuilder::End() {
+  ASSERT(!IsDone());
+  AddCurrentToPending();
+  size_t current_pending = pending_exit_merges_.size();
+  // All unresolved branch edges are now set to pending.
+  for (IfClauses::iterator i = if_clauses_.begin(); i != if_clauses_.end();
+       ++i) {
+    IfClause* clause = *i;
+    ASSERT(clause->expression_states_.size() == 1);
+    PendingMergeStackRange data;
+    // Copy then environments.
+    data = clause->ComputeRelevantMerges(kCombineThen);
+    clause->CopyEnvironments(data, &pending_exit_merges_);
+    Environment* head = NULL;
+    // The head node will be resolved in the else merge below.
+    if (data.size_ > 0 && clause->then_environment_ == NULL &&
+        clause->else_environment_ == NULL) {
+      head = pending_exit_merges_.back();
+      pending_exit_merges_.pop_back();
+    }
+    // Copy else environments.
+    data = clause->ComputeRelevantMerges(kCombineElse);
+    clause->CopyEnvironments(data, &pending_exit_merges_);
+    if (head != NULL) {
+      // Must have data to merge, or else head will never get a branch.
+      ASSERT(data.size_ != 0);
+      pending_exit_merges_.push_back(head);
+    }
+  }
+  smasm_->Merge(&pending_exit_merges_,
+                if_clauses_[0]->initial_environment_size_);
+  // Anything initially pending jumps into the new environment.
+  for (size_t i = 0; i < current_pending; ++i) {
+    smasm_->AddGoto(pending_exit_merges_[i], smasm_->current_environment_);
+  }
+  // Resolve all branches.
+  for (IfClauses::iterator i = if_clauses_.begin(); i != if_clauses_.end();
+       ++i) {
+    IfClause* clause = *i;
+    // Must finalize all environments, so ensure they are set correctly.
+    Environment* then_environment = clause->then_environment_;
+    if (then_environment == NULL) {
+      then_environment = smasm_->current_environment_;
+    }
+    Environment* else_environment = clause->else_environment_;
+    PendingMergeStackRange data;
+    // Finalize then environments.
+    data = clause->ComputeRelevantMerges(kCombineThen);
+    clause->FinalizeBranches(smasm_, data, kCombineThen, then_environment,
+                             else_environment);
+    // Finalize else environments.
+    // Set the else environment now so the head is finalized for the edge
+    // case above.
+    if (else_environment == NULL) {
+      else_environment = smasm_->current_environment_;
+    }
+    data = clause->ComputeRelevantMerges(kCombineElse);
+    clause->FinalizeBranches(smasm_, data, kCombineElse, then_environment,
+                             else_environment);
+  }
+  // Future accesses to this builder should crash immediately.
+  pending_exit_merges_.clear();
+  if_clauses_.clear();
+  ASSERT(IsDone());
+}
+
+
+StructuredMachineAssembler::LoopBuilder::LoopBuilder(
+    StructuredMachineAssembler* smasm)
+    : smasm_(smasm),
+      header_environment_(NULL),
+      pending_header_merges_(EnvironmentVector::allocator_type(smasm_->zone())),
+      pending_exit_merges_(EnvironmentVector::allocator_type(smasm_->zone())) {
+  ASSERT(smasm_->current_environment_ != NULL);
+  // Create header environment.
+  header_environment_ = smasm_->CopyForLoopHeader(smasm_->current_environment_);
+  smasm_->AddGoto(smasm_->current_environment_, header_environment_);
+  // Create body environment.
+  Environment* body = smasm_->Copy(header_environment_);
+  smasm_->AddGoto(header_environment_, body);
+  smasm_->current_environment_ = body;
+  ASSERT(!IsDone());
+}
+
+
+void StructuredMachineAssembler::LoopBuilder::Continue() {
+  ASSERT(!IsDone());
+  pending_header_merges_.push_back(smasm_->current_environment_);
+  smasm_->CopyCurrentAsDead();
+}
+
+
+void StructuredMachineAssembler::LoopBuilder::Break() {
+  ASSERT(!IsDone());
+  pending_exit_merges_.push_back(smasm_->current_environment_);
+  smasm_->CopyCurrentAsDead();
+}
+
+
+void StructuredMachineAssembler::LoopBuilder::End() {
+  ASSERT(!IsDone());
+  if (smasm_->current_environment_ != NULL) {
+    Continue();
+  }
+  // Do loop header merges.
+  smasm_->MergeBackEdgesToLoopHeader(header_environment_,
+                                     &pending_header_merges_);
+  int initial_size = static_cast<int>(header_environment_->variables_.size());
+  // Do loop exit merges, truncating loop variables away.
+  smasm_->Merge(&pending_exit_merges_, initial_size);
+  for (EnvironmentVector::iterator i = pending_exit_merges_.begin();
+       i != pending_exit_merges_.end(); ++i) {
+    smasm_->AddGoto(*i, smasm_->current_environment_);
+  }
+  pending_header_merges_.clear();
+  pending_exit_merges_.clear();
+  header_environment_ = NULL;
+  ASSERT(IsDone());
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/structured-machine-assembler.h b/src/compiler/structured-machine-assembler.h
new file mode 100644 (file)
index 0000000..383c3af
--- /dev/null
@@ -0,0 +1,312 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_STRUCTURED_MACHINE_ASSEMBLER_H_
+#define V8_COMPILER_STRUCTURED_MACHINE_ASSEMBLER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/machine-node-factory.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class BasicBlock;
+class Schedule;
+class StructuredMachineAssembler;
+
+
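+// A Variable is a handle to a mutable slot in the assembler's current
+// environment; Get() reads and Set() writes the node stored at its offset.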
+class Variable : public ZoneObject {
+ public:
+  Node* Get() const;
+  void Set(Node* value) const;
+
+ private:
+  Variable(StructuredMachineAssembler* smasm, int offset)
+      : smasm_(smasm), offset_(offset) {}
+
+  friend class StructuredMachineAssembler;
+  friend class StructuredMachineAssemblerFriend;
+  StructuredMachineAssembler* const smasm_;
+  const int offset_;
+};
+
+
+class StructuredMachineAssembler
+    : public GraphBuilder,
+      public MachineNodeFactory<StructuredMachineAssembler> {
+ public:
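+  // An Environment tracks the current basic block and the current value of
+  // every variable; is_dead_ marks environments whose control flow has
+  // already left the block (e.g. after a Break or Continue).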
+  class Environment : public ZoneObject {
+   public:
+    Environment(Zone* zone, BasicBlock* block, bool is_dead_);
+
+   private:
+    BasicBlock* block_;
+    NodeVector variables_;
+    bool is_dead_;
+    friend class StructuredMachineAssembler;
+    DISALLOW_COPY_AND_ASSIGN(Environment);
+  };
+
+  class IfBuilder;
+  friend class IfBuilder;
+  class LoopBuilder;
+  friend class LoopBuilder;
+
+  StructuredMachineAssembler(
+      Graph* graph, MachineCallDescriptorBuilder* call_descriptor_builder,
+      MachineRepresentation word = MachineOperatorBuilder::pointer_rep());
+  virtual ~StructuredMachineAssembler() {}
+
+  Isolate* isolate() const { return zone()->isolate(); }
+  Zone* zone() const { return graph()->zone(); }
+  MachineOperatorBuilder* machine() { return &machine_; }
+  CommonOperatorBuilder* common() { return &common_; }
+  CallDescriptor* call_descriptor() const {
+    return call_descriptor_builder_->BuildCallDescriptor(zone());
+  }
+  int parameter_count() const {
+    return call_descriptor_builder_->parameter_count();
+  }
+  const MachineRepresentation* parameter_types() const {
+    return call_descriptor_builder_->parameter_types();
+  }
+
+  // Parameters.
+  Node* Parameter(int index);
+  // Variables.
+  Variable NewVariable(Node* initial_value);
+  // Control flow.
+  void Return(Node* value);
+
+  // The assembler is invalid after Export().
+  Schedule* Export();
+
+ protected:
+  virtual Node* MakeNode(Operator* op, int input_count, Node** inputs);
+
+  Schedule* schedule() {
+    ASSERT(ScheduleValid());
+    return schedule_;
+  }
+
+ private:
+  bool ScheduleValid() { return schedule_ != NULL; }
+
+  typedef std::vector<Environment*, zone_allocator<Environment*> >
+      EnvironmentVector;
+
+  NodeVector* CurrentVars() { return &current_environment_->variables_; }
+  Node*& VariableAt(Environment* environment, int offset);
+  Node* GetVariable(int offset);
+  void SetVariable(int offset, Node* value);
+
+  void AddBranch(Environment* environment, Node* condition,
+                 Environment* true_val, Environment* false_val);
+  void AddGoto(Environment* from, Environment* to);
+  BasicBlock* TrampolineFor(BasicBlock* block);
+
+  void CopyCurrentAsDead();
+  Environment* Copy(Environment* environment) {
+    return Copy(environment, static_cast<int>(environment->variables_.size()));
+  }
+  Environment* Copy(Environment* environment, int truncate_at);
+  void Merge(EnvironmentVector* environments, int truncate_at);
+  Environment* CopyForLoopHeader(Environment* environment);
+  void MergeBackEdgesToLoopHeader(Environment* header,
+                                  EnvironmentVector* environments);
+
+  typedef std::vector<MachineRepresentation,
+                      zone_allocator<MachineRepresentation> >
+      RepresentationVector;
+
+  Schedule* schedule_;
+  MachineOperatorBuilder machine_;
+  CommonOperatorBuilder common_;
+  MachineCallDescriptorBuilder* call_descriptor_builder_;
+  Node** parameters_;
+  Environment* current_environment_;
+  int number_of_variables_;
+
+  friend class Variable;
+  // For testing only.
+  friend class StructuredMachineAssemblerFriend;
+  DISALLOW_COPY_AND_ASSIGN(StructuredMachineAssembler);
+};
+
+// IfBuilder constructs nested if-else expressions which more or less follow
+// C semantics.  For example:
+//
+//  if (x) {do_x} else if (y) {do_y} else {do_z}
+//
+// would look like this:
+//
+//  IfBuilder b;
+//  b.If(x).Then();
+//  do_x
+//  b.Else();
+//  b.If(y).Then();
+//  do_y
+//  b.Else();
+//  do_z
+//  b.End();
+//
+// Then() and Else() can be skipped, representing an empty block in C.
+// Combinations like If(x).Then().If(x).Then() are legitimate, but
+// Else().Else() is not. That is, once you've nested an If(), you can't get
+// back to a higher-level If() branch.
+// TODO(dcarney): describe expressions once the api is finalized.
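+// As a rough sketch only (the exact call sequence may differ until the API
+// settles), a C condition such as `x && (y || z)` would be built along the
+// lines of:
+//
+//  b.If(x);
+//  b.And();
+//  b.OpenParen().If(y);
+//  b.Or();
+//  b.If(z);
+//  b.CloseParen().Then();
+//  do_then
+//  b.End();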
+class StructuredMachineAssembler::IfBuilder {
+ public:
+  explicit IfBuilder(StructuredMachineAssembler* smasm);
+  ~IfBuilder() {
+    if (!IsDone()) End();
+  }
+
+  IfBuilder& If();  // TODO(dcarney): this should take an expression.
+  IfBuilder& If(Node* condition);
+  void Then();
+  void Else();
+  void End();
+
+  // The next 4 functions are exposed for expression support.
+  // They will be private once I have a nice expression api.
+  void And();
+  void Or();
+  IfBuilder& OpenParen() {
+    ASSERT(smasm_->current_environment_ != NULL);
+    CurrentClause()->PushNewExpressionState();
+    return *this;
+  }
+  IfBuilder& CloseParen() {
+    ASSERT(smasm_->current_environment_ == NULL);
+    CurrentClause()->PopExpressionState();
+    return *this;
+  }
+
+ private:
+  // UnresolvedBranch represents the chain of environments created while
+  // generating an expression.  While the expression is being built, a branch
+  // Node cannot yet be created, as its target environments are not yet
+  // available, so everything required to create the branch Node is stored in
+  // this structure until the target environments are resolved.
+  struct UnresolvedBranch : public ZoneObject {
+    UnresolvedBranch(Environment* environment, Node* condition,
+                     UnresolvedBranch* next)
+        : environment_(environment), condition_(condition), next_(next) {}
+    // environment_ will eventually be terminated by a branch on condition_.
+    Environment* environment_;
+    Node* condition_;
+    // next_ is the next link in the UnresolvedBranch chain, and will be
+    // either the true or false branch jumped to from environment_.
+    UnresolvedBranch* next_;
+  };
+
+  struct ExpressionState {
+    int pending_then_size_;
+    int pending_else_size_;
+  };
+
+  typedef std::vector<ExpressionState, zone_allocator<ExpressionState> >
+      ExpressionStates;
+  typedef std::vector<UnresolvedBranch*, zone_allocator<UnresolvedBranch*> >
+      PendingMergeStack;
+  struct IfClause;
+  typedef std::vector<IfClause*, zone_allocator<IfClause*> > IfClauses;
+
+  struct PendingMergeStackRange {
+    PendingMergeStack* merge_stack_;
+    int start_;
+    int size_;
+  };
+
+  enum CombineType { kCombineThen, kCombineElse };
+  enum ResolutionType { kExpressionTerm, kExpressionDone };
+
+  // IfClause represents one level of if-then-else nesting plus the associated
+  // expression.
+  // A call to If() triggers creation of a new nesting level once expression
+  // building is complete, i.e. after Then() or Else() has been called.
+  struct IfClause : public ZoneObject {
+    IfClause(Zone* zone, int initial_environment_size);
+    void CopyEnvironments(const PendingMergeStackRange& data,
+                          EnvironmentVector* environments);
+    void ResolvePendingMerges(StructuredMachineAssembler* smasm,
+                              CombineType combine_type,
+                              ResolutionType resolution_type);
+    PendingMergeStackRange ComputeRelevantMerges(CombineType combine_type);
+    void FinalizeBranches(StructuredMachineAssembler* smasm,
+                          const PendingMergeStackRange& offset_data,
+                          CombineType combine_type,
+                          Environment* then_environment,
+                          Environment* else_environment);
+    void PushNewExpressionState();
+    void PopExpressionState();
+
+    // Each invocation of And or Or creates a new UnresolvedBranch.
+    // These form a singly-linked list, of which we only need to keep track of
+    // the tail.  On creation of an UnresolvedBranch, pending_then_merges_ and
+    // pending_else_merges_ each push a copy, which is removed on a merge to
+    // the respective environment.
+    UnresolvedBranch* unresolved_list_tail_;
+    int initial_environment_size_;
+    // expression_states_ keeps track of the state of pending_*_merges_,
+    // pushing and popping their lengths on OpenParen() and CloseParen(),
+    // respectively.
+    ExpressionStates expression_states_;
+    PendingMergeStack pending_then_merges_;
+    PendingMergeStack pending_else_merges_;
+    // then_environment_ is created iff there is a call to Then(), otherwise
+    // branches which would merge to it merge to the exit environment instead.
+    // Likewise for else_environment_.
+    Environment* then_environment_;
+    Environment* else_environment_;
+  };
+
+  IfClause* CurrentClause() { return if_clauses_.back(); }
+  void AddCurrentToPending();
+  void PushNewIfClause();
+  bool IsDone() { return if_clauses_.empty(); }
+
+  StructuredMachineAssembler* smasm_;
+  IfClauses if_clauses_;
+  EnvironmentVector pending_exit_merges_;
+  DISALLOW_COPY_AND_ASSIGN(IfBuilder);
+};
+
+
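+// LoopBuilder builds a loop: constructing it opens the loop header and body,
+// Continue() jumps back to the header, and Break() jumps to the code after
+// End(). A minimal sketch (the `done` condition is illustrative only):
+//
+//  StructuredMachineAssembler::LoopBuilder l(&m);
+//  {
+//    StructuredMachineAssembler::IfBuilder b(&m);
+//    b.If(done).Then();
+//    l.Break();
+//    b.End();
+//  }
+//  do_body
+//  l.End();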
+class StructuredMachineAssembler::LoopBuilder {
+ public:
+  explicit LoopBuilder(StructuredMachineAssembler* smasm);
+  ~LoopBuilder() {
+    if (!IsDone()) End();
+  }
+
+  void Break();
+  void Continue();
+  void End();
+
+ private:
+  friend class StructuredMachineAssembler;
+  bool IsDone() { return header_environment_ == NULL; }
+
+  StructuredMachineAssembler* smasm_;
+  Environment* header_environment_;
+  EnvironmentVector pending_header_merges_;
+  EnvironmentVector pending_exit_merges_;
+  DISALLOW_COPY_AND_ASSIGN(LoopBuilder);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_STRUCTURED_MACHINE_ASSEMBLER_H_
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
new file mode 100644 (file)
index 0000000..b1c3838
--- /dev/null
@@ -0,0 +1,842 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/typer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Typer::Typer(Zone* zone) : zone_(zone) {
+  Type* number = Type::Number(zone);
+  Type* signed32 = Type::Signed32(zone);
+  Type* unsigned32 = Type::Unsigned32(zone);
+  Type* integral32 = Type::Integral32(zone);
+  Type* object = Type::Object(zone);
+  Type* undefined = Type::Undefined(zone);
+  number_fun0_ = Type::Function(number, zone);
+  number_fun1_ = Type::Function(number, number, zone);
+  number_fun2_ = Type::Function(number, number, number, zone);
+  imul_fun_ = Type::Function(signed32, integral32, integral32, zone);
+
+#define NATIVE_TYPE(sem, rep) \
+  Type::Intersect(Type::sem(zone), Type::rep(zone), zone)
+  // TODO(rossberg): Use range types for more precision, once we have them.
+  Type* int8 = NATIVE_TYPE(SignedSmall, UntaggedInt8);
+  Type* int16 = NATIVE_TYPE(SignedSmall, UntaggedInt16);
+  Type* int32 = NATIVE_TYPE(Signed32, UntaggedInt32);
+  Type* uint8 = NATIVE_TYPE(UnsignedSmall, UntaggedInt8);
+  Type* uint16 = NATIVE_TYPE(UnsignedSmall, UntaggedInt16);
+  Type* uint32 = NATIVE_TYPE(Unsigned32, UntaggedInt32);
+  Type* float32 = NATIVE_TYPE(Number, UntaggedFloat32);
+  Type* float64 = NATIVE_TYPE(Number, UntaggedFloat64);
+#undef NATIVE_TYPE
+  Type* buffer = Type::Buffer(zone);
+  Type* int8_array = Type::Array(int8, zone);
+  Type* int16_array = Type::Array(int16, zone);
+  Type* int32_array = Type::Array(int32, zone);
+  Type* uint8_array = Type::Array(uint8, zone);
+  Type* uint16_array = Type::Array(uint16, zone);
+  Type* uint32_array = Type::Array(uint32, zone);
+  Type* float32_array = Type::Array(float32, zone);
+  Type* float64_array = Type::Array(float64, zone);
+  Type* arg1 = Type::Union(unsigned32, object, zone);
+  Type* arg2 = Type::Union(unsigned32, undefined, zone);
+  Type* arg3 = arg2;
+  array_buffer_fun_ = Type::Function(buffer, unsigned32, zone);
+  int8_array_fun_ = Type::Function(int8_array, arg1, arg2, arg3, zone);
+  int16_array_fun_ = Type::Function(int16_array, arg1, arg2, arg3, zone);
+  int32_array_fun_ = Type::Function(int32_array, arg1, arg2, arg3, zone);
+  uint8_array_fun_ = Type::Function(uint8_array, arg1, arg2, arg3, zone);
+  uint16_array_fun_ = Type::Function(uint16_array, arg1, arg2, arg3, zone);
+  uint32_array_fun_ = Type::Function(uint32_array, arg1, arg2, arg3, zone);
+  float32_array_fun_ = Type::Function(float32_array, arg1, arg2, arg3, zone);
+  float64_array_fun_ = Type::Function(float64_array, arg1, arg2, arg3, zone);
+}
+
+
+class Typer::Visitor : public NullNodeVisitor {
+ public:
+  Visitor(Typer* typer, MaybeHandle<Context> context)
+      : typer_(typer), context_(context) {}
+
+  Bounds TypeNode(Node* node) {
+    switch (node->opcode()) {
+#define DECLARE_CASE(x) \
+  case IrOpcode::k##x:  \
+    return Type##x(node);
+      VALUE_OP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
+#define DECLARE_CASE(x) case IrOpcode::k##x:
+      CONTROL_OP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+      break;
+    }
+    return Bounds(Type::None(zone()));
+  }
+
+  Type* TypeConstant(Handle<Object> value);
+
+ protected:
+#define DECLARE_METHOD(x) inline Bounds Type##x(Node* node);
+  VALUE_OP_LIST(DECLARE_METHOD)
+#undef DECLARE_METHOD
+
+  Bounds OperandType(Node* node, int i) {
+    return NodeProperties::GetBounds(NodeProperties::GetValueInput(node, i));
+  }
+
+  Type* ContextType(Node* node) {
+    Bounds result =
+        NodeProperties::GetBounds(NodeProperties::GetContextInput(node));
+    ASSERT(result.upper->Is(Type::Internal()));
+    ASSERT(result.lower->Equals(result.upper));
+    return result.upper;
+  }
+
+  Zone* zone() { return typer_->zone(); }
+  Isolate* isolate() { return typer_->isolate(); }
+  MaybeHandle<Context> context() { return context_; }
+
+ private:
+  Typer* typer_;
+  MaybeHandle<Context> context_;
+};
+
+
+class Typer::RunVisitor : public Typer::Visitor {
+ public:
+  RunVisitor(Typer* typer, MaybeHandle<Context> context)
+      : Visitor(typer, context),
+        phis(NodeSet::key_compare(), NodeSet::allocator_type(typer->zone())) {}
+
+  GenericGraphVisit::Control Pre(Node* node) {
+    return NodeProperties::IsControl(node) &&
+                   node->opcode() != IrOpcode::kEnd &&
+                   node->opcode() != IrOpcode::kMerge &&
+                   node->opcode() != IrOpcode::kReturn
+               ? GenericGraphVisit::SKIP
+               : GenericGraphVisit::CONTINUE;
+  }
+
+  GenericGraphVisit::Control Post(Node* node) {
+    Bounds bounds = TypeNode(node);
+    if (node->opcode() == IrOpcode::kPhi) {
+      // Remember phis for least fixpoint iteration.
+      phis.insert(node);
+    } else {
+      NodeProperties::SetBounds(node, bounds);
+    }
+    return GenericGraphVisit::CONTINUE;
+  }
+
+  NodeSet phis;
+};
+
+
+class Typer::NarrowVisitor : public Typer::Visitor {
+ public:
+  NarrowVisitor(Typer* typer, MaybeHandle<Context> context)
+      : Visitor(typer, context) {}
+
+  GenericGraphVisit::Control Pre(Node* node) {
+    Bounds previous = NodeProperties::GetBounds(node);
+    Bounds bounds = TypeNode(node);
+    NodeProperties::SetBounds(node, Bounds::Both(bounds, previous, zone()));
+    ASSERT(bounds.Narrows(previous));
+    // Stop when nothing changed (but allow reentry in case it does later).
+    return previous.Narrows(bounds) ? GenericGraphVisit::DEFER
+                                    : GenericGraphVisit::REENTER;
+  }
+
+  GenericGraphVisit::Control Post(Node* node) {
+    return GenericGraphVisit::REENTER;
+  }
+};
+
+
+class Typer::WidenVisitor : public Typer::Visitor {
+ public:
+  WidenVisitor(Typer* typer, MaybeHandle<Context> context)
+      : Visitor(typer, context) {}
+
+  GenericGraphVisit::Control Pre(Node* node) {
+    Bounds previous = NodeProperties::GetBounds(node);
+    Bounds bounds = TypeNode(node);
+    ASSERT(previous.lower->Is(bounds.lower));
+    ASSERT(previous.upper->Is(bounds.upper));
+    NodeProperties::SetBounds(node, bounds);  // TODO(rossberg): Either?
+    // Stop when nothing changed (but allow reentry in case it does later).
+    return bounds.Narrows(previous) ? GenericGraphVisit::DEFER
+                                    : GenericGraphVisit::REENTER;
+  }
+
+  GenericGraphVisit::Control Post(Node* node) {
+    return GenericGraphVisit::REENTER;
+  }
+};
+
+
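+// Types the graph in one pass from End, collecting the phis, then widens
+// from each phi until the bounds reach a least fixpoint.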
+void Typer::Run(Graph* graph, MaybeHandle<Context> context) {
+  RunVisitor typing(this, context);
+  graph->VisitNodeInputsFromEnd(&typing);
+  // Find least fixpoint.
+  for (NodeSetIter i = typing.phis.begin(); i != typing.phis.end(); ++i) {
+    Widen(graph, *i, context);
+  }
+}
+
+
+void Typer::Narrow(Graph* graph, Node* start, MaybeHandle<Context> context) {
+  NarrowVisitor typing(this, context);
+  graph->VisitNodeUsesFrom(start, &typing);
+}
+
+
+void Typer::Widen(Graph* graph, Node* start, MaybeHandle<Context> context) {
+  WidenVisitor typing(this, context);
+  graph->VisitNodeUsesFrom(start, &typing);
+}
+
+
+void Typer::Init(Node* node) {
+  Visitor typing(this, MaybeHandle<Context>());
+  Bounds bounds = typing.TypeNode(node);
+  NodeProperties::SetBounds(node, bounds);
+}
+
+
+// Common operators.
+Bounds Typer::Visitor::TypeParameter(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeInt32Constant(Node* node) {
+  // TODO(titzer): only call Type::Of() if the type is not already known.
+  return Bounds(Type::Of(ValueOf<int32_t>(node->op()), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeInt64Constant(Node* node) {
+  // TODO(titzer): only call Type::Of() if the type is not already known.
+  return Bounds(
+      Type::Of(static_cast<double>(ValueOf<int64_t>(node->op())), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeFloat64Constant(Node* node) {
+  // TODO(titzer): only call Type::Of() if the type is not already known.
+  return Bounds(Type::Of(ValueOf<double>(node->op()), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberConstant(Node* node) {
+  // TODO(titzer): only call Type::Of() if the type is not already known.
+  return Bounds(Type::Of(ValueOf<double>(node->op()), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeHeapConstant(Node* node) {
+  return Bounds(TypeConstant(ValueOf<Handle<Object> >(node->op())));
+}
+
+
+Bounds Typer::Visitor::TypeExternalConstant(Node* node) {
+  return Bounds(Type::Internal(zone()));
+}
+
+
+Bounds Typer::Visitor::TypePhi(Node* node) {
+  int arity = NodeProperties::GetValueInputCount(node);
+  Bounds bounds = OperandType(node, 0);
+  for (int i = 1; i < arity; ++i) {
+    bounds = Bounds::Either(bounds, OperandType(node, i), zone());
+  }
+  return bounds;
+}
+
+
+Bounds Typer::Visitor::TypeEffectPhi(Node* node) {
+  return Bounds(Type::None(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeFrameState(Node* node) {
+  return Bounds(Type::None(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeCall(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeProjection(Node* node) {
+  // TODO(titzer): use the output type of the input to determine the bounds.
+  return Bounds::Unbounded(zone());
+}
+
+
+// JS comparison operators.
+
+#define DEFINE_METHOD(x)                       \
+  Bounds Typer::Visitor::Type##x(Node* node) { \
+    return Bounds(Type::Boolean(zone()));      \
+  }
+JS_COMPARE_BINOP_LIST(DEFINE_METHOD)
+#undef DEFINE_METHOD
+
+
+// JS bitwise operators.
+
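+// The bitwise operators coerce their inputs to 32-bit integers, so their
+// results are bounded above by Signed32 (Unsigned32 for the logical shift);
+// the lower bound stays within SignedSmall where possible.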
+Bounds Typer::Visitor::TypeJSBitwiseOr(Node* node) {
+  Bounds left = OperandType(node, 0);
+  Bounds right = OperandType(node, 1);
+  Type* upper = Type::Union(left.upper, right.upper, zone());
+  if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone());
+  Type* lower = Type::Intersect(Type::SignedSmall(zone()), upper, zone());
+  return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeJSBitwiseAnd(Node* node) {
+  Bounds left = OperandType(node, 0);
+  Bounds right = OperandType(node, 1);
+  Type* upper = Type::Union(left.upper, right.upper, zone());
+  if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone());
+  Type* lower = Type::Intersect(Type::SignedSmall(zone()), upper, zone());
+  return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeJSBitwiseXor(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Signed32(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSShiftLeft(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Signed32(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSShiftRight(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Signed32(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSShiftRightLogical(Node* node) {
+  return Bounds(Type::UnsignedSmall(zone()), Type::Unsigned32(zone()));
+}
+
+
+// JS arithmetic operators.
+
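+// For example, JSAdd of two values known to be Numbers is bounded by
+// [SignedSmall, Number], while an operand that may be a String widens the
+// upper bound to String or NumberOrString.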
+Bounds Typer::Visitor::TypeJSAdd(Node* node) {
+  Bounds left = OperandType(node, 0);
+  Bounds right = OperandType(node, 1);
+  Type* lower =
+      left.lower->Is(Type::None()) || right.lower->Is(Type::None())
+          ? Type::None(zone())
+          : left.lower->Is(Type::Number()) && right.lower->Is(Type::Number())
+                ? Type::SignedSmall(zone())
+                : left.lower->Is(Type::String()) ||
+                          right.lower->Is(Type::String())
+                      ? Type::String(zone())
+                      : Type::None(zone());
+  Type* upper =
+      left.upper->Is(Type::None()) && right.upper->Is(Type::None())
+          ? Type::None(zone())
+          : left.upper->Is(Type::Number()) && right.upper->Is(Type::Number())
+                ? Type::Number(zone())
+                : left.upper->Is(Type::String()) ||
+                          right.upper->Is(Type::String())
+                      ? Type::String(zone())
+                      : Type::NumberOrString(zone());
+  return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeJSSubtract(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSMultiply(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSDivide(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSModulus(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+// JS unary operators.
+
+Bounds Typer::Visitor::TypeJSUnaryNot(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSTypeOf(Node* node) {
+  return Bounds(Type::InternalizedString(zone()));
+}
+
+
+// JS conversion operators.
+
+Bounds Typer::Visitor::TypeJSToBoolean(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSToNumber(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSToString(Node* node) {
+  return Bounds(Type::None(zone()), Type::String(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSToName(Node* node) {
+  return Bounds(Type::None(zone()), Type::Name(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSToObject(Node* node) {
+  return Bounds(Type::None(zone()), Type::Object(zone()));
+}
+
+
+// JS object operators.
+
+Bounds Typer::Visitor::TypeJSCreate(Node* node) {
+  return Bounds(Type::None(zone()), Type::Object(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSLoadProperty(Node* node) {
+  Bounds object = OperandType(node, 0);
+  Bounds name = OperandType(node, 1);
+  Bounds result = Bounds::Unbounded(zone());
+  // TODO(rossberg): Use range types and sized array types to filter undefined.
+  if (object.lower->IsArray() && name.lower->Is(Type::Integral32())) {
+    result.lower = Type::Union(object.lower->AsArray()->Element(),
+                               Type::Undefined(zone()), zone());
+  }
+  if (object.upper->IsArray() && name.upper->Is(Type::Integral32())) {
+    result.upper = Type::Union(object.upper->AsArray()->Element(),
+                               Type::Undefined(zone()), zone());
+  }
+  return result;
+}
+
+
+Bounds Typer::Visitor::TypeJSLoadNamed(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeJSStoreProperty(Node* node) {
+  return Bounds(Type::None(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSStoreNamed(Node* node) {
+  return Bounds(Type::None(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSDeleteProperty(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSHasProperty(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSInstanceOf(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+// JS context operators.
+
+Bounds Typer::Visitor::TypeJSLoadContext(Node* node) {
+  Bounds outer = OperandType(node, 0);
+  ASSERT(outer.upper->Is(Type::Internal()));
+  ASSERT(outer.lower->Equals(outer.upper));
+  ContextAccess access = OpParameter<ContextAccess>(node);
+  Type* context_type = outer.upper;
+  MaybeHandle<Context> context;
+  if (context_type->IsConstant()) {
+    context = Handle<Context>::cast(context_type->AsConstant()->Value());
+  }
+  // Walk context chain (as far as known), mirroring dynamic lookup.
+  // Since contexts are mutable, the information is only useful as a lower
+  // bound.
+  // TODO(rossberg): Could use scope info to fix upper bounds for constant
+  // bindings if we know that this code is never shared.
+  for (int i = access.depth(); i > 0; --i) {
+    if (context_type->IsContext()) {
+      context_type = context_type->AsContext()->Outer();
+      if (context_type->IsConstant()) {
+        context = Handle<Context>::cast(context_type->AsConstant()->Value());
+      }
+    } else {
+      context = handle(context.ToHandleChecked()->previous(), isolate());
+    }
+  }
+  if (context.is_null()) {
+    return Bounds::Unbounded(zone());
+  } else {
+    Handle<Object> value =
+        handle(context.ToHandleChecked()->get(access.index()), isolate());
+    Type* lower = TypeConstant(value);
+    return Bounds(lower, Type::Any(zone()));
+  }
+}
+
+
+Bounds Typer::Visitor::TypeJSStoreContext(Node* node) {
+  return Bounds(Type::None(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateFunctionContext(Node* node) {
+  Type* outer = ContextType(node);
+  return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateCatchContext(Node* node) {
+  Type* outer = ContextType(node);
+  return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateWithContext(Node* node) {
+  Type* outer = ContextType(node);
+  return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateBlockContext(Node* node) {
+  Type* outer = ContextType(node);
+  return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateModuleContext(Node* node) {
+  // TODO(rossberg): this is probably incorrect.
+  Type* outer = ContextType(node);
+  return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateGlobalContext(Node* node) {
+  Type* outer = ContextType(node);
+  return Bounds(Type::Context(outer, zone()));
+}
+
+
+// JS other operators.
+
+Bounds Typer::Visitor::TypeJSYield(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeJSCallConstruct(Node* node) {
+  return Bounds(Type::None(zone()), Type::Receiver(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCallFunction(Node* node) {
+  Bounds fun = OperandType(node, 0);
+  Type* lower = fun.lower->IsFunction() ? fun.lower->AsFunction()->Result()
+                                        : Type::None(zone());
+  Type* upper = fun.upper->IsFunction() ? fun.upper->AsFunction()->Result()
+                                        : Type::Any(zone());
+  return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeJSCallRuntime(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeJSDebugger(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+// Simplified operators.
+
+Bounds Typer::Visitor::TypeBooleanNot(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberEqual(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberLessThan(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberLessThanOrEqual(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberAdd(Node* node) {
+  return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberSubtract(Node* node) {
+  return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberMultiply(Node* node) {
+  return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberDivide(Node* node) {
+  return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberModulus(Node* node) {
+  return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberToInt32(Node* node) {
+  Bounds arg = OperandType(node, 0);
+  Type* s32 = Type::Signed32(zone());
+  Type* lower = arg.lower->Is(s32) ? arg.lower : s32;
+  Type* upper = arg.upper->Is(s32) ? arg.upper : s32;
+  return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeNumberToUint32(Node* node) {
+  Bounds arg = OperandType(node, 0);
+  Type* u32 = Type::Unsigned32(zone());
+  Type* lower = arg.lower->Is(u32) ? arg.lower : u32;
+  Type* upper = arg.upper->Is(u32) ? arg.upper : u32;
+  return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeReferenceEqual(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStringEqual(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStringLessThan(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStringLessThanOrEqual(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStringAdd(Node* node) {
+  return Bounds(Type::String(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
+  // TODO(titzer): type is type of input, representation is Word32.
+  return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeChangeTaggedToUint32(Node* node) {
+  return Bounds(Type::Integral32());  // TODO(titzer): add appropriate rep
+}
+
+
+Bounds Typer::Visitor::TypeChangeTaggedToFloat64(Node* node) {
+  // TODO(titzer): type is type of input, representation is Float64.
+  return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeChangeInt32ToTagged(Node* node) {
+  // TODO(titzer): type is type of input, representation is Tagged.
+  return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeChangeUint32ToTagged(Node* node) {
+  // TODO(titzer): type is type of input, representation is Tagged.
+  return Bounds(Type::Unsigned32());
+}
+
+
+Bounds Typer::Visitor::TypeChangeFloat64ToTagged(Node* node) {
+  // TODO(titzer): type is type of input, representation is Tagged.
+  return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeChangeBoolToBit(Node* node) {
+  // TODO(titzer): type is type of input, representation is Bit.
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeChangeBitToBool(Node* node) {
+  // TODO(titzer): type is type of input, representation is Tagged.
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeLoadField(Node* node) {
+  return Bounds(FieldAccessOf(node->op()).type);
+}
+
+
+Bounds Typer::Visitor::TypeLoadElement(Node* node) {
+  return Bounds(ElementAccessOf(node->op()).type);
+}
+
+
+Bounds Typer::Visitor::TypeStoreField(Node* node) {
+  return Bounds(Type::None());
+}
+
+
+Bounds Typer::Visitor::TypeStoreElement(Node* node) {
+  return Bounds(Type::None());
+}
+
+
+// Machine operators.
+
+// TODO(rossberg): implement
+#define DEFINE_METHOD(x) \
+  Bounds Typer::Visitor::Type##x(Node* node) { return Bounds(Type::None()); }
+MACHINE_OP_LIST(DEFINE_METHOD)
+#undef DEFINE_METHOD
+
+
+// Heap constants.
+
+Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
+  if (value->IsJSFunction() && JSFunction::cast(*value)->IsBuiltin() &&
+      !context().is_null()) {
+    Handle<Context> native =
+        handle(context().ToHandleChecked()->native_context(), isolate());
+    if (*value == native->math_abs_fun()) {
+      return typer_->number_fun1_;  // TODO(rossberg): can't express overloading
+    } else if (*value == native->math_acos_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_asin_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_atan_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_atan2_fun()) {
+      return typer_->number_fun2_;
+    } else if (*value == native->math_ceil_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_cos_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_exp_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_floor_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_imul_fun()) {
+      return typer_->imul_fun_;
+    } else if (*value == native->math_log_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_pow_fun()) {
+      return typer_->number_fun2_;
+    } else if (*value == native->math_random_fun()) {
+      return typer_->number_fun0_;
+    } else if (*value == native->math_round_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_sin_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_sqrt_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_tan_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->array_buffer_fun()) {
+      return typer_->array_buffer_fun_;
+    } else if (*value == native->int8_array_fun()) {
+      return typer_->int8_array_fun_;
+    } else if (*value == native->int16_array_fun()) {
+      return typer_->int16_array_fun_;
+    } else if (*value == native->int32_array_fun()) {
+      return typer_->int32_array_fun_;
+    } else if (*value == native->uint8_array_fun()) {
+      return typer_->uint8_array_fun_;
+    } else if (*value == native->uint16_array_fun()) {
+      return typer_->uint16_array_fun_;
+    } else if (*value == native->uint32_array_fun()) {
+      return typer_->uint32_array_fun_;
+    } else if (*value == native->float32_array_fun()) {
+      return typer_->float32_array_fun_;
+    } else if (*value == native->float64_array_fun()) {
+      return typer_->float64_array_fun_;
+    }
+  }
+  return Type::Constant(value, zone());
+}
+
+
+namespace {
+
+class TyperDecorator : public GraphDecorator {
+ public:
+  explicit TyperDecorator(Typer* typer) : typer_(typer) {}
+  virtual void Decorate(Node* node) { typer_->Init(node); }
+
+ private:
+  Typer* typer_;
+};
+}  // namespace
+
+
+void Typer::DecorateGraph(Graph* graph) {
+  graph->AddDecorator(new (zone()) TyperDecorator(this));
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/typer.h b/src/compiler/typer.h
new file mode 100644 (file)
index 0000000..2957e4b
--- /dev/null
@@ -0,0 +1,57 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TYPER_H_
+#define V8_COMPILER_TYPER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/graph.h"
+#include "src/compiler/opcodes.h"
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
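+// Computes lower and upper type bounds for the nodes of a graph, either in
+// a single Run over the whole graph or incrementally via Narrow and Widen.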
+class Typer {
+ public:
+  explicit Typer(Zone* zone);
+
+  void Init(Node* node);
+  void Run(Graph* graph, MaybeHandle<Context> context);
+  void Narrow(Graph* graph, Node* node, MaybeHandle<Context> context);
+  void Widen(Graph* graph, Node* node, MaybeHandle<Context> context);
+
+  void DecorateGraph(Graph* graph);
+
+  Zone* zone() { return zone_; }
+  Isolate* isolate() { return zone_->isolate(); }
+
+ private:
+  class Visitor;
+  class RunVisitor;
+  class NarrowVisitor;
+  class WidenVisitor;
+
+  Zone* zone_;
+  Type* number_fun0_;
+  Type* number_fun1_;
+  Type* number_fun2_;
+  Type* imul_fun_;
+  Type* array_buffer_fun_;
+  Type* int8_array_fun_;
+  Type* int16_array_fun_;
+  Type* int32_array_fun_;
+  Type* uint8_array_fun_;
+  Type* uint16_array_fun_;
+  Type* uint32_array_fun_;
+  Type* float32_array_fun_;
+  Type* float64_array_fun_;
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_TYPER_H_
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
new file mode 100644 (file)
index 0000000..64eb72e
--- /dev/null
@@ -0,0 +1,232 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/verifier.h"
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+static bool IsDefUseChainLinkPresent(Node* def, Node* use) {
+  Node::Uses uses = def->uses();
+  for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
+    if (*it == use) return true;
+  }
+  return false;
+}
+
+
+static bool IsUseDefChainLinkPresent(Node* def, Node* use) {
+  Node::Inputs inputs = use->inputs();
+  for (Node::Inputs::iterator it = inputs.begin(); it != inputs.end(); ++it) {
+    if (*it == def) return true;
+  }
+  return false;
+}
+
+
+class Verifier::Visitor : public NullNodeVisitor {
+ public:
+  explicit Visitor(Zone* zone)
+      : reached_from_start(NodeSet::key_compare(),
+                           NodeSet::allocator_type(zone)),
+        reached_from_end(NodeSet::key_compare(),
+                         NodeSet::allocator_type(zone)) {}
+
+  // Fulfills the PreNodeCallback interface.
+  GenericGraphVisit::Control Pre(Node* node);
+
+  bool from_start;
+  NodeSet reached_from_start;
+  NodeSet reached_from_end;
+};
+
+
+GenericGraphVisit::Control Verifier::Visitor::Pre(Node* node) {
+  int value_count = NodeProperties::GetValueInputCount(node);
+  int context_count = NodeProperties::GetContextInputCount(node);
+  int effect_count = NodeProperties::GetEffectInputCount(node);
+  int control_count = NodeProperties::GetControlInputCount(node);
+
+  // Verify number of inputs matches up.
+  int input_count = value_count + context_count + effect_count + control_count;
+  CHECK_EQ(input_count, node->InputCount());
+
+  // Verify all value inputs actually produce a value.
+  for (int i = 0; i < value_count; ++i) {
+    Node* value = NodeProperties::GetValueInput(node, i);
+    CHECK(NodeProperties::HasValueOutput(value));
+    CHECK(IsDefUseChainLinkPresent(value, node));
+    CHECK(IsUseDefChainLinkPresent(value, node));
+  }
+
+  // Verify all context inputs are value nodes.
+  for (int i = 0; i < context_count; ++i) {
+    Node* context = NodeProperties::GetContextInput(node);
+    CHECK(NodeProperties::HasValueOutput(context));
+    CHECK(IsDefUseChainLinkPresent(context, node));
+    CHECK(IsUseDefChainLinkPresent(context, node));
+  }
+
+  // Verify all effect inputs actually have an effect.
+  for (int i = 0; i < effect_count; ++i) {
+    Node* effect = NodeProperties::GetEffectInput(node);
+    CHECK(NodeProperties::HasEffectOutput(effect));
+    CHECK(IsDefUseChainLinkPresent(effect, node));
+    CHECK(IsUseDefChainLinkPresent(effect, node));
+  }
+
+  // Verify all control inputs are control nodes.
+  for (int i = 0; i < control_count; ++i) {
+    Node* control = NodeProperties::GetControlInput(node, i);
+    CHECK(NodeProperties::HasControlOutput(control));
+    CHECK(IsDefUseChainLinkPresent(control, node));
+    CHECK(IsUseDefChainLinkPresent(control, node));
+  }
+
+  // Verify all successors are projections if multiple value outputs exist.
+  if (NodeProperties::GetValueOutputCount(node) > 1) {
+    Node::Uses uses = node->uses();
+    for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
+      CHECK(!NodeProperties::IsValueEdge(it.edge()) ||
+            (*it)->opcode() == IrOpcode::kProjection);
+    }
+  }
+
+  switch (node->opcode()) {
+    case IrOpcode::kStart:
+      // Start has no inputs.
+      CHECK_EQ(0, input_count);
+      break;
+    case IrOpcode::kEnd:
+      // End has no outputs.
+      CHECK(!NodeProperties::HasValueOutput(node));
+      CHECK(!NodeProperties::HasEffectOutput(node));
+      CHECK(!NodeProperties::HasControlOutput(node));
+      break;
+    case IrOpcode::kDead:
+      // Dead is never connected to the graph.
+      UNREACHABLE();
+    case IrOpcode::kBranch: {
+      // Branch uses are IfTrue and IfFalse.
+      Node::Uses uses = node->uses();
+      bool got_true = false, got_false = false;
+      for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
+        CHECK(((*it)->opcode() == IrOpcode::kIfTrue && !got_true) ||
+              ((*it)->opcode() == IrOpcode::kIfFalse && !got_false));
+        if ((*it)->opcode() == IrOpcode::kIfTrue) got_true = true;
+        if ((*it)->opcode() == IrOpcode::kIfFalse) got_false = true;
+      }
+      // TODO(rossberg): Currently fails for various tests.
+      // CHECK(got_true && got_false);
+      break;
+    }
+    case IrOpcode::kIfTrue:
+    case IrOpcode::kIfFalse:
+      CHECK_EQ(IrOpcode::kBranch,
+               NodeProperties::GetControlInput(node, 0)->opcode());
+      break;
+    case IrOpcode::kLoop:
+    case IrOpcode::kMerge:
+      break;
+    case IrOpcode::kReturn:
+      // TODO(rossberg): check successor is End
+      break;
+    case IrOpcode::kThrow:
+      // TODO(rossberg): what are the constraints on these?
+      break;
+    case IrOpcode::kParameter:
+      // Parameters have no inputs.
+      CHECK_EQ(0, input_count);
+      break;
+    case IrOpcode::kInt32Constant:
+    case IrOpcode::kInt64Constant:
+    case IrOpcode::kFloat64Constant:
+    case IrOpcode::kExternalConstant:
+    case IrOpcode::kNumberConstant:
+    case IrOpcode::kHeapConstant:
+      // Constants have no inputs.
+      CHECK_EQ(0, input_count);
+      break;
+    case IrOpcode::kPhi: {
+      // Phi input count matches parent control node.
+      CHECK_EQ(1, control_count);
+      Node* control = NodeProperties::GetControlInput(node, 0);
+      CHECK_EQ(value_count, NodeProperties::GetControlInputCount(control));
+      break;
+    }
+    case IrOpcode::kEffectPhi: {
+      // EffectPhi input count matches parent control node.
+      CHECK_EQ(1, control_count);
+      Node* control = NodeProperties::GetControlInput(node, 0);
+      CHECK_EQ(effect_count, NodeProperties::GetControlInputCount(control));
+      break;
+    }
+    case IrOpcode::kLazyDeoptimization:
+      // TODO(jarin): what are the constraints on these?
+      break;
+    case IrOpcode::kDeoptimize:
+      // TODO(jarin): what are the constraints on these?
+      break;
+    case IrOpcode::kFrameState:
+      // TODO(jarin): what are the constraints on these?
+      break;
+    case IrOpcode::kCall:
+      // TODO(rossberg): what are the constraints on these?
+      break;
+    case IrOpcode::kContinuation:
+      // TODO(jarin): what are the constraints on these?
+      break;
+    case IrOpcode::kProjection: {
+      // Projection has an input that produces enough values.
+      int index = static_cast<Operator1<int>*>(node->op())->parameter();
+      Node* input = NodeProperties::GetValueInput(node, 0);
+      CHECK_GT(NodeProperties::GetValueOutputCount(input), index);
+      break;
+    }
+    default:
+      // TODO(rossberg): Check other node kinds.
+      break;
+  }
+
+  if (from_start) {
+    reached_from_start.insert(node);
+  } else {
+    reached_from_end.insert(node);
+  }
+
+  return GenericGraphVisit::CONTINUE;
+}
+
+
+void Verifier::Run(Graph* graph) {
+  Visitor visitor(graph->zone());
+
+  visitor.from_start = true;
+  graph->VisitNodeUsesFromStart(&visitor);
+  visitor.from_start = false;
+  graph->VisitNodeInputsFromEnd(&visitor);
+
+  // All control nodes reachable from end are reachable from start.
+  for (NodeSet::iterator it = visitor.reached_from_end.begin();
+       it != visitor.reached_from_end.end(); ++it) {
+    CHECK(!NodeProperties::IsControl(*it) ||
+          visitor.reached_from_start.count(*it));
+  }
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/verifier.h b/src/compiler/verifier.h
new file mode 100644 (file)
index 0000000..788c6a5
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_VERIFIER_H_
+#define V8_COMPILER_VERIFIER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
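+// Verifies structural invariants of a graph: per-opcode input counts, the
+// consistency of def-use and use-def chains, and that control nodes
+// reachable from End are also reachable from Start.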
+class Verifier {
+ public:
+  static void Run(Graph* graph);
+
+ private:
+  class Visitor;
+  DISALLOW_COPY_AND_ASSIGN(Verifier);
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_VERIFIER_H_
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
new file mode 100644 (file)
index 0000000..ea68dc9
--- /dev/null
@@ -0,0 +1,972 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/scopes.h"
+#include "src/x64/assembler-x64.h"
+#include "src/x64/macro-assembler-x64.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// TODO(turbofan): Clean up these hacks.
+enum Immediate64Type { kImm64Value, kImm64Handle, kImm64Reference };
+
+
+struct Immediate64 {
+  uint64_t value;
+  Handle<Object> handle;
+  ExternalReference reference;
+  Immediate64Type type;
+};
+
+
+enum RegisterOrOperandType { kRegister, kDoubleRegister, kOperand };
+
+
+struct RegisterOrOperand {
+  RegisterOrOperand() : operand(no_reg, 0) {}
+  Register reg;
+  DoubleRegister double_reg;
+  Operand operand;
+  RegisterOrOperandType type;
+};
+
+
+// Adds X64 specific methods for decoding operands.
+class X64OperandConverter : public InstructionOperandConverter {
+ public:
+  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  RegisterOrOperand InputRegisterOrOperand(int index) {
+    return ToRegisterOrOperand(instr_->InputAt(index));
+  }
+
+  Immediate InputImmediate(int index) {
+    return ToImmediate(instr_->InputAt(index));
+  }
+
+  RegisterOrOperand OutputRegisterOrOperand() {
+    return ToRegisterOrOperand(instr_->Output());
+  }
+
+  Immediate64 InputImmediate64(int index) {
+    return ToImmediate64(instr_->InputAt(index));
+  }
+
+  Immediate64 ToImmediate64(InstructionOperand* operand) {
+    Constant constant = ToConstant(operand);
+    Immediate64 immediate;
+    immediate.value = 0xbeefdeaddeefbeed;
+    immediate.type = kImm64Value;
+    switch (constant.type()) {
+      case Constant::kInt32:
+      case Constant::kInt64:
+        immediate.value = constant.ToInt64();
+        return immediate;
+      case Constant::kFloat64:
+        immediate.type = kImm64Handle;
+        immediate.handle =
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED);
+        return immediate;
+      case Constant::kExternalReference:
+        immediate.type = kImm64Reference;
+        immediate.reference = constant.ToExternalReference();
+        return immediate;
+      case Constant::kHeapObject:
+        immediate.type = kImm64Handle;
+        immediate.handle = constant.ToHeapObject();
+        return immediate;
+    }
+    UNREACHABLE();
+    return immediate;
+  }
+
+  Immediate ToImmediate(InstructionOperand* operand) {
+    Constant constant = ToConstant(operand);
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Immediate(constant.ToInt32());
+      case Constant::kInt64:
+      case Constant::kFloat64:
+      case Constant::kExternalReference:
+      case Constant::kHeapObject:
+        break;
+    }
+    UNREACHABLE();
+    return Immediate(-1);
+  }
+
+  Operand ToOperand(InstructionOperand* op, int extra = 0) {
+    RegisterOrOperand result = ToRegisterOrOperand(op, extra);
+    ASSERT_EQ(kOperand, result.type);
+    return result.operand;
+  }
+
+  RegisterOrOperand ToRegisterOrOperand(InstructionOperand* op, int extra = 0) {
+    RegisterOrOperand result;
+    if (op->IsRegister()) {
+      ASSERT(extra == 0);
+      result.type = kRegister;
+      result.reg = ToRegister(op);
+      return result;
+    } else if (op->IsDoubleRegister()) {
+      ASSERT(extra == 0);
+      result.type = kDoubleRegister;
+      result.double_reg = ToDoubleRegister(op);
+      return result;
+    }
+
+    ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+
+    result.type = kOperand;
+    // The linkage computes where all spill slots are located.
+    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
+    result.operand =
+        Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
+    return result;
+  }
+
+  Operand MemoryOperand(int* first_input) {
+    const int offset = *first_input;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_MR1I: {
+        *first_input += 2;
+        Register index = InputRegister(offset + 1);
+        return Operand(InputRegister(offset + 0), index, times_1,
+                       0);  // TODO(dcarney): K != 0
+      }
+      case kMode_MRI:
+        *first_input += 2;
+        return Operand(InputRegister(offset + 0), InputInt32(offset + 1));
+      default:
+        UNREACHABLE();
+        return Operand(no_reg, 0);
+    }
+  }
+
+  Operand MemoryOperand() {
+    int first_input = 0;
+    return MemoryOperand(&first_input);
+  }
+};
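+
+// Rough usage sketch: AssembleArchInstruction below decodes each instruction
+// via
+//   X64OperandConverter i(this, instr);
+//   RegisterOrOperand input = i.InputRegisterOrOperand(0);
+// so a single opcode can cover register, stack-slot and immediate shapes.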
+
+
+static bool HasImmediateInput(Instruction* instr, int index) {
+  return instr->InputAt(index)->IsImmediate();
+}
+
+
+#define ASSEMBLE_BINOP(asm_instr)                            \
+  do {                                                       \
+    if (HasImmediateInput(instr, 1)) {                       \
+      RegisterOrOperand input = i.InputRegisterOrOperand(0); \
+      if (input.type == kRegister) {                         \
+        __ asm_instr(input.reg, i.InputImmediate(1));        \
+      } else {                                               \
+        __ asm_instr(input.operand, i.InputImmediate(1));    \
+      }                                                      \
+    } else {                                                 \
+      RegisterOrOperand input = i.InputRegisterOrOperand(1); \
+      if (input.type == kRegister) {                         \
+        __ asm_instr(i.InputRegister(0), input.reg);         \
+      } else {                                               \
+        __ asm_instr(i.InputRegister(0), input.operand);     \
+      }                                                      \
+    }                                                        \
+  } while (0)
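+
+// For example, ASSEMBLE_BINOP(addl) expands to roughly
+//   addl reg_or_mem0, imm1   if input 1 is an immediate,
+//   addl reg0, reg_or_mem1   otherwise,
+// mirroring the two forms in which x64 encodes a binary ALU instruction.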
+
+
+#define ASSEMBLE_SHIFT(asm_instr, width)                                 \
+  do {                                                                   \
+    if (HasImmediateInput(instr, 1)) {                                   \
+      __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
+    } else {                                                             \
+      __ asm_instr##_cl(i.OutputRegister());                             \
+    }                                                                    \
+  } while (0)
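+
+// The |width| argument is the number of meaningful shift-count bits: 5 for
+// 32-bit shifts (counts 0..31) and 6 for 64-bit shifts (counts 0..63).
+// Variable shift counts must be in cl, hence the asm_instr##_cl form.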
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  X64OperandConverter i(this, instr);
+
+  switch (ArchOpcodeField::decode(instr->opcode())) {
+    case kArchJmp:
+      __ jmp(code_->GetLabel(i.InputBlock(0)));
+      break;
+    case kArchNop:
+      // Don't emit code for nops.
+      break;
+    case kArchRet:
+      AssembleReturn();
+      break;
+    case kArchDeoptimize: {
+      int deoptimization_id = MiscField::decode(instr->opcode());
+      BuildTranslation(instr, deoptimization_id);
+
+      Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+          isolate(), deoptimization_id, Deoptimizer::LAZY);
+      __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+      break;
+    }
+    case kX64Add32:
+      ASSEMBLE_BINOP(addl);
+      break;
+    case kX64Add:
+      ASSEMBLE_BINOP(addq);
+      break;
+    case kX64Sub32:
+      ASSEMBLE_BINOP(subl);
+      break;
+    case kX64Sub:
+      ASSEMBLE_BINOP(subq);
+      break;
+    case kX64And32:
+      ASSEMBLE_BINOP(andl);
+      break;
+    case kX64And:
+      ASSEMBLE_BINOP(andq);
+      break;
+    case kX64Cmp32:
+      ASSEMBLE_BINOP(cmpl);
+      break;
+    case kX64Cmp:
+      ASSEMBLE_BINOP(cmpq);
+      break;
+    case kX64Test32:
+      ASSEMBLE_BINOP(testl);
+      break;
+    case kX64Test:
+      ASSEMBLE_BINOP(testq);
+      break;
+    case kX64Imul32:
+      if (HasImmediateInput(instr, 1)) {
+        RegisterOrOperand input = i.InputRegisterOrOperand(0);
+        if (input.type == kRegister) {
+          __ imull(i.OutputRegister(), input.reg, i.InputImmediate(1));
+        } else {
+          __ movq(kScratchRegister, input.operand);
+          __ imull(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
+        }
+      } else {
+        RegisterOrOperand input = i.InputRegisterOrOperand(1);
+        if (input.type == kRegister) {
+          __ imull(i.OutputRegister(), input.reg);
+        } else {
+          __ imull(i.OutputRegister(), input.operand);
+        }
+      }
+      break;
+    case kX64Imul:
+      if (HasImmediateInput(instr, 1)) {
+        RegisterOrOperand input = i.InputRegisterOrOperand(0);
+        if (input.type == kRegister) {
+          __ imulq(i.OutputRegister(), input.reg, i.InputImmediate(1));
+        } else {
+          __ movq(kScratchRegister, input.operand);
+          __ imulq(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
+        }
+      } else {
+        RegisterOrOperand input = i.InputRegisterOrOperand(1);
+        if (input.type == kRegister) {
+          __ imulq(i.OutputRegister(), input.reg);
+        } else {
+          __ imulq(i.OutputRegister(), input.operand);
+        }
+      }
+      break;
+    case kX64Idiv32:
+      __ cdq();
+      __ idivl(i.InputRegister(1));
+      break;
+    case kX64Idiv:
+      __ cqo();
+      __ idivq(i.InputRegister(1));
+      break;
+    case kX64Udiv32:
+      __ xorl(rdx, rdx);
+      __ divl(i.InputRegister(1));
+      break;
+    case kX64Udiv:
+      __ xorq(rdx, rdx);
+      __ divq(i.InputRegister(1));
+      break;
+    case kX64Not: {
+      RegisterOrOperand output = i.OutputRegisterOrOperand();
+      if (output.type == kRegister) {
+        __ notq(output.reg);
+      } else {
+        __ notq(output.operand);
+      }
+      break;
+    }
+    case kX64Not32: {
+      RegisterOrOperand output = i.OutputRegisterOrOperand();
+      if (output.type == kRegister) {
+        __ notl(output.reg);
+      } else {
+        __ notl(output.operand);
+      }
+      break;
+    }
+    case kX64Neg: {
+      RegisterOrOperand output = i.OutputRegisterOrOperand();
+      if (output.type == kRegister) {
+        __ negq(output.reg);
+      } else {
+        __ negq(output.operand);
+      }
+      break;
+    }
+    case kX64Neg32: {
+      RegisterOrOperand output = i.OutputRegisterOrOperand();
+      if (output.type == kRegister) {
+        __ negl(output.reg);
+      } else {
+        __ negl(output.operand);
+      }
+      break;
+    }
+    case kX64Or32:
+      ASSEMBLE_BINOP(orl);
+      break;
+    case kX64Or:
+      ASSEMBLE_BINOP(orq);
+      break;
+    case kX64Xor32:
+      ASSEMBLE_BINOP(xorl);
+      break;
+    case kX64Xor:
+      ASSEMBLE_BINOP(xorq);
+      break;
+    case kX64Shl32:
+      ASSEMBLE_SHIFT(shll, 5);
+      break;
+    case kX64Shl:
+      ASSEMBLE_SHIFT(shlq, 6);
+      break;
+    case kX64Shr32:
+      ASSEMBLE_SHIFT(shrl, 5);
+      break;
+    case kX64Shr:
+      ASSEMBLE_SHIFT(shrq, 6);
+      break;
+    case kX64Sar32:
+      ASSEMBLE_SHIFT(sarl, 5);
+      break;
+    case kX64Sar:
+      ASSEMBLE_SHIFT(sarq, 6);
+      break;
+    case kX64Push: {
+      RegisterOrOperand input = i.InputRegisterOrOperand(0);
+      if (input.type == kRegister) {
+        __ pushq(input.reg);
+      } else {
+        __ pushq(input.operand);
+      }
+      break;
+    }
+    case kX64PushI:
+      __ pushq(i.InputImmediate(0));
+      break;
+    case kX64CallCodeObject: {
+      if (HasImmediateInput(instr, 0)) {
+        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+        __ Call(code, RelocInfo::CODE_TARGET);
+      } else {
+        Register reg = i.InputRegister(0);
+        int entry = Code::kHeaderSize - kHeapObjectTag;
+        __ Call(Operand(reg, entry));
+      }
+      RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+                      Safepoint::kNoLazyDeopt);
+      bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
+      if (lazy_deopt) {
+        RecordLazyDeoptimizationEntry(instr);
+      }
+      AddNopForSmiCodeInlining();
+      break;
+    }
+    case kX64CallAddress:
+      if (HasImmediateInput(instr, 0)) {
+        Immediate64 imm = i.InputImmediate64(0);
+        ASSERT_EQ(kImm64Value, imm.type);
+        __ Call(reinterpret_cast<byte*>(imm.value), RelocInfo::NONE64);
+      } else {
+        __ call(i.InputRegister(0));
+      }
+      break;
+    case kPopStack: {
+      int words = MiscField::decode(instr->opcode());
+      __ addq(rsp, Immediate(kPointerSize * words));
+      break;
+    }
+    case kX64CallJSFunction: {
+      Register func = i.InputRegister(0);
+
+      // TODO(jarin) The load of the context should be separated from the call.
+      __ movp(rsi, FieldOperand(func, JSFunction::kContextOffset));
+      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
+
+      RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+                      Safepoint::kNoLazyDeopt);
+      RecordLazyDeoptimizationEntry(instr);
+      break;
+    }
+    case kSSEFloat64Cmp: {
+      RegisterOrOperand input = i.InputRegisterOrOperand(1);
+      if (input.type == kDoubleRegister) {
+        __ ucomisd(i.InputDoubleRegister(0), input.double_reg);
+      } else {
+        __ ucomisd(i.InputDoubleRegister(0), input.operand);
+      }
+      break;
+    }
+    case kSSEFloat64Add:
+      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Sub:
+      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Mul:
+      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Div:
+      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Mod: {
+      __ subq(rsp, Immediate(kDoubleSize));
+      // Move values to st(0) and st(1).
+      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
+      __ fld_d(Operand(rsp, 0));
+      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
+      __ fld_d(Operand(rsp, 0));
+      // Loop while fprem isn't done.
+      Label mod_loop;
+      __ bind(&mod_loop);
+      // This instruction traps on all kinds of inputs, but we are assuming
+      // the floating point control word is set to ignore them all.
+      __ fprem();
+      // The following two instructions implicitly use rax.
+      __ fnstsw_ax();
+      if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
+        __ sahf();
+      } else {
+        __ shrl(rax, Immediate(8));
+        __ andl(rax, Immediate(0xFF));
+        __ pushq(rax);
+        __ popfq();
+      }
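+      // fprem reduces the exponent by at most 63 bits per pass and keeps C2
+      // set in the FPU status word while the reduction is incomplete; after
+      // the flag transfer above C2 lands in the parity flag, so parity_even
+      // means "not done yet".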
+      __ j(parity_even, &mod_loop);
+      // Move output to stack and clean up.
+      __ fstp(1);
+      __ fstp_d(Operand(rsp, 0));
+      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
+      __ addq(rsp, Immediate(kDoubleSize));
+      break;
+    }
+    case kX64Int32ToInt64:
+      // Sign-extend the low 32 bits; movzxwq would only zero-extend 16 bits.
+      __ movsxlq(i.OutputRegister(), i.InputRegister(0));
+      break;
+    case kX64Int64ToInt32:
+      __ Move(i.OutputRegister(), i.InputRegister(0));
+      break;
+    case kSSEFloat64ToInt32: {
+      RegisterOrOperand input = i.InputRegisterOrOperand(0);
+      if (input.type == kDoubleRegister) {
+        __ cvttsd2si(i.OutputRegister(), input.double_reg);
+      } else {
+        __ cvttsd2si(i.OutputRegister(), input.operand);
+      }
+      break;
+    }
+    case kSSEInt32ToFloat64: {
+      RegisterOrOperand input = i.InputRegisterOrOperand(0);
+      if (input.type == kRegister) {
+        __ cvtlsi2sd(i.OutputDoubleRegister(), input.reg);
+      } else {
+        __ cvtlsi2sd(i.OutputDoubleRegister(), input.operand);
+      }
+      break;
+    }
+    case kSSELoad:
+      __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+      break;
+    case kSSEStore: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ movsd(operand, i.InputDoubleRegister(index));
+      break;
+    }
+    case kX64LoadWord8:
+      __ movzxbl(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kX64StoreWord8: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ movb(operand, i.InputRegister(index));
+      break;
+    }
+    case kX64StoreWord8I: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ movb(operand, Immediate(i.InputInt8(index)));
+      break;
+    }
+    case kX64LoadWord16:
+      __ movzxwl(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kX64StoreWord16: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ movw(operand, i.InputRegister(index));
+      break;
+    }
+    case kX64StoreWord16I: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ movw(operand, Immediate(i.InputInt16(index)));
+      break;
+    }
+    case kX64LoadWord32:
+      __ movl(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kX64StoreWord32: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ movl(operand, i.InputRegister(index));
+      break;
+    }
+    case kX64StoreWord32I: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ movl(operand, i.InputImmediate(index));
+      break;
+    }
+    case kX64LoadWord64:
+      __ movq(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kX64StoreWord64: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ movq(operand, i.InputRegister(index));
+      break;
+    }
+    case kX64StoreWord64I: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ movq(operand, i.InputImmediate(index));
+      break;
+    }
+    case kX64StoreWriteBarrier: {
+      Register object = i.InputRegister(0);
+      Register index = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      __ movsxlq(index, index);
+      __ movq(Operand(object, index, times_1, 0), value);
+      __ leaq(index, Operand(object, index, times_1, 0));
+      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
+                                ? kSaveFPRegs
+                                : kDontSaveFPRegs;
+      __ RecordWrite(object, index, value, mode);
+      break;
+    }
+  }
+}
+
+
+// Assembles branches after this instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+                                       FlagsCondition condition) {
+  X64OperandConverter i(this, instr);
+  Label done;
+
+  // Emit a branch. The true and false targets are always the last two inputs
+  // to the instruction.
+  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+  bool fallthru = IsNextInAssemblyOrder(fblock);
+  Label* tlabel = code()->GetLabel(tblock);
+  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
+  switch (condition) {
+    case kUnorderedEqual:
+      __ j(parity_even, flabel, flabel_distance);
+    // Fall through.
+    case kEqual:
+      __ j(equal, tlabel);
+      break;
+    case kUnorderedNotEqual:
+      __ j(parity_even, tlabel);
+    // Fall through.
+    case kNotEqual:
+      __ j(not_equal, tlabel);
+      break;
+    case kSignedLessThan:
+      __ j(less, tlabel);
+      break;
+    case kSignedGreaterThanOrEqual:
+      __ j(greater_equal, tlabel);
+      break;
+    case kSignedLessThanOrEqual:
+      __ j(less_equal, tlabel);
+      break;
+    case kSignedGreaterThan:
+      __ j(greater, tlabel);
+      break;
+    case kUnorderedLessThan:
+      __ j(parity_even, flabel, flabel_distance);
+    // Fall through.
+    case kUnsignedLessThan:
+      __ j(below, tlabel);
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ j(parity_even, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      __ j(above_equal, tlabel);
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ j(parity_even, flabel, flabel_distance);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      __ j(below_equal, tlabel);
+      break;
+    case kUnorderedGreaterThan:
+      __ j(parity_even, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      __ j(above, tlabel);
+      break;
+  }
+  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
+  __ bind(&done);
+}
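+
+// Note on the kUnordered* cases above: ucomisd sets the parity flag when
+// either operand is NaN, so each unordered condition first branches on
+// parity_even to whichever target a NaN should take, then falls through to
+// the ordinary signed/unsigned test.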
+
+
+// Assembles boolean materializations after this instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  X64OperandConverter i(this, instr);
+  Label done;
+
+  // Materialize a full 32-bit 1 or 0 value.
+  Label check;
+  Register reg = i.OutputRegister();
+  Condition cc = no_condition;
+  switch (condition) {
+    case kUnorderedEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ movl(reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kEqual:
+      cc = equal;
+      break;
+    case kUnorderedNotEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ movl(reg, Immediate(1));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kNotEqual:
+      cc = not_equal;
+      break;
+    case kSignedLessThan:
+      cc = less;
+      break;
+    case kSignedGreaterThanOrEqual:
+      cc = greater_equal;
+      break;
+    case kSignedLessThanOrEqual:
+      cc = less_equal;
+      break;
+    case kSignedGreaterThan:
+      cc = greater;
+      break;
+    case kUnorderedLessThan:
+      __ j(parity_odd, &check, Label::kNear);
+      __ movl(reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedLessThan:
+      cc = below;
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ movl(reg, Immediate(1));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      cc = above_equal;
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ movl(reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      cc = below_equal;
+      break;
+    case kUnorderedGreaterThan:
+      __ j(parity_odd, &check, Label::kNear);
+      __ movl(reg, Immediate(1));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      cc = above;
+      break;
+  }
+  __ bind(&check);
+  __ setcc(cc, reg);
+  __ movzxbl(reg, reg);
+  __ bind(&done);
+}
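+
+// The shared tail (setcc; movzxbl) materializes the condition into the low
+// byte and zero-extends it, so the register holds exactly 0 or 1; the
+// unordered (NaN) cases store the constant directly and jump past it.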
+
+
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  int stack_slots = frame()->GetSpillSlotCount();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    __ pushq(rbp);
+    __ movq(rbp, rsp);
+    const RegList saves = descriptor->CalleeSavedRegisters();
+    if (saves != 0) {  // Save callee-saved registers.
+      int register_save_area_size = 0;
+      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+        if (!((1 << i) & saves)) continue;
+        __ pushq(Register::from_code(i));
+        register_save_area_size += kPointerSize;
+      }
+      frame()->SetRegisterSaveAreaSize(register_save_area_size);
+    }
+  } else if (descriptor->IsJSFunctionCall()) {
+    CompilationInfo* info = linkage()->info();
+    __ Prologue(info->IsCodePreAgingActive());
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+
+    // Sloppy mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
+    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+    if (info->strict_mode() == SLOPPY && !info->is_native()) {
+      Label ok;
+      StackArgumentsAccessor args(rbp, info->scope()->num_parameters());
+      __ movp(rcx, args.GetReceiverOperand());
+      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+      __ j(not_equal, &ok, Label::kNear);
+      __ movp(rcx, GlobalObjectOperand());
+      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
+      __ movp(args.GetReceiverOperand(), rcx);
+      __ bind(&ok);
+    }
+
+  } else {
+    __ StubPrologue();
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+  }
+  if (stack_slots > 0) {
+    __ subq(rsp, Immediate(stack_slots * kPointerSize));
+  }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    if (frame()->GetRegisterSaveAreaSize() > 0) {
+      // Remove this frame's spill slots first.
+      int stack_slots = frame()->GetSpillSlotCount();
+      if (stack_slots > 0) {
+        __ addq(rsp, Immediate(stack_slots * kPointerSize));
+      }
+      const RegList saves = descriptor->CalleeSavedRegisters();
+      // Restore registers.
+      if (saves != 0) {
+        for (int i = 0; i < Register::kNumRegisters; i++) {
+          if (!((1 << i) & saves)) continue;
+          __ popq(Register::from_code(i));
+        }
+      }
+      __ popq(rbp);  // Pop caller's frame pointer.
+      __ ret(0);
+    } else {
+      // No saved registers.
+      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
+      __ popq(rbp);       // Pop caller's frame pointer.
+      __ ret(0);
+    }
+  } else {
+    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
+    __ popq(rbp);       // Pop caller's frame pointer.
+    int pop_count =
+        descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
+    __ ret(pop_count * kPointerSize);
+  }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  X64OperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    ASSERT(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      __ movq(g.ToRegister(destination), src);
+    } else {
+      __ movq(g.ToOperand(destination), src);
+    }
+  } else if (source->IsStackSlot()) {
+    ASSERT(destination->IsRegister() || destination->IsStackSlot());
+    Operand src = g.ToOperand(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ movq(dst, src);
+    } else {
+      // Spill on demand to use a temporary register for memory-to-memory
+      // moves.
+      Register tmp = kScratchRegister;
+      Operand dst = g.ToOperand(destination);
+      __ movq(tmp, src);
+      __ movq(dst, tmp);
+    }
+  } else if (source->IsConstant()) {
+    ConstantOperand* constant_source = ConstantOperand::cast(source);
+    if (destination->IsRegister() || destination->IsStackSlot()) {
+      Register dst = destination->IsRegister() ? g.ToRegister(destination)
+                                               : kScratchRegister;
+      Immediate64 imm = g.ToImmediate64(constant_source);
+      switch (imm.type) {
+        case kImm64Value:
+          __ Set(dst, imm.value);
+          break;
+        case kImm64Reference:
+          __ Move(dst, imm.reference);
+          break;
+        case kImm64Handle:
+          __ Move(dst, imm.handle);
+          break;
+      }
+      if (destination->IsStackSlot()) {
+        __ movq(g.ToOperand(destination), kScratchRegister);
+      }
+    } else {
+      __ movq(kScratchRegister,
+              BitCast<uint64_t, double>(g.ToDouble(constant_source)));
+      if (destination->IsDoubleRegister()) {
+        __ movq(g.ToDoubleRegister(destination), kScratchRegister);
+      } else {
+        ASSERT(destination->IsDoubleStackSlot());
+        __ movq(g.ToOperand(destination), kScratchRegister);
+      }
+    }
+  } else if (source->IsDoubleRegister()) {
+    XMMRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      XMMRegister dst = g.ToDoubleRegister(destination);
+      __ movsd(dst, src);
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      Operand dst = g.ToOperand(destination);
+      __ movsd(dst, src);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    ASSERT(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    Operand src = g.ToOperand(source);
+    if (destination->IsDoubleRegister()) {
+      XMMRegister dst = g.ToDoubleRegister(destination);
+      __ movsd(dst, src);
+    } else {
+      // We rely on having xmm0 available as a fixed scratch register.
+      Operand dst = g.ToOperand(destination);
+      __ movsd(xmm0, src);
+      __ movsd(dst, xmm0);
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  X64OperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister() && destination->IsRegister()) {
+    // Register-register.
+    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
+  } else if (source->IsRegister() && destination->IsStackSlot()) {
+    Register src = g.ToRegister(source);
+    Operand dst = g.ToOperand(destination);
+    __ xchgq(src, dst);
+  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
+             (source->IsDoubleStackSlot() &&
+              destination->IsDoubleStackSlot())) {
+    // Memory-memory.
+    Register tmp = kScratchRegister;
+    Operand src = g.ToOperand(source);
+    Operand dst = g.ToOperand(destination);
+    __ movq(tmp, dst);
+    __ xchgq(tmp, src);
+    __ movq(dst, tmp);
+  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+    // XMM register-register swap. We rely on having xmm0
+    // available as a fixed scratch register.
+    XMMRegister src = g.ToDoubleRegister(source);
+    XMMRegister dst = g.ToDoubleRegister(destination);
+    __ movsd(xmm0, src);
+    __ movsd(src, dst);
+    __ movsd(dst, xmm0);
+  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+    // XMM register-memory swap.  We rely on having xmm0
+    // available as a fixed scratch register.
+    XMMRegister src = g.ToDoubleRegister(source);
+    Operand dst = g.ToOperand(destination);
+    __ movsd(xmm0, src);
+    __ movsd(src, dst);
+    __ movsd(dst, xmm0);
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
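+
+// Moves and swaps deliberately allocate no temporaries: kScratchRegister
+// (r10 on x64) and xmm0 are treated as fixed scratch registers by this
+// backend, so memory-to-memory and double transfers can always use them.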
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
+
+#undef __
+
+#ifdef DEBUG
+
+// Checks whether the code between start_pc and end_pc is a no-op.
+bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
+                                            int end_pc) {
+  if (start_pc + 1 != end_pc) {
+    return false;
+  }
+  return *(code->instruction_start() + start_pc) ==
+         v8::internal::Assembler::kNopByte;
+}
+
+#endif
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
new file mode 100644 (file)
index 0000000..307a184
--- /dev/null
@@ -0,0 +1,106 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
+#define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// X64-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(X64Add)                        \
+  V(X64Add32)                      \
+  V(X64And)                        \
+  V(X64And32)                      \
+  V(X64Cmp)                        \
+  V(X64Cmp32)                      \
+  V(X64Test)                       \
+  V(X64Test32)                     \
+  V(X64Or)                         \
+  V(X64Or32)                       \
+  V(X64Xor)                        \
+  V(X64Xor32)                      \
+  V(X64Sub)                        \
+  V(X64Sub32)                      \
+  V(X64Imul)                       \
+  V(X64Imul32)                     \
+  V(X64Idiv)                       \
+  V(X64Idiv32)                     \
+  V(X64Udiv)                       \
+  V(X64Udiv32)                     \
+  V(X64Not)                        \
+  V(X64Not32)                      \
+  V(X64Neg)                        \
+  V(X64Neg32)                      \
+  V(X64Shl)                        \
+  V(X64Shl32)                      \
+  V(X64Shr)                        \
+  V(X64Shr32)                      \
+  V(X64Sar)                        \
+  V(X64Sar32)                      \
+  V(X64Push)                       \
+  V(X64PushI)                      \
+  V(X64CallCodeObject)             \
+  V(X64CallAddress)                \
+  V(PopStack)                      \
+  V(X64CallJSFunction)             \
+  V(SSEFloat64Cmp)                 \
+  V(SSEFloat64Add)                 \
+  V(SSEFloat64Sub)                 \
+  V(SSEFloat64Mul)                 \
+  V(SSEFloat64Div)                 \
+  V(SSEFloat64Mod)                 \
+  V(X64Int32ToInt64)               \
+  V(X64Int64ToInt32)               \
+  V(SSEFloat64ToInt32)             \
+  V(SSEInt32ToFloat64)             \
+  V(SSELoad)                       \
+  V(SSEStore)                      \
+  V(X64LoadWord8)                  \
+  V(X64StoreWord8)                 \
+  V(X64StoreWord8I)                \
+  V(X64LoadWord16)                 \
+  V(X64StoreWord16)                \
+  V(X64StoreWord16I)               \
+  V(X64LoadWord32)                 \
+  V(X64StoreWord32)                \
+  V(X64StoreWord32I)               \
+  V(X64LoadWord64)                 \
+  V(X64StoreWord64)                \
+  V(X64StoreWord64I)               \
+  V(X64StoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MR = [register]
+// MI = [immediate]
+// MRN = [register + register * N in {1, 2, 4, 8}]
+// MRI = [register + immediate]
+// MRNI = [register + register * N in {1, 2, 4, 8} + immediate]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+  V(MR)   /* [%r1] */                  \
+  V(MRI)  /* [%r1 + K] */              \
+  V(MR1I) /* [%r1 + %r2*1 + K] */      \
+  V(MR2I) /* [%r1 + %r2*2 + K] */      \
+  V(MR4I) /* [%r1 + %r2*4 + K] */      \
+  V(MR8I) /* [%r1 + %r2*8 + K] */
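+
+// Rough sketch of how these lists are consumed: each V(...) entry expands
+// into an enum value (e.g. kMode_MR1I) plus a printable name, and the
+// instruction selector packs the chosen mode into the InstructionCode
+// bitfield next to the ArchOpcode, e.g.
+//   Emit(kX64LoadWord32 | AddressingModeField::encode(kMode_MRI), ...);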
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
new file mode 100644 (file)
index 0000000..7d71ace
--- /dev/null
@@ -0,0 +1,666 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Adds X64-specific methods for generating operands.
+class X64OperandGenerator V8_FINAL : public OperandGenerator {
+ public:
+  explicit X64OperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
+  InstructionOperand* TempRegister(Register reg) {
+    return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+                                           Register::ToAllocationIndex(reg));
+  }
+
+  InstructionOperand* UseByteRegister(Node* node) {
+    // TODO(dcarney): relax constraint.
+    return UseFixed(node, rdx);
+  }
+
+  InstructionOperand* UseImmediate64(Node* node) { return UseImmediate(node); }
+
+  bool CanBeImmediate(Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant:
+        return true;
+      default:
+        return false;
+    }
+  }
+
+  bool CanBeImmediate64(Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant:
+        return true;
+      case IrOpcode::kNumberConstant:
+        return true;
+      case IrOpcode::kHeapConstant: {
+        // Constants in new space cannot be used as immediates in V8 because
+        // the GC does not scan code objects when collecting the new generation.
+        Handle<HeapObject> value = ValueOf<Handle<HeapObject> >(node->op());
+        return !isolate()->heap()->InNewSpace(*value);
+      }
+      default:
+        return false;
+    }
+  }
+};
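+
+// CanBeImmediate vs. CanBeImmediate64 mirrors the x64 encoding rules: most
+// ALU instructions accept at most 32-bit immediates, while movq can
+// materialize full 64-bit constants (values, handles, external references),
+// which is what the Immediate64 path in the code generator implements.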
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+  MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
+  X64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  InstructionOperand* output = rep == kMachineFloat64
+                                   ? g.DefineAsDoubleRegister(node)
+                                   : g.DefineAsRegister(node);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kMachineFloat64:
+      opcode = kSSELoad;
+      break;
+    case kMachineWord8:
+      opcode = kX64LoadWord8;
+      break;
+    case kMachineWord16:
+      opcode = kX64LoadWord16;
+      break;
+    case kMachineWord32:
+      opcode = kX64LoadWord32;
+      break;
+    case kMachineTagged:  // Fall through.
+    case kMachineWord64:
+      opcode = kX64LoadWord64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(base)) {
+    // load [#base + %index]
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), output,
+         g.UseRegister(index), g.UseImmediate(base));
+  } else if (g.CanBeImmediate(index)) {  // load [%base + #index]
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), output,
+         g.UseRegister(base), g.UseImmediate(index));
+  } else {  // load [%base + %index + K]
+    Emit(opcode | AddressingModeField::encode(kMode_MR1I), output,
+         g.UseRegister(base), g.UseRegister(index));
+  }
+  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+}
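+
+// Note that when the *base* is the constant, base and index are simply
+// swapped (the address computation is commutative), so the same kMode_MRI
+// encoding serves both [#base + %index] and [%base + #index].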
+
+
+void InstructionSelector::VisitStore(Node* node) {
+  X64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+  MachineRepresentation rep = store_rep.rep;
+  if (store_rep.write_barrier_kind == kFullWriteBarrier) {
+    ASSERT(rep == kMachineTagged);
+    // TODO(dcarney): refactor RecordWrite function to take temp registers
+    //                and pass them here instead of using fixed regs
+    // TODO(dcarney): handle immediate indices.
+    InstructionOperand* temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
+    Emit(kX64StoreWriteBarrier, NULL, g.UseFixed(base, rbx),
+         g.UseFixed(index, rcx), g.UseFixed(value, rdx), ARRAY_SIZE(temps),
+         temps);
+    return;
+  }
+  ASSERT_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
+  bool is_immediate = false;
+  InstructionOperand* val;
+  if (rep == kMachineFloat64) {
+    val = g.UseDoubleRegister(value);
+  } else {
+    is_immediate = g.CanBeImmediate(value);
+    if (is_immediate) {
+      val = g.UseImmediate(value);
+    } else if (rep == kMachineWord8) {
+      val = g.UseByteRegister(value);
+    } else {
+      val = g.UseRegister(value);
+    }
+  }
+  ArchOpcode opcode;
+  switch (rep) {
+    case kMachineFloat64:
+      opcode = kSSEStore;
+      break;
+    case kMachineWord8:
+      opcode = is_immediate ? kX64StoreWord8I : kX64StoreWord8;
+      break;
+    case kMachineWord16:
+      opcode = is_immediate ? kX64StoreWord16I : kX64StoreWord16;
+      break;
+    case kMachineWord32:
+      opcode = is_immediate ? kX64StoreWord32I : kX64StoreWord32;
+      break;
+    case kMachineTagged:  // Fall through.
+    case kMachineWord64:
+      opcode = is_immediate ? kX64StoreWord64I : kX64StoreWord64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(base)) {
+    // store [#base + %index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+         g.UseRegister(index), g.UseImmediate(base), val);
+  } else if (g.CanBeImmediate(index)) {  // store [%base + #index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+         g.UseRegister(base), g.UseImmediate(index), val);
+  } else {  // store [%base + %index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
+         g.UseRegister(base), g.UseRegister(index), val);
+  }
+  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       ArchOpcode opcode, bool commutative) {
+  X64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  // TODO(turbofan): match complex addressing modes.
+  // TODO(turbofan): if commutative, pick the non-live-in operand as the left
+  // input, since this might be its last use and its register can be reused.
+  if (g.CanBeImmediate(right)) {
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
+                   g.UseImmediate(right));
+  } else if (commutative && g.CanBeImmediate(left)) {
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(right),
+                   g.UseImmediate(left));
+  } else {
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.Use(right));
+  }
+}
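+
+// For a commutative operation such as Int32Add this selects, roughly:
+//   add32 r, #imm    if either input is an immediate (commuted if needed),
+//   add32 r, r/mem   otherwise,
+// with the output constrained to the first input (DefineSameAsFirst) to
+// match x64's two-address instruction form.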
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  VisitBinop(this, node, kX64And32, true);
+}
+
+
+void InstructionSelector::VisitWord64And(Node* node) {
+  VisitBinop(this, node, kX64And, true);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  VisitBinop(this, node, kX64Or32, true);
+}
+
+
+void InstructionSelector::VisitWord64Or(Node* node) {
+  VisitBinop(this, node, kX64Or, true);
+}
+
+
+template <typename T>
+static void VisitXor(InstructionSelector* selector, Node* node,
+                     ArchOpcode xor_opcode, ArchOpcode not_opcode) {
+  X64OperandGenerator g(selector);
+  BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
+  if (m.right().Is(-1)) {
+    selector->Emit(not_opcode, g.DefineSameAsFirst(node),
+                   g.Use(m.left().node()));
+  } else {
+    VisitBinop(selector, node, xor_opcode, true);
+  }
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  VisitXor<int32_t>(this, node, kX64Xor32, kX64Not32);
+}
+
+
+void InstructionSelector::VisitWord64Xor(Node* node) {
+  VisitXor<int64_t>(this, node, kX64Xor, kX64Not);
+}
+
+
+// Shared routine for multiple 32-bit shift operations.
+// TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
+static void VisitWord32Shift(InstructionSelector* selector, Node* node,
+                             ArchOpcode opcode) {
+  X64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // TODO(turbofan): assembler only supports some addressing modes for shifts.
+  if (g.CanBeImmediate(right)) {
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseImmediate(right));
+  } else {
+    Int32BinopMatcher m(node);
+    if (m.right().IsWord32And()) {
+      Int32BinopMatcher mright(right);
+      if (mright.right().Is(0x1F)) {
+        right = mright.left().node();
+      }
+    }
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseFixed(right, rcx));
+  }
+}
+
+
+// Shared routine for multiple 64-bit shift operations.
+// TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
+static void VisitWord64Shift(InstructionSelector* selector, Node* node,
+                             ArchOpcode opcode) {
+  X64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // TODO(turbofan): assembler only supports some addressing modes for shifts.
+  if (g.CanBeImmediate(right)) {
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseImmediate(right));
+  } else {
+    Int64BinopMatcher m(node);
+    if (m.right().IsWord64And()) {
+      Int64BinopMatcher mright(right);
+      if (mright.right().Is(0x3F)) {
+        right = mright.left().node();
+      }
+    }
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseFixed(right, rcx));
+  }
+}
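+
+// Stripping the Word32And/Word64And above is sound because x64 hardware
+// masks variable shift counts to 5 bits (32-bit) or 6 bits (64-bit) anyway,
+// so an explicit "& 0x1F" / "& 0x3F" on the count is redundant for the
+// shift itself.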
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  VisitWord32Shift(this, node, kX64Shl32);
+}
+
+
+void InstructionSelector::VisitWord64Shl(Node* node) {
+  VisitWord64Shift(this, node, kX64Shl);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  VisitWord32Shift(this, node, kX64Shr32);
+}
+
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+  VisitWord64Shift(this, node, kX64Shr);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  VisitWord32Shift(this, node, kX64Sar32);
+}
+
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+  VisitWord64Shift(this, node, kX64Sar);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  VisitBinop(this, node, kX64Add32, true);
+}
+
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+  VisitBinop(this, node, kX64Add, true);
+}
+
+
+template <typename T>
+static void VisitSub(InstructionSelector* selector, Node* node,
+                     ArchOpcode sub_opcode, ArchOpcode neg_opcode) {
+  X64OperandGenerator g(selector);
+  BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
+  if (m.left().Is(0)) {
+    selector->Emit(neg_opcode, g.DefineSameAsFirst(node),
+                   g.Use(m.right().node()));
+  } else {
+    VisitBinop(selector, node, sub_opcode, false);
+  }
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  VisitSub<int32_t>(this, node, kX64Sub32, kX64Neg32);
+}
+
+
+void InstructionSelector::VisitInt64Sub(Node* node) {
+  VisitSub<int64_t>(this, node, kX64Sub, kX64Neg);
+}
+
+
+static void VisitMul(InstructionSelector* selector, Node* node,
+                     ArchOpcode opcode) {
+  X64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  if (g.CanBeImmediate(right)) {
+    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
+                   g.UseImmediate(right));
+  } else if (g.CanBeImmediate(left)) {
+    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(right),
+                   g.UseImmediate(left));
+  } else {
+    // TODO(turbofan): select better left operand.
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.Use(right));
+  }
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  VisitMul(this, node, kX64Imul32);
+}
+
+
+void InstructionSelector::VisitInt64Mul(Node* node) {
+  VisitMul(this, node, kX64Imul);
+}
+
+
+static void VisitDiv(InstructionSelector* selector, Node* node,
+                     ArchOpcode opcode) {
+  X64OperandGenerator g(selector);
+  InstructionOperand* temps[] = {g.TempRegister(rdx)};
+  selector->Emit(
+      opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
+      g.UseUniqueRegister(node->InputAt(1)), ARRAY_SIZE(temps), temps);
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  VisitDiv(this, node, kX64Idiv32);
+}
+
+
+void InstructionSelector::VisitInt64Div(Node* node) {
+  VisitDiv(this, node, kX64Idiv);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+  VisitDiv(this, node, kX64Udiv32);
+}
+
+
+void InstructionSelector::VisitInt64UDiv(Node* node) {
+  VisitDiv(this, node, kX64Udiv);
+}
+
+
+static void VisitMod(InstructionSelector* selector, Node* node,
+                     ArchOpcode opcode) {
+  X64OperandGenerator g(selector);
+  InstructionOperand* temps[] = {g.TempRegister(rax), g.TempRegister(rdx)};
+  selector->Emit(
+      opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax),
+      g.UseUniqueRegister(node->InputAt(1)), ARRAY_SIZE(temps), temps);
+}
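+
+// VisitDiv and VisitMod emit the same division instructions; x64's idiv/div
+// leaves the quotient in rax and the remainder in rdx, so division defines
+// its result as rax (clobbering rdx) while modulus defines it as rdx
+// (clobbering rax).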
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  VisitMod(this, node, kX64Idiv32);
+}
+
+
+void InstructionSelector::VisitInt64Mod(Node* node) {
+  VisitMod(this, node, kX64Idiv);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+  VisitMod(this, node, kX64Udiv32);
+}
+
+
+void InstructionSelector::VisitInt64UMod(Node* node) {
+  VisitMod(this, node, kX64Udiv);
+}
+
+
+void InstructionSelector::VisitConvertInt32ToFloat64(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEInt32ToFloat64, g.DefineAsDoubleRegister(node),
+       g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitConvertFloat64ToInt32(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
+       g.UseDoubleRegister(node->InputAt(0)),
+       g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
+       g.UseDoubleRegister(node->InputAt(0)),
+       g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
+       g.UseDoubleRegister(node->InputAt(0)),
+       g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
+       g.UseDoubleRegister(node->InputAt(0)),
+       g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  X64OperandGenerator g(this);
+  InstructionOperand* temps[] = {g.TempRegister(rax)};
+  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
+       g.UseDoubleRegister(node->InputAt(0)),
+       g.UseDoubleRegister(node->InputAt(1)), 1, temps);
+}
+
+
+void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
+  X64OperandGenerator g(this);
+  // TODO(dcarney): other modes
+  Emit(kX64Int64ToInt32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
+  X64OperandGenerator g(this);
+  // TODO(dcarney): other modes
+  Emit(kX64Int32ToInt64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                         InstructionOperand* left, InstructionOperand* right,
+                         FlagsContinuation* cont) {
+  X64OperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    ASSERT(cont->IsSet());
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  }
+}
+
+
+// Shared routine for multiple word compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+                             InstructionCode opcode, FlagsContinuation* cont,
+                             bool commutative) {
+  X64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right)) {
+    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
+  } else if (g.CanBeImmediate(left)) {
+    if (!commutative) cont->Commute();
+    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
+  } else {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
+  }
+}
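+
+// When the constant ends up on the left, the operands are swapped and the
+// continuation's condition is commuted (e.g. "5 < x" becomes "x > 5"),
+// which is what cont->Commute() encodes for non-commutative comparisons.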
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt32Sub:
+      return VisitWordCompare(this, node, kX64Cmp32, cont, false);
+    case IrOpcode::kWord32And:
+      return VisitWordCompare(this, node, kX64Test32, cont, true);
+    default:
+      break;
+  }
+
+  X64OperandGenerator g(this);
+  VisitCompare(this, kX64Test32, g.Use(node), g.TempImmediate(-1), cont);
+}
+
+
+void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt64Sub:
+      return VisitWordCompare(this, node, kX64Cmp, cont, false);
+    case IrOpcode::kWord64And:
+      return VisitWordCompare(this, node, kX64Test, cont, true);
+    default:
+      break;
+  }
+
+  X64OperandGenerator g(this);
+  VisitCompare(this, kX64Test, g.Use(node), g.TempImmediate(-1), cont);
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kX64Cmp32, cont, false);
+}
+
+
+void InstructionSelector::VisitWord64Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kX64Cmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+                                              FlagsContinuation* cont) {
+  X64OperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(this, kSSEFloat64Cmp, g.UseDoubleRegister(left), g.Use(right),
+               cont);
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+                                    BasicBlock* deoptimization) {
+  X64OperandGenerator g(this);
+  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+  CallBuffer buffer(zone(), descriptor);  // TODO(turbofan): temp zone here?
+
+  // Compute InstructionOperands for inputs and outputs.
+  InitializeCallBuffer(call, &buffer, true, true, continuation, deoptimization);
+
+  // TODO(dcarney): stack alignment for C calls.
+  // TODO(dcarney): shadow space on Windows for C calls.
+  // Push any stack arguments.
+  for (int i = buffer.pushed_count - 1; i >= 0; --i) {
+    Node* input = buffer.pushed_nodes[i];
+    // TODO(titzer): handle pushing double parameters.
+    if (g.CanBeImmediate(input)) {
+      Emit(kX64PushI, NULL, g.UseImmediate(input));
+    } else {
+      Emit(kX64Push, NULL, g.Use(input));
+    }
+  }
+
+  // Select the appropriate opcode based on the call type.
+  InstructionCode opcode;
+  switch (descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject: {
+      bool lazy_deopt = descriptor->CanLazilyDeoptimize();
+      opcode = kX64CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+      break;
+    }
+    case CallDescriptor::kCallAddress:
+      opcode = kX64CallAddress;
+      break;
+    case CallDescriptor::kCallJSFunction:
+      opcode = kX64CallJSFunction;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  // Emit the call instruction.
+  Instruction* call_instr =
+      Emit(opcode, buffer.output_count, buffer.outputs,
+           buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+
+  call_instr->MarkAsCall();
+  if (deoptimization != NULL) {
+    ASSERT(continuation != NULL);
+    call_instr->MarkAsControl();
+  }
+
+  // Caller clean up of stack for C-style calls.
+  if (descriptor->kind() == CallDescriptor::kCallAddress &&
+      buffer.pushed_count > 0) {
+    ASSERT(deoptimization == NULL && continuation == NULL);
+    Emit(kPopStack | MiscField::encode(buffer.pushed_count), NULL);
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/x64/linkage-x64.cc b/src/compiler/x64/linkage-x64.cc
new file mode 100644 (file)
index 0000000..3a92bb1
--- /dev/null
@@ -0,0 +1,77 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct LinkageHelperTraits {
+  static Register ReturnValueReg() { return rax; }
+  static Register ReturnValue2Reg() { return rdx; }
+  static Register JSCallFunctionReg() { return rdi; }
+  static Register ContextReg() { return rsi; }
+  static Register RuntimeCallFunctionReg() { return rbx; }
+  static Register RuntimeCallArgCountReg() { return rax; }
+#ifdef _WIN64
+  static RegList CCalleeSaveRegisters() {
+    return rbx.bit() | rdi.bit() | rsi.bit() | r12.bit() | r13.bit() |
+           r14.bit() | r15.bit();
+  }
+  static Register CRegisterParameter(int i) {
+    static Register register_parameters[] = {rcx, rdx, r8, r9};
+    return register_parameters[i];
+  }
+  static int CRegisterParametersLength() { return 4; }
+#else
+  static RegList CCalleeSaveRegisters() {
+    return rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit();
+  }
+  static Register CRegisterParameter(int i) {
+    static Register register_parameters[] = {rdi, rsi, rdx, rcx, r8, r9};
+    return register_parameters[i];
+  }
+  static int CRegisterParametersLength() { return 6; }
+#endif
+};
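+
+// The two #ifdef branches encode the platform C calling conventions: the
+// Microsoft x64 ABI passes the first four integer arguments in rcx, rdx,
+// r8 and r9 and treats rdi/rsi as callee-saved, while the System V AMD64
+// ABI passes six integer arguments in rdi, rsi, rdx, rcx, r8 and r9.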
+
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+  return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>(
+      zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Property properties,
+    CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+  return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>(
+      zone, function, parameter_count, properties, can_deoptimize);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count) {
+  return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
+      this->info_->zone(), descriptor, stack_parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(
+    Zone* zone, int num_params, MachineRepresentation return_type,
+    const MachineRepresentation* param_types) {
+  return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
+      zone, num_params, return_type, param_types);
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
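
The #ifdef above captures the real difference between the Win64 and System V x64 ABIs: four versus six integer parameter registers, and a callee-save set that on Windows additionally includes rdi and rsi. A self-contained sketch of how a call-descriptor builder might consume these traits (the loop is hypothetical; in the patch the actual consumer is LinkageHelper):

#include <cstdio>

// Stand-in for LinkageHelperTraits (System V variant); illustration only.
struct Traits {
  static const char* CRegisterParameter(int i) {
    static const char* kRegs[] = {"rdi", "rsi", "rdx", "rcx", "r8", "r9"};
    return kRegs[i];
  }
  static int CRegisterParametersLength() { return 6; }
};

int main() {
  // Where do eight C arguments live? The first six ride in registers;
  // the rest spill to stack slots. On Win64 the cutoff would be four.
  for (int i = 0; i < 8; i++) {
    if (i < Traits::CRegisterParametersLength()) {
      printf("arg %d -> %s\n", i, Traits::CRegisterParameter(i));
    } else {
      printf("arg %d -> [stack slot %d]\n", i,
             i - Traits::CRegisterParametersLength());
    }
  }
  return 0;
}
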
index 60cf93f8d35dce48439c70b035d27cc24b2fed95..a2041658a1585d6b2cd4e94a1aa5d8b912c17ec3 100644 (file)
@@ -140,8 +140,11 @@ Handle<Object> Context::Lookup(Handle<String> name,
       }
       VariableMode mode;
       InitializationFlag init_flag;
-      int slot_index =
-          ScopeInfo::ContextSlotIndex(scope_info, name, &mode, &init_flag);
+      // TODO(sigurds) Figure out whether maybe_assigned_flag should
+      // be used to compute binding_flags.
+      MaybeAssignedFlag maybe_assigned_flag;
+      int slot_index = ScopeInfo::ContextSlotIndex(
+          scope_info, name, &mode, &init_flag, &maybe_assigned_flag);
       ASSERT(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
       if (slot_index >= 0) {
         if (FLAG_trace_contexts) {
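
The ASSERT above states the ContextSlotIndex contract: a negative result means "not found", and any hit lands at or beyond the slots reserved for the context header. A toy model of that contract (kMinContextSlots is a placeholder for the real Context::MIN_CONTEXT_SLOTS constant):

#include <cassert>

const int kMinContextSlots = 4;  // placeholder value

// Hypothetical lookup: header slots are never handed out, so a hit is
// always offset past them, and a miss is reported as -1.
int ContextSlotIndex(bool found, int variable_index) {
  return found ? kMinContextSlots + variable_index : -1;
}

int main() {
  int slot_index = ContextSlotIndex(true, 2);
  assert(slot_index < 0 || slot_index >= kMinContextSlots);
  assert(ContextSlotIndex(false, 0) == -1);
  return 0;
}
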
index 08773b79a587d52d4ee71539025c088a348108f1..b1237370c45e2edf8db59fc353d8bdf4aa874258 100644 (file)
@@ -101,6 +101,25 @@ enum BindingFlags {
   V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun)                        \
   V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun)                        \
   V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun)          \
+  V(MATH_ABS_FUN_INDEX, JSFunction, math_abs_fun)                              \
+  V(MATH_ACOS_FUN_INDEX, JSFunction, math_acos_fun)                            \
+  V(MATH_ASIN_FUN_INDEX, JSFunction, math_asin_fun)                            \
+  V(MATH_ATAN_FUN_INDEX, JSFunction, math_atan_fun)                            \
+  V(MATH_ATAN2_FUN_INDEX, JSFunction, math_atan2_fun)                          \
+  V(MATH_CEIL_FUN_INDEX, JSFunction, math_ceil_fun)                            \
+  V(MATH_COS_FUN_INDEX, JSFunction, math_cos_fun)                              \
+  V(MATH_EXP_FUN_INDEX, JSFunction, math_exp_fun)                              \
+  V(MATH_FLOOR_FUN_INDEX, JSFunction, math_floor_fun)                          \
+  V(MATH_IMUL_FUN_INDEX, JSFunction, math_imul_fun)                            \
+  V(MATH_LOG_FUN_INDEX, JSFunction, math_log_fun)                              \
+  V(MATH_MAX_FUN_INDEX, JSFunction, math_max_fun)                              \
+  V(MATH_MIN_FUN_INDEX, JSFunction, math_min_fun)                              \
+  V(MATH_POW_FUN_INDEX, JSFunction, math_pow_fun)                              \
+  V(MATH_RANDOM_FUN_INDEX, JSFunction, math_random_fun)                        \
+  V(MATH_ROUND_FUN_INDEX, JSFunction, math_round_fun)                          \
+  V(MATH_SIN_FUN_INDEX, JSFunction, math_sin_fun)                              \
+  V(MATH_SQRT_FUN_INDEX, JSFunction, math_sqrt_fun)                            \
+  V(MATH_TAN_FUN_INDEX, JSFunction, math_tan_fun)                              \
   V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun)                      \
   V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun)                        \
   V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun)                          \
@@ -291,6 +310,25 @@ class Context: public FixedArray {
     GLOBAL_EVAL_FUN_INDEX,
     INSTANTIATE_FUN_INDEX,
     CONFIGURE_INSTANCE_FUN_INDEX,
+    MATH_ABS_FUN_INDEX,
+    MATH_ACOS_FUN_INDEX,
+    MATH_ASIN_FUN_INDEX,
+    MATH_ATAN_FUN_INDEX,
+    MATH_ATAN2_FUN_INDEX,
+    MATH_CEIL_FUN_INDEX,
+    MATH_COS_FUN_INDEX,
+    MATH_EXP_FUN_INDEX,
+    MATH_FLOOR_FUN_INDEX,
+    MATH_IMUL_FUN_INDEX,
+    MATH_LOG_FUN_INDEX,
+    MATH_MAX_FUN_INDEX,
+    MATH_MIN_FUN_INDEX,
+    MATH_POW_FUN_INDEX,
+    MATH_RANDOM_FUN_INDEX,
+    MATH_ROUND_FUN_INDEX,
+    MATH_SIN_FUN_INDEX,
+    MATH_SQRT_FUN_INDEX,
+    MATH_TAN_FUN_INDEX,
     ARRAY_BUFFER_FUN_INDEX,
     UINT8_ARRAY_FUN_INDEX,
     INT8_ARRAY_FUN_INDEX,
index 5c214ae8319a8bb332b3ed1fb722b6b7d2cacf7d..58ef34781ee5cc14bcf625c8e289d7acc4e48717 100644 (file)
@@ -164,6 +164,15 @@ class BitVector: public ZoneObject {
     return true;
   }
 
+  int Count() const {
+    int count = 0;
+    for (int i = 0; i < data_length_; i++) {
+      int data = data_[i];
+      if (data != 0) count += CompilerIntrinsics::CountSetBits(data);
+    }
+    return count;
+  }
+
   int length() const { return length_; }
 
 #ifdef DEBUG
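
BitVector::Count above leans on CompilerIntrinsics::CountSetBits, which typically maps to a compiler builtin. A portable sketch of the same computation, using Kernighan's trick of clearing the lowest set bit once per iteration:

#include <cassert>

// Each v &= v - 1 clears exactly one set bit, so the loop runs once per
// set bit in the word.
int CountSetBits(unsigned v) {
  int count = 0;
  while (v != 0) {
    v &= v - 1;
    ++count;
  }
  return count;
}

int main() {
  assert(CountSetBits(0u) == 0);
  assert(CountSetBits(0xF0u) == 4);
  assert(CountSetBits(0xFFFFFFFFu) == 32);
  return 0;
}
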
index e6432c94d16d907aa7768f26996ed78d9f776d50..e90a5272ad38a9825b8553627b8c26fa18281124 100644 (file)
@@ -352,8 +352,11 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
       }
       SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
       int deopt_index = safepoint.deoptimization_index();
-      bool safe_to_deopt = deopt_index != Safepoint::kNoDeoptimizationIndex;
-      CHECK(topmost_optimized_code == NULL || safe_to_deopt);
+      // For TurboFan code, deopt safety is checked later, when we patch
+      // return addresses on the stack.
+      bool turbofanned = code->is_turbofanned();
+      bool safe_to_deopt =
+          deopt_index != Safepoint::kNoDeoptimizationIndex || turbofanned;
+      CHECK(topmost_optimized_code == NULL || safe_to_deopt || turbofanned);
       if (topmost_optimized_code == NULL) {
         topmost_optimized_code = code;
         safe_to_deopt_topmost_optimized_code = safe_to_deopt;
@@ -374,6 +377,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
     Code* code = Code::cast(element);
     CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
     Object* next = code->next_code_link();
+
     if (code->marked_for_deoptimization()) {
       // Put the code into the list for later patching.
       codes.Add(code, &zone);
@@ -396,6 +400,10 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
     element = next;
   }
 
+  if (FLAG_turbo_deoptimization) {
+    PatchStackForMarkedCode(isolate);
+  }
+
   // TODO(titzer): we need a handle scope only because of the macro assembler,
   // which is only used in EnsureCodeForDeoptimizationEntry.
   HandleScope scope(isolate);
@@ -408,13 +416,73 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
     }
 #endif
     // It is finally time to die, code object.
+
+    // Remove the code from optimized code map.
+    DeoptimizationInputData* deopt_data =
+        DeoptimizationInputData::cast(codes[i]->deoptimization_data());
+    SharedFunctionInfo* shared =
+        SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+    shared->EvictFromOptimizedCodeMap(codes[i], "deoptimized code");
+
     // Do platform-specific patching to force any activations to lazy deopt.
-    PatchCodeForDeoptimization(isolate, codes[i]);
+    //
+    // We skip patching TurboFan code here; instead we patch return
+    // addresses on the stack.
+    // TODO(jarin) We should still zap the code object (but we have to
+    // be careful not to zap the deoptimization block).
+    if (!codes[i]->is_turbofanned()) {
+      PatchCodeForDeoptimization(isolate, codes[i]);
+
+      // We might be in the middle of incremental marking with compaction.
+      // Tell collector to treat this code object in a special way and
+      // ignore all slots that might have been recorded on it.
+      isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
+    }
+  }
+}
+
+
+static int FindPatchAddressForReturnAddress(Code* code, int pc) {
+  DeoptimizationInputData* input_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+  int patch_count = input_data->ReturnAddressPatchCount();
+  for (int i = 0; i < patch_count; i++) {
+    int return_pc = input_data->ReturnAddressPc(i)->value();
+    if (pc == return_pc) {
+      return input_data->PatchedAddressPc(i)->value();
+    }
+  }
+  return -1;
+}
+
 
-    // We might be in the middle of incremental marking with compaction.
-    // Tell collector to treat this code object in a special way and
-    // ignore all slots that might have been recorded on it.
-    isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
+// For all marked TurboFan code on the stack, change the return address to
+// point to the deoptimization block.
+void Deoptimizer::PatchStackForMarkedCode(Isolate* isolate) {
+  // TODO(jarin) We should tolerate a missing patch entry for the topmost
+  // frame.
+  for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
+       it.Advance()) {
+    StackFrame::Type type = it.frame()->type();
+    if (type == StackFrame::OPTIMIZED) {
+      Code* code = it.frame()->LookupCode();
+      if (code->is_turbofanned() && code->marked_for_deoptimization()) {
+        JSFunction* function =
+            static_cast<OptimizedFrame*>(it.frame())->function();
+        Address* pc_address = it.frame()->pc_address();
+        int pc_offset = *pc_address - code->instruction_start();
+        int new_pc_offset = FindPatchAddressForReturnAddress(code, pc_offset);
+
+        if (FLAG_trace_deopt) {
+          CodeTracer::Scope scope(isolate->GetCodeTracer());
+          PrintF(scope.file(), "[patching stack address for function: ");
+          function->PrintName(scope.file());
+          PrintF(scope.file(), " (Pc offset %i -> %i)]\n", pc_offset,
+                 new_pc_offset);
+        }
+
+        CHECK_LE(0, new_pc_offset);
+        *pc_address += new_pc_offset - pc_offset;
+      }
+    }
   }
 }
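
The patch step above is pure pointer arithmetic: the frame's saved return address is rebased from its old offset inside the code object to the offset of the deoptimization block. A toy model of that rebasing (all addresses and offsets below are invented):

#include <cassert>
#include <stdint.h>

int main() {
  // Simulated code object: instructions start at 0x1000, the frame's
  // saved return address points at offset 0x24, and the deopt block for
  // that return site sits at offset 0x80.
  uintptr_t instruction_start = 0x1000;
  uintptr_t return_address = instruction_start + 0x24;
  int pc_offset = static_cast<int>(return_address - instruction_start);
  int new_pc_offset = 0x80;  // what FindPatchAddressForReturnAddress yields

  // Same rebasing as `*pc_address += new_pc_offset - pc_offset` above.
  return_address += new_pc_offset - pc_offset;
  assert(return_address == instruction_start + 0x80);
  return 0;
}
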
 
@@ -903,7 +971,10 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
   intptr_t top_address;
   if (is_bottommost) {
     // Determine whether the input frame contains alignment padding.
-    has_alignment_padding_ = HasAlignmentPadding(function) ? 1 : 0;
+    has_alignment_padding_ =
+        (!compiled_code_->is_turbofanned() && HasAlignmentPadding(function))
+            ? 1
+            : 0;
     // 2 = context and function in the frame.
     // If the optimized frame had alignment padding, adjust the frame pointer
     // to point to the new position of the old frame pointer after padding
index 1a6f668d8f3c99bea2019298493f3df6d6bfbd9e..02449f1c7033a5200e18da2e6255597055504773 100644 (file)
@@ -177,6 +177,8 @@ class Deoptimizer : public Malloced {
   // refer to that code.
   static void DeoptimizeMarkedCode(Isolate* isolate);
 
+  static void PatchStackForMarkedCode(Isolate* isolate);
+
   // Visit all the known optimized functions in a given isolate.
   static void VisitAllOptimizedFunctions(
       Isolate* isolate, OptimizedFunctionVisitor* visitor);
index 318adddf9f2f2839541da277007bd385a36582fa..945ebf92f77f6e44d043521f3e8fa3ff4f8cc4cf 100644 (file)
@@ -6,7 +6,6 @@
 #define V8_ELEMENTS_KIND_H_
 
 #include "src/checks.h"
-#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
index 096b33509f7d40dd16d7f046df897b70d511fdf5..08c84058e0048b5a64effc9e15eb17a21fb3d106 100644 (file)
@@ -95,13 +95,13 @@ class FieldIndex V8_FINAL {
   class IsInObjectBits: public BitField<bool, IndexBits::kNext, 1> {};
   class IsDoubleBits: public BitField<bool, IsInObjectBits::kNext, 1> {};
   // Number of inobject properties.
-  class InObjectPropertyBits: public BitField<int, IsDoubleBits::kNext,
-                                              kDescriptorIndexBitCount> {};
+  class InObjectPropertyBits
+      : public BitField<int, IsDoubleBits::kNext, kDescriptorIndexBitCount> {};
   // Offset of first inobject property from beginning of object.
-  class FirstInobjectPropertyOffsetBits:
-      public BitField<int, InObjectPropertyBits::kNext, 7> {};
-  class IsHiddenField:
-      public BitField<bool, FirstInobjectPropertyOffsetBits::kNext, 1> {};
+  class FirstInobjectPropertyOffsetBits
+      : public BitField<int, InObjectPropertyBits::kNext, 7> {};
+  class IsHiddenField
+      : public BitField<bool, FirstInobjectPropertyOffsetBits::kNext, 1> {};
   STATIC_ASSERT(IsHiddenField::kNext <= 32);
 
   int bit_field_;
index 3fc16fbef392c220a762c873bfa074c9056336ae..5ee23ab619ef7910774f6d0bc616a4aa7a484275 100644 (file)
@@ -333,6 +333,20 @@ DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
             "do not emit check maps for constant values that have a leaf map, "
             "deoptimize the optimized code if the layout of the maps changes.")
 
+// Flags for TurboFan.
+DEFINE_STRING(turbo_filter, "~", "optimization filter for TurboFan compiler")
+DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
+DEFINE_BOOL(trace_turbo_types, true, "trace generated TurboFan types")
+DEFINE_BOOL(trace_turbo_scheduler, false, "trace generated TurboFan scheduler")
+DEFINE_BOOL(turbo_verify, false, "verify TurboFan graphs at each phase")
+DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
+DEFINE_BOOL(turbo_types, false, "use typed lowering in TurboFan")
+DEFINE_BOOL(turbo_source_positions, false,
+            "track source code positions when building TurboFan IR")
+DEFINE_BOOL(context_specialization, true,
+            "enable context specialization in TurboFan")
+DEFINE_BOOL(turbo_deoptimization, false, "enable deoptimization in TurboFan")
+
 DEFINE_INT(typed_array_max_size_in_heap, 64,
            "threshold for in-heap typed array")
 
@@ -727,6 +741,10 @@ DEFINE_BOOL(verify_native_context_separation, false,
 DEFINE_BOOL(print_handles, false, "report handles after GC")
 DEFINE_BOOL(print_global_handles, false, "report global handles after GC")
 
+// TurboFan debug-only flags.
+DEFINE_BOOL(print_turbo_replay, false,
+            "print C++ code to recreate TurboFan graphs")
+
 // interface.cc
 DEFINE_BOOL(print_interfaces, false, "print interfaces")
 DEFINE_BOOL(print_interface_details, false, "print interface inference details")
index 7a675cfd443d7138f5982737d511e1a47263810d..1a5993d9dd32203cd182517d98749cbc2bf7a81b 100644 (file)
@@ -931,6 +931,12 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
   ASSERT(frames->length() == 0);
   ASSERT(is_optimized());
 
+  // Delegate to the JS frame in the absence of inlining.
+  // TODO(turbofan): Revisit once we support inlining.
+  if (LookupCode()->is_turbofanned()) {
+    return JavaScriptFrame::Summarize(frames);
+  }
+
   int deopt_index = Safepoint::kNoDeoptimizationIndex;
   DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
   FixedArray* literal_array = data->LiteralArray();
@@ -940,10 +946,7 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
   // throw. An entry with no deoptimization index indicates a call-site
   // without a lazy-deopt. As a consequence we are not allowed to inline
   // functions containing throw.
-  if (deopt_index == Safepoint::kNoDeoptimizationIndex) {
-    JavaScriptFrame::Summarize(frames);
-    return;
-  }
+  ASSERT(deopt_index != Safepoint::kNoDeoptimizationIndex);
 
   TranslationIterator it(data->TranslationByteArray(),
                          data->TranslationIndex(deopt_index)->value());
@@ -1055,6 +1058,12 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
 int OptimizedFrame::GetInlineCount() {
   ASSERT(is_optimized());
 
+  // Delegate to the JS frame in the absence of inlining.
+  // TODO(turbofan): Revisit once we support inlining.
+  if (LookupCode()->is_turbofanned()) {
+    return JavaScriptFrame::GetInlineCount();
+  }
+
   int deopt_index = Safepoint::kNoDeoptimizationIndex;
   DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
 
@@ -1073,6 +1082,12 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
   ASSERT(functions->length() == 0);
   ASSERT(is_optimized());
 
+  // Delegate to the JS frame in the absence of inlining.
+  // TODO(turbofan): Revisit once we support inlining.
+  if (LookupCode()->is_turbofanned()) {
+    return JavaScriptFrame::GetFunctions(functions);
+  }
+
   int deopt_index = Safepoint::kNoDeoptimizationIndex;
   DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
   FixedArray* literal_array = data->LiteralArray();
index f3a5cb55a266add09583dc392338c1e24596d32c..460a853e6c4bc94a3d19b11f82a625b4db03b869 100644 (file)
@@ -14,6 +14,7 @@
 #include "src/global-handles.h"
 #include "src/messages.h"
 #include "src/natives.h"
+#include "src/ostreams.h"
 #include "src/scopes.h"
 
 namespace v8 {
index 5959750a72a549aaaded258f49a9b8b306268f89..6b6a816497a97fa76034d2d4047553bb2c60a4a0 100644 (file)
@@ -722,6 +722,9 @@ enum InitializationFlag {
 };
 
 
+enum MaybeAssignedFlag { kNotAssigned, kMaybeAssigned };
+
+
 enum ClearExceptionFlag {
   KEEP_EXCEPTION,
   CLEAR_EXCEPTION
index 71a7a2f85670483ff7bec1bba48cbcadcf1b5598..aac4e0e4d30ee7ab2f51d40c90e3b6a2e0656b3d 100644 (file)
@@ -8,12 +8,13 @@
 #include "src/compiler.h"
 #include "src/hydrogen.h"
 #include "src/hydrogen-instructions.h"
-#include "src/ostreams.h"
 #include "src/zone.h"
 
 namespace v8 {
 namespace internal {
 
+class OStream;
+
 // This class extends GVNFlagSet with additional "special" dynamic side effects,
 // which can be used to represent side effects that cannot be expressed using
 // the GVNFlags of an HInstruction. These special side effects are tracked by a
index 82c861ff0f994db03f7c376380d6fd99e7fc8128..a78bbee255505145d2869f3bb21b616fc6707741 100644 (file)
@@ -14,7 +14,6 @@
 #include "src/deoptimizer.h"
 #include "src/feedback-slots.h"
 #include "src/hydrogen-types.h"
-#include "src/ostreams.h"
 #include "src/small-pointer-list.h"
 #include "src/unique.h"
 #include "src/utils.h"
@@ -35,6 +34,7 @@ class HStoreNamedField;
 class HValue;
 class LInstruction;
 class LChunkBuilder;
+class OStream;
 
 #define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V)  \
   V(ArithmeticBinaryOperation)                 \
index 114aeeaa9cf693bcb098ddc7db65166f630c4393..9ce9e3f1f1177cba5d10d19f69a1b7a896e8be96 100644 (file)
@@ -4,6 +4,7 @@
 
 #include "src/hydrogen-types.h"
 
+#include "src/ostreams.h"
 #include "src/types-inl.h"
 
 
index 08550f837d5100f4df46b9990b002ce07f67d5ff..d662a167b9fdae221e1cc117aa499243031a703b 100644 (file)
@@ -8,7 +8,6 @@
 #include <climits>
 
 #include "src/base/macros.h"
-#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
@@ -16,6 +15,7 @@ namespace internal {
 // Forward declarations.
 template <typename T> class Handle;
 class Object;
+class OStream;
 
 #define HTYPE_LIST(V)                                 \
   V(Any, 0x0)              /* 0000 0000 0000 0000 */  \
index e85e16a6386b28f93ff661421cd8786ed8eb2608..622e37a1959eab9ee845d6a637046fdc1a19a48d 100644 (file)
@@ -12406,15 +12406,22 @@ void HStatistics::Initialize(CompilationInfo* info) {
 }
 
 
-void HStatistics::Print() {
-  PrintF("Timing results:\n");
+void HStatistics::Print(const char* stats_name) {
+  PrintF(
+      "\n"
+      "----------------------------------------"
+      "----------------------------------------\n"
+      "--- %s timing results:\n"
+      "----------------------------------------"
+      "----------------------------------------\n",
+      stats_name);
   base::TimeDelta sum;
   for (int i = 0; i < times_.length(); ++i) {
     sum += times_[i];
   }
 
   for (int i = 0; i < names_.length(); ++i) {
-    PrintF("%32s", names_[i]);
+    PrintF("%33s", names_[i]);
     double ms = times_[i].InMillisecondsF();
     double percent = times_[i].PercentOf(sum);
     PrintF(" %8.3f ms / %4.1f %% ", ms, percent);
@@ -12424,26 +12431,22 @@ void HStatistics::Print() {
     PrintF(" %9u bytes / %4.1f %%\n", size, size_percent);
   }
 
-  PrintF("----------------------------------------"
-         "---------------------------------------\n");
+  PrintF(
+      "----------------------------------------"
+      "----------------------------------------\n");
   base::TimeDelta total = create_graph_ + optimize_graph_ + generate_code_;
-  PrintF("%32s %8.3f ms / %4.1f %% \n",
-         "Create graph",
-         create_graph_.InMillisecondsF(),
-         create_graph_.PercentOf(total));
-  PrintF("%32s %8.3f ms / %4.1f %% \n",
-         "Optimize graph",
-         optimize_graph_.InMillisecondsF(),
-         optimize_graph_.PercentOf(total));
-  PrintF("%32s %8.3f ms / %4.1f %% \n",
-         "Generate and install code",
-         generate_code_.InMillisecondsF(),
-         generate_code_.PercentOf(total));
-  PrintF("----------------------------------------"
-         "---------------------------------------\n");
-  PrintF("%32s %8.3f ms (%.1f times slower than full code gen)\n",
-         "Total",
-         total.InMillisecondsF(),
+  PrintF("%33s %8.3f ms / %4.1f %% \n", "Create graph",
+         create_graph_.InMillisecondsF(), create_graph_.PercentOf(total));
+  PrintF("%33s %8.3f ms / %4.1f %% \n", "Optimize graph",
+         optimize_graph_.InMillisecondsF(), optimize_graph_.PercentOf(total));
+  PrintF("%33s %8.3f ms / %4.1f %% \n", "Generate and install code",
+         generate_code_.InMillisecondsF(), generate_code_.PercentOf(total));
+  PrintF(
+      "----------------------------------------"
+      "----------------------------------------\n");
+  PrintF("%33s %8.3f ms           %9u bytes\n", "Total",
+         total.InMillisecondsF(), total_size_);
+  PrintF("%33s     (%.1f times slower than full code gen)\n", "",
          total.TimesOf(full_code_gen_));
 
   double source_size_in_kb = static_cast<double>(source_size_) / 1024;
@@ -12453,9 +12456,8 @@ void HStatistics::Print() {
   double normalized_size_in_kb = source_size_in_kb > 0
       ? total_size_ / 1024 / source_size_in_kb
       : 0;
-  PrintF("%32s %8.3f ms           %7.3f kB allocated\n",
-         "Average per kB source",
-         normalized_time, normalized_size_in_kb);
+  PrintF("%33s %8.3f ms           %7.3f kB allocated\n",
+         "Average per kB source", normalized_time, normalized_size_in_kb);
 }
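
The widened %33s columns and the PercentOf arithmetic above produce a fixed-width per-phase timing table. A toy reproduction of the row format (phase names match the code; the numbers are invented):

#include <cstdio>

int main() {
  const char* names[] = {"Create graph", "Optimize graph",
                         "Generate and install code"};
  double ms[] = {12.5, 40.0, 7.5};
  double total = ms[0] + ms[1] + ms[2];
  for (int i = 0; i < 3; i++) {
    // Same shape as the PrintF calls above: name, time, share of total.
    printf("%33s %8.3f ms / %4.1f %% \n", names[i], ms[i],
           100.0 * ms[i] / total);
  }
  return 0;
}
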
 
 
index d8d0da77fc2411973b577e24fe9c341706eb732d..cb5294dce803f392f8fd82b20eeda8a13fcd8299 100644 (file)
@@ -2753,19 +2753,27 @@ class HStatistics V8_FINAL: public Malloced {
         source_size_(0) { }
 
   void Initialize(CompilationInfo* info);
-  void Print();
+  void Print(const char* stats_name);
   void SaveTiming(const char* name, base::TimeDelta time, unsigned size);
 
   void IncrementFullCodeGen(base::TimeDelta full_code_gen) {
     full_code_gen_ += full_code_gen;
   }
 
+  void IncrementCreateGraph(base::TimeDelta delta) { create_graph_ += delta; }
+
+  void IncrementOptimizeGraph(base::TimeDelta delta) {
+    optimize_graph_ += delta;
+  }
+
+  void IncrementGenerateCode(base::TimeDelta delta) { generate_code_ += delta; }
+
   void IncrementSubtotals(base::TimeDelta create_graph,
                           base::TimeDelta optimize_graph,
                           base::TimeDelta generate_code) {
-    create_graph_ += create_graph;
-    optimize_graph_ += optimize_graph;
-    generate_code_ += generate_code;
+    IncrementCreateGraph(create_graph);
+    IncrementOptimizeGraph(optimize_graph);
+    IncrementGenerateCode(generate_code);
   }
 
  private:
index ff8978c966b2274ee0883742d5dabdf803356f07..d945a6e82465dab467638ecbded3ebd8f7a4d28a 100644 (file)
@@ -560,6 +560,12 @@ Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {
   set_dispr(disp, rmode);
 }
 
+
+Operand::Operand(Immediate imm) {
+  // [disp/r]
+  set_modrm(0, ebp);
+  set_dispr(imm.x_, imm.rmode_);
+}
 } }  // namespace v8::internal
 
 #endif  // V8_IA32_ASSEMBLER_IA32_INL_H_
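
Operand(Immediate) above reuses a quirk of IA-32 encoding: in the ModRM byte, mod = 00 with rm = 101 (the hardware code of ebp) does not mean [ebp] but a bare 32-bit displacement, which is why set_modrm(0, ebp) yields the [disp/r] form. A sketch of that byte layout (field positions per the Intel manual):

#include <cassert>

// ModRM byte: mod (2 bits) | reg (3 bits) | rm (3 bits).
unsigned char ModRM(int mod, int reg, int rm) {
  return static_cast<unsigned char>((mod << 6) | (reg << 3) | rm);
}

int main() {
  const int ebp = 5;  // hardware register code of ebp
  // mod=00, rm=101 selects "[disp32]" absolute addressing, not "[ebp]".
  assert(ModRM(0, 0, ebp) == 0x05);
  return 0;
}
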
index 6497ba6db0a980e8af1c3b0153ea18a84b18db9d..e3f439e0fb64b6e33a60606a0b3357cbfbfb93ca 100644 (file)
@@ -634,6 +634,13 @@ void Assembler::xchg(Register dst, Register src) {
 }
 
 
+void Assembler::xchg(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x87);
+  emit_operand(dst, src);
+}
+
+
 void Assembler::adc(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
   emit_arith(2, Operand(dst), Immediate(imm32));
@@ -817,10 +824,17 @@ void Assembler::cdq() {
 }
 
 
-void Assembler::idiv(Register src) {
+void Assembler::idiv(const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF7);
+  emit_operand(edi, src);
+}
+
+
+void Assembler::div(const Operand& src) {
   EnsureSpace ensure_space(this);
   EMIT(0xF7);
-  EMIT(0xF8 | src.code());
+  emit_operand(esi, src);
 }
 
 
@@ -840,14 +854,19 @@ void Assembler::imul(Register dst, const Operand& src) {
 
 
 void Assembler::imul(Register dst, Register src, int32_t imm32) {
+  imul(dst, Operand(src), imm32);
+}
+
+
+void Assembler::imul(Register dst, const Operand& src, int32_t imm32) {
   EnsureSpace ensure_space(this);
   if (is_int8(imm32)) {
     EMIT(0x6B);
-    EMIT(0xC0 | dst.code() << 3 | src.code());
+    emit_operand(dst, src);
     EMIT(imm32);
   } else {
     EMIT(0x69);
-    EMIT(0xC0 | dst.code() << 3 | src.code());
+    emit_operand(dst, src);
     emit(imm32);
   }
 }
@@ -887,6 +906,13 @@ void Assembler::neg(Register dst) {
 }
 
 
+void Assembler::neg(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF7);
+  emit_operand(ebx, dst);
+}
+
+
 void Assembler::not_(Register dst) {
   EnsureSpace ensure_space(this);
   EMIT(0xF7);
@@ -894,6 +920,13 @@ void Assembler::not_(Register dst) {
 }
 
 
+void Assembler::not_(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF7);
+  emit_operand(edx, dst);
+}
+
+
 void Assembler::or_(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
   emit_arith(1, Operand(dst), Immediate(imm32));
@@ -969,24 +1002,24 @@ void Assembler::ror_cl(Register dst) {
 }
 
 
-void Assembler::sar(Register dst, uint8_t imm8) {
+void Assembler::sar(const Operand& dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
   ASSERT(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
-    EMIT(0xF8 | dst.code());
+    emit_operand(edi, dst);
   } else {
     EMIT(0xC1);
-    EMIT(0xF8 | dst.code());
+    emit_operand(edi, dst);
     EMIT(imm8);
   }
 }
 
 
-void Assembler::sar_cl(Register dst) {
+void Assembler::sar_cl(const Operand& dst) {
   EnsureSpace ensure_space(this);
   EMIT(0xD3);
-  EMIT(0xF8 | dst.code());
+  emit_operand(edi, dst);
 }
 
 
@@ -1005,24 +1038,24 @@ void Assembler::shld(Register dst, const Operand& src) {
 }
 
 
-void Assembler::shl(Register dst, uint8_t imm8) {
+void Assembler::shl(const Operand& dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
   ASSERT(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
-    EMIT(0xE0 | dst.code());
+    emit_operand(esp, dst);
   } else {
     EMIT(0xC1);
-    EMIT(0xE0 | dst.code());
+    emit_operand(esp, dst);
     EMIT(imm8);
   }
 }
 
 
-void Assembler::shl_cl(Register dst) {
+void Assembler::shl_cl(const Operand& dst) {
   EnsureSpace ensure_space(this);
   EMIT(0xD3);
-  EMIT(0xE0 | dst.code());
+  emit_operand(esp, dst);
 }
 
 
@@ -1034,24 +1067,24 @@ void Assembler::shrd(Register dst, const Operand& src) {
 }
 
 
-void Assembler::shr(Register dst, uint8_t imm8) {
+void Assembler::shr(const Operand& dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
   ASSERT(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
-    EMIT(0xE8 | dst.code());
+    emit_operand(ebp, dst);
   } else {
     EMIT(0xC1);
-    EMIT(0xE8 | dst.code());
+    emit_operand(ebp, dst);
     EMIT(imm8);
   }
 }
 
 
-void Assembler::shr_cl(Register dst) {
+void Assembler::shr_cl(const Operand& dst) {
   EnsureSpace ensure_space(this);
   EMIT(0xD3);
-  EMIT(0xE8 | dst.code());
+  emit_operand(ebp, dst);
 }
 
 
index de8b04f84e3e27287eaf0175d322545f1ce9cc3b..8b5f998f18c39caba8f5e85c222394976dcee083 100644 (file)
@@ -300,6 +300,7 @@ class Immediate BASE_EMBEDDED {
   int x_;
   RelocInfo::Mode rmode_;
 
+  friend class Operand;
   friend class Assembler;
   friend class MacroAssembler;
 };
@@ -322,12 +323,17 @@ enum ScaleFactor {
 
 class Operand BASE_EMBEDDED {
  public:
+  // reg
+  INLINE(explicit Operand(Register reg));
+
   // XMM reg
   INLINE(explicit Operand(XMMRegister xmm_reg));
 
   // [disp/r]
   INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode));
-  // disp only must always be relocated
+
+  // [disp/r]
+  INLINE(explicit Operand(Immediate imm));
 
   // [base + disp/r]
   explicit Operand(Register base, int32_t disp,
@@ -364,6 +370,10 @@ class Operand BASE_EMBEDDED {
                    RelocInfo::CELL);
   }
 
+  static Operand ForRegisterPlusImmediate(Register base, Immediate imm) {
+    return Operand(base, imm.x_, imm.rmode_);
+  }
+
   // Returns true if this Operand is a wrapper for the specified register.
   bool is_reg(Register reg) const;
 
@@ -375,9 +385,6 @@ class Operand BASE_EMBEDDED {
   Register reg() const;
 
  private:
-  // reg
-  INLINE(explicit Operand(Register reg));
-
   // Set the ModRM byte without an encoded 'reg' register. The
   // register is encoded later as part of the emit_operand operation.
   inline void set_modrm(int mod, Register rm);
@@ -394,7 +401,6 @@ class Operand BASE_EMBEDDED {
 
   friend class Assembler;
   friend class MacroAssembler;
-  friend class LCodeGen;
 };
 
 
@@ -647,8 +653,9 @@ class Assembler : public AssemblerBase {
   void rep_stos();
   void stos();
 
-  // Exchange two registers
+  // Exchange
   void xchg(Register dst, Register src);
+  void xchg(Register dst, const Operand& src);
 
   // Arithmetics
   void adc(Register dst, int32_t imm32);
@@ -690,13 +697,17 @@ class Assembler : public AssemblerBase {
 
   void cdq();
 
-  void idiv(Register src);
+  void idiv(Register src) { idiv(Operand(src)); }
+  void idiv(const Operand& src);
+  void div(Register src) { div(Operand(src)); }
+  void div(const Operand& src);
 
   // Signed multiply instructions.
   void imul(Register src);                               // edx:eax = eax * src.
   void imul(Register dst, Register src) { imul(dst, Operand(src)); }
   void imul(Register dst, const Operand& src);           // dst = dst * src.
   void imul(Register dst, Register src, int32_t imm32);  // dst = src * imm32.
+  void imul(Register dst, const Operand& src, int32_t imm32);
 
   void inc(Register dst);
   void inc(const Operand& dst);
@@ -707,8 +718,10 @@ class Assembler : public AssemblerBase {
   void mul(Register src);                                // edx:eax = eax * reg.
 
   void neg(Register dst);
+  void neg(const Operand& dst);
 
   void not_(Register dst);
+  void not_(const Operand& dst);
 
   void or_(Register dst, int32_t imm32);
   void or_(Register dst, Register src) { or_(dst, Operand(src)); }
@@ -722,22 +735,28 @@ class Assembler : public AssemblerBase {
   void ror(Register dst, uint8_t imm8);
   void ror_cl(Register dst);
 
-  void sar(Register dst, uint8_t imm8);
-  void sar_cl(Register dst);
+  void sar(Register dst, uint8_t imm8) { sar(Operand(dst), imm8); }
+  void sar(const Operand& dst, uint8_t imm8);
+  void sar_cl(Register dst) { sar_cl(Operand(dst)); }
+  void sar_cl(const Operand& dst);
 
   void sbb(Register dst, const Operand& src);
 
   void shld(Register dst, Register src) { shld(dst, Operand(src)); }
   void shld(Register dst, const Operand& src);
 
-  void shl(Register dst, uint8_t imm8);
-  void shl_cl(Register dst);
+  void shl(Register dst, uint8_t imm8) { shl(Operand(dst), imm8); }
+  void shl(const Operand& dst, uint8_t imm8);
+  void shl_cl(Register dst) { shl_cl(Operand(dst)); }
+  void shl_cl(const Operand& dst);
 
   void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
   void shrd(Register dst, const Operand& src);
 
-  void shr(Register dst, uint8_t imm8);
-  void shr_cl(Register dst);
+  void shr(Register dst, uint8_t imm8) { shr(Operand(dst), imm8); }
+  void shr(const Operand& dst, uint8_t imm8);
+  void shr_cl(Register dst) { shr_cl(Operand(dst)); }
+  void shr_cl(const Operand& dst);
 
   void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
   void sub(const Operand& dst, const Immediate& x);
@@ -921,6 +940,9 @@ class Assembler : public AssemblerBase {
     cvttss2si(dst, Operand(src));
   }
   void cvttsd2si(Register dst, const Operand& src);
+  void cvttsd2si(Register dst, XMMRegister src) {
+    cvttsd2si(dst, Operand(src));
+  }
   void cvtsd2si(Register dst, XMMRegister src);
 
   void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
index 8679b471e37c44bf97e7f8d857891aec14ef716c..00574367440093db162bc90ab23438c832b5615d 100644 (file)
@@ -23,7 +23,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, ebx };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
 }
 
@@ -31,7 +31,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
 void FastNewContextStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, edi };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
@@ -39,7 +39,7 @@ void ToNumberStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   // ToNumberStub invokes a function, and therefore needs a context.
   Register registers[] = { esi, eax };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
@@ -47,7 +47,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, eax };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
 }
 
@@ -62,9 +62,8 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
     Representation::Tagged() };
 
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
-      Runtime::FunctionForId(
-          Runtime::kCreateArrayLiteralStubBailout)->entry,
+      MajorKey(), ARRAY_SIZE(registers), registers,
+      Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
       representations);
 }
 
@@ -73,7 +72,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, eax, ebx, ecx, edx };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
 }
 
@@ -81,7 +80,35 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, ebx, edx };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void InstanceofStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  Register registers[] = {esi, left(), right()};
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void CallFunctionStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  Register registers[] = {esi, edi};
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void CallConstructStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  // eax : number of arguments
+  // ebx : feedback vector
+  // edx : (only if ebx is not the megamorphic symbol) slot in feedback
+  //       vector (Smi)
+  // edi : constructor function
+  // TODO(turbofan): So far we don't gather type feedback and hence skip the
+  // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+  Register registers[] = {esi, eax, edi, ebx};
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
@@ -89,7 +116,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, ecx, ebx, eax };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
 }
 
@@ -98,7 +125,7 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, eax, ebx };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry);
 }
 
@@ -107,7 +134,7 @@ const Register InterfaceDescriptor::ContextRegister() { return esi; }
 
 
 static void InitializeArrayConstructorDescriptor(
-    Isolate* isolate,
+    Isolate* isolate, CodeStub::Major major,
     CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
@@ -119,10 +146,8 @@ static void InitializeArrayConstructorDescriptor(
 
   if (constant_stack_parameter_count == 0) {
     Register registers[] = { esi, edi, ebx };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           deopt_handler,
-                           NULL,
-                           constant_stack_parameter_count,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+                           deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)
@@ -132,19 +157,16 @@ static void InitializeArrayConstructorDescriptor(
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           eax,
-                           deopt_handler,
-                           representations,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, eax,
+                           deopt_handler, representations,
                            constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE,
-                           PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
 }
 
 
 static void InitializeInternalArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
   // eax -- number of arguments
@@ -154,10 +176,8 @@ static void InitializeInternalArrayConstructorDescriptor(
 
   if (constant_stack_parameter_count == 0) {
     Register registers[] = { esi, edi };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           deopt_handler,
-                           NULL,
-                           constant_stack_parameter_count,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+                           deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)
@@ -166,57 +186,54 @@ static void InitializeInternalArrayConstructorDescriptor(
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           eax,
-                           deopt_handler,
-                           representations,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, eax,
+                           deopt_handler, representations,
                            constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE,
-                           PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
 }
 
 
 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
+  InitializeArrayConstructorDescriptor(isolate(), MajorKey(), descriptor, 0);
 }
 
 
 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
+  InitializeArrayConstructorDescriptor(isolate(), MajorKey(), descriptor, 1);
 }
 
 
 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
+  InitializeArrayConstructorDescriptor(isolate(), MajorKey(), descriptor, -1);
 }
 
 
 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
 }
 
 
 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 1);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
 }
 
 
 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, -1);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
 }
 
 
 void CompareNilICStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, eax };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(CompareNilIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
@@ -225,7 +242,7 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
 void ToBooleanStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, eax };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(ToBooleanIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
@@ -235,7 +252,7 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
 void BinaryOpICStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, edx, eax };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
@@ -245,7 +262,7 @@ void BinaryOpICStub::InitializeInterfaceDescriptor(
 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, ecx, edx, eax };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
 }
 
@@ -253,9 +270,8 @@ void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
 void StringAddStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, edx, eax };
-  descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
-      Runtime::FunctionForId(Runtime::kStringAdd)->entry);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
+                         Runtime::FunctionForId(Runtime::kStringAdd)->entry);
 }
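
Every call site in this file now threads MajorKey() through as the first argument of Initialize, so a descriptor records which stub it describes. A reduced model of that signature change (types are simplified stand-ins; the real descriptor also carries registers, handlers, and representations):

#include <cstdio>

enum Major { kNoCache = -1, kToNumber = 0, kCallFunction = 1 };  // sample keys

struct Descriptor {
  Major major_;
  int register_param_count_;
  Descriptor() : major_(kNoCache), register_param_count_(0) {}
  // Mirrors the new leading parameter added throughout this file.
  void Initialize(Major major, int register_param_count) {
    major_ = major;
    register_param_count_ = register_param_count;
  }
};

int main() {
  Descriptor d;
  d.Initialize(kToNumber, 2);  // analogous to MajorKey(), ARRAY_SIZE(...)
  printf("major=%d registers=%d\n", static_cast<int>(d.major_),
         d.register_param_count_);
  return 0;
}
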
 
 
@@ -2783,7 +2799,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
 
   // If there is a call site cache don't look in the global cache, but do the
   // real lookup and update the call site cache.
-  if (!HasCallSiteInlineCheck()) {
+  if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
     // Look up the function and the map in the instanceof cache.
     Label miss;
     __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
@@ -2842,6 +2858,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   if (!HasCallSiteInlineCheck()) {
     __ mov(eax, Immediate(0));
     __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ mov(eax, factory->true_value());
+    }
   } else {
     // Get return address and delta to inlined map check.
     __ mov(eax, factory->true_value());
@@ -2862,6 +2881,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   if (!HasCallSiteInlineCheck()) {
     __ mov(eax, Immediate(Smi::FromInt(1)));
     __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ mov(eax, factory->false_value());
+    }
   } else {
     // Get return address and delta to inlined map check.
     __ mov(eax, factory->false_value());
@@ -2889,20 +2911,32 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   // Null is not an instance of anything.
   __ cmp(object, factory->null_value());
   __ j(not_equal, &object_not_null, Label::kNear);
-  __ Move(eax, Immediate(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ mov(eax, factory->false_value());
+  } else {
+    __ Move(eax, Immediate(Smi::FromInt(1)));
+  }
   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
 
   __ bind(&object_not_null);
   // Smi values are not instances of anything.
   __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
-  __ Move(eax, Immediate(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ mov(eax, factory->false_value());
+  } else {
+    __ Move(eax, Immediate(Smi::FromInt(1)));
+  }
   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
 
   __ bind(&object_not_null_or_smi);
   // String values are not instances of anything.
   Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
   __ j(NegateCondition(is_string), &slow, Label::kNear);
-  __ Move(eax, Immediate(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ mov(eax, factory->false_value());
+  } else {
+    __ Move(eax, Immediate(Smi::FromInt(1)));
+  }
   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
 
   // Slow-case: Go through the JavaScript implementation.
index 51a2b890bc50435e512cac970cb61edf92990047..1d5bceadf37deda36c82f028ffdf2afb36be43ac 100644 (file)
@@ -128,9 +128,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
   // Emit call to lazy deoptimization at all lazy deopt points.
   DeoptimizationInputData* deopt_data =
       DeoptimizationInputData::cast(code->deoptimization_data());
-  SharedFunctionInfo* shared =
-      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
-  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
 #ifdef DEBUG
   Address prev_call_address = NULL;
 #endif
index e93566452a383b3bdd6f651a323f32b54d65b5ed..b41ecaa36a453a0afae5998cd6ff0bd35098a9f2 100644 (file)
@@ -529,77 +529,94 @@ int DisassemblerIA32::PrintImmediateOp(byte* data) {
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::F7Instruction(byte* data) {
   ASSERT_EQ(0xF7, *data);
-  byte modrm = *(data+1);
+  byte modrm = *++data;
   int mod, regop, rm;
   get_modrm(modrm, &mod, &regop, &rm);
-  if (mod == 3 && regop != 0) {
-    const char* mnem = NULL;
-    switch (regop) {
-      case 2: mnem = "not"; break;
-      case 3: mnem = "neg"; break;
-      case 4: mnem = "mul"; break;
-      case 5: mnem = "imul"; break;
-      case 7: mnem = "idiv"; break;
-      default: UnimplementedInstruction();
-    }
-    AppendToBuffer("%s %s", mnem, NameOfCPURegister(rm));
-    return 2;
-  } else if (mod == 3 && regop == eax) {
-    int32_t imm = *reinterpret_cast<int32_t*>(data+2);
-    AppendToBuffer("test %s,0x%x", NameOfCPURegister(rm), imm);
-    return 6;
-  } else if (regop == eax) {
-    AppendToBuffer("test ");
-    int count = PrintRightOperand(data+1);
-    int32_t imm = *reinterpret_cast<int32_t*>(data+1+count);
-    AppendToBuffer(",0x%x", imm);
-    return 1+count+4 /*int32_t*/;
-  } else {
-    UnimplementedInstruction();
-    return 2;
+  const char* mnem = NULL;
+  switch (regop) {
+    case 0:
+      mnem = "test";
+      break;
+    case 2:
+      mnem = "not";
+      break;
+    case 3:
+      mnem = "neg";
+      break;
+    case 4:
+      mnem = "mul";
+      break;
+    case 5:
+      mnem = "imul";
+      break;
+    case 6:
+      mnem = "div";
+      break;
+    case 7:
+      mnem = "idiv";
+      break;
+    default:
+      UnimplementedInstruction();
+  }
+  AppendToBuffer("%s ", mnem);
+  int count = PrintRightOperand(data);
+  if (regop == 0) {
+    AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + count));
+    count += 4;
   }
+  return 1 + count;
 }
 
 
 int DisassemblerIA32::D1D3C1Instruction(byte* data) {
   byte op = *data;
   ASSERT(op == 0xD1 || op == 0xD3 || op == 0xC1);
-  byte modrm = *(data+1);
+  byte modrm = *++data;
   int mod, regop, rm;
   get_modrm(modrm, &mod, &regop, &rm);
   int imm8 = -1;
-  int num_bytes = 2;
-  if (mod == 3) {
-    const char* mnem = NULL;
-    switch (regop) {
-      case kROL: mnem = "rol"; break;
-      case kROR: mnem = "ror"; break;
-      case kRCL: mnem = "rcl"; break;
-      case kRCR: mnem = "rcr"; break;
-      case kSHL: mnem = "shl"; break;
-      case KSHR: mnem = "shr"; break;
-      case kSAR: mnem = "sar"; break;
-      default: UnimplementedInstruction();
-    }
-    if (op == 0xD1) {
-      imm8 = 1;
-    } else if (op == 0xC1) {
-      imm8 = *(data+2);
-      num_bytes = 3;
-    } else if (op == 0xD3) {
-      // Shift/rotate by cl.
-    }
-    ASSERT_NE(NULL, mnem);
-    AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
-    if (imm8 >= 0) {
-      AppendToBuffer("%d", imm8);
-    } else {
-      AppendToBuffer("cl");
-    }
+  const char* mnem = NULL;
+  switch (regop) {
+    case kROL:
+      mnem = "rol";
+      break;
+    case kROR:
+      mnem = "ror";
+      break;
+    case kRCL:
+      mnem = "rcl";
+      break;
+    case kRCR:
+      mnem = "rcr";
+      break;
+    case kSHL:
+      mnem = "shl";
+      break;
+    case KSHR:
+      mnem = "shr";
+      break;
+    case kSAR:
+      mnem = "sar";
+      break;
+    default:
+      UnimplementedInstruction();
+  }
+  AppendToBuffer("%s ", mnem);
+  int count = PrintRightOperand(data);
+  if (op == 0xD1) {
+    imm8 = 1;
+  } else if (op == 0xC1) {
+    imm8 = *(data + count);
+    count++;
+  } else if (op == 0xD3) {
+    // Shift/rotate by cl.
+  }
+  if (imm8 >= 0) {
+    AppendToBuffer(",%d", imm8);
   } else {
-    UnimplementedInstruction();
+    AppendToBuffer(",cl");
   }
-  return num_bytes;
+  return 1 + count;
 }
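
Both rewritten decoders start from the ModRM byte and let PrintRightOperand consume however many bytes the addressing form needs, which is what lets them print memory forms (e.g. not [eax], or a shift of a memory operand) that the old code rejected as unimplemented. A sketch of the ModRM split they depend on (standard x86 layout):

#include <cassert>

// Standard x86 ModRM decomposition: mod selects the addressing form,
// regop is a register or /digit opcode extension, rm names the base.
static void GetModRM(unsigned char data, int* mod, int* regop, int* rm) {
  *mod = data >> 6;
  *regop = (data >> 3) & 7;
  *rm = data & 7;
}

int main() {
  int mod, regop, rm;
  GetModRM(0xF8, &mod, &regop, &rm);  // 11 111 000: register form, /7
  assert(mod == 3 && regop == 7 && rm == 0);
  return 0;
}
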
 
 
@@ -954,17 +971,18 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
         data += 3;
         break;
 
-      case 0x69:  // fall through
-      case 0x6B:
-        { int mod, regop, rm;
-          get_modrm(*(data+1), &mod, &regop, &rm);
-          int32_t imm =
-              *data == 0x6B ? *(data+2) : *reinterpret_cast<int32_t*>(data+2);
-          AppendToBuffer("imul %s,%s,0x%x",
-                         NameOfCPURegister(regop),
-                         NameOfCPURegister(rm),
-                         imm);
-          data += 2 + (*data == 0x6B ? 1 : 4);
+      case 0x6B: {
+        data++;
+        data += PrintOperands("imul", REG_OPER_OP_ORDER, data);
+        AppendToBuffer(",%d", *data);
+        data++;
+      } break;
+
+      case 0x69: {
+        data++;
+        data += PrintOperands("imul", REG_OPER_OP_ORDER, data);
+        AppendToBuffer(",%d", *reinterpret_cast<int32_t*>(data));
+        data += 4;
         }
         break;
 
index c1881b6cbb6a03711dab3d90a17ec79d17039a19..67eba267b00fcaceeeaea7c38994de619c8511dd 100644 (file)
@@ -902,7 +902,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   int length = deoptimizations_.length();
   if (length == 0) return;
   Handle<DeoptimizationInputData> data =
-      DeoptimizationInputData::New(isolate(), length, TENURED);
+      DeoptimizationInputData::New(isolate(), length, 0, TENURED);
 
   Handle<ByteArray> translations =
       translations_.CreateByteArray(isolate()->factory());
index 3be96ed7289a436d79ff4bbbbe7af111f0e305e5..9ee02851fb3fc8c98c16bf0d91354d761d718718 100644 (file)
@@ -8,8 +8,7 @@
 
 #include "src/hydrogen-osr.h"
 #include "src/ia32/lithium-codegen-ia32.h"
-#include "src/ia32/lithium-ia32.h"
-#include "src/lithium-allocator-inl.h"
+#include "src/lithium-inl.h"
 
 namespace v8 {
 namespace internal {
index 319aaee2cade91fb9a7245cc1b6f981af6d93cb8..362252de112abe8c53433a56e34212b33a287a9c 100644 (file)
 namespace v8 {
 namespace internal {
 
+namespace compiler {
+class RCodeVisualizer;
+}
+
 // Forward declarations.
 class LCodeGen;
 
@@ -201,7 +205,7 @@ class LInstruction : public ZoneObject {
   enum Opcode {
     // Declare a unique enum value for each instruction.
 #define DECLARE_OPCODE(type) k##type,
-    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kAdapter,
     kNumberOfInstructions
 #undef DECLARE_OPCODE
   };
@@ -220,6 +224,9 @@ class LInstruction : public ZoneObject {
 
   virtual bool IsControl() const { return false; }
 
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
   void set_environment(LEnvironment* env) { environment_ = env; }
   LEnvironment* environment() const { return environment_; }
   bool HasEnvironment() const { return environment_ != NULL; }
@@ -258,11 +265,12 @@ class LInstruction : public ZoneObject {
   void VerifyCall();
 #endif
 
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
  private:
   // Iterator support.
   friend class InputIterator;
-  virtual int InputCount() = 0;
-  virtual LOperand* InputAt(int i) = 0;
 
   friend class TempIterator;
   virtual int TempCount() = 0;
index 6405115b9ff70daae2030c83d3caf8f6978e7782..41751db5021f40065f42a554aefc81e9c057a707 100644 (file)
@@ -1574,7 +1574,8 @@ void Isolate::Deinit() {
       heap_.mark_compact_collector()->EnsureSweepingCompleted();
     }
 
-    if (FLAG_hydrogen_stats) GetHStatistics()->Print();
+    if (FLAG_turbo_stats) GetTStatistics()->Print("TurboFan");
+    if (FLAG_hydrogen_stats) GetHStatistics()->Print("Hydrogen");
 
     if (FLAG_print_deopt_stress) {
       PrintF(stdout, "=== Stress deopt counter: %u\n", stress_deopt_count_);
@@ -2120,6 +2121,12 @@ HStatistics* Isolate::GetHStatistics() {
 }
 
 
+HStatistics* Isolate::GetTStatistics() {
+  if (tstatistics() == NULL) set_tstatistics(new HStatistics());
+  return tstatistics();
+}
+
+
 HTracer* Isolate::GetHTracer() {
   if (htracer() == NULL) set_htracer(new HTracer(id()));
   return htracer();
index a358335fba739c94eeb2d78e725f68e9ec5d4c2f..ef2952692012810d9a5b179ee1bcf8b7a370da4d 100644 (file)
@@ -363,6 +363,7 @@ typedef List<HeapObject*> DebugObjectCache;
   V(int, pending_microtask_count, 0)                                           \
   V(bool, autorun_microtasks, true)                                            \
   V(HStatistics*, hstatistics, NULL)                                           \
+  V(HStatistics*, tstatistics, NULL)                                           \
   V(HTracer*, htracer, NULL)                                                   \
   V(CodeTracer*, code_tracer, NULL)                                            \
   V(bool, fp_stubs_generated, false)                                           \
@@ -1067,6 +1068,7 @@ class Isolate {
   int id() const { return static_cast<int>(id_); }
 
   HStatistics* GetHStatistics();
+  HStatistics* GetTStatistics();
   HTracer* GetHTracer();
   CodeTracer* GetCodeTracer();
 
index 4fb5b116f325bc008270aebe5b2e4f3bb9981e73..a03a60d3ed81c9ab6d80d2d1d7c0b25ffb896cc9 100644 (file)
@@ -41,93 +41,6 @@ LGap* LAllocator::GapAt(int index) {
 }
 
 
-TempIterator::TempIterator(LInstruction* instr)
-    : instr_(instr),
-      limit_(instr->TempCount()),
-      current_(0) {
-  SkipUninteresting();
-}
-
-
-bool TempIterator::Done() { return current_ >= limit_; }
-
-
-LOperand* TempIterator::Current() {
-  ASSERT(!Done());
-  return instr_->TempAt(current_);
-}
-
-
-void TempIterator::SkipUninteresting() {
-  while (current_ < limit_ && instr_->TempAt(current_) == NULL) ++current_;
-}
-
-
-void TempIterator::Advance() {
-  ++current_;
-  SkipUninteresting();
-}
-
-
-InputIterator::InputIterator(LInstruction* instr)
-    : instr_(instr),
-      limit_(instr->InputCount()),
-      current_(0) {
-  SkipUninteresting();
-}
-
-
-bool InputIterator::Done() { return current_ >= limit_; }
-
-
-LOperand* InputIterator::Current() {
-  ASSERT(!Done());
-  ASSERT(instr_->InputAt(current_) != NULL);
-  return instr_->InputAt(current_);
-}
-
-
-void InputIterator::Advance() {
-  ++current_;
-  SkipUninteresting();
-}
-
-
-void InputIterator::SkipUninteresting() {
-  while (current_ < limit_) {
-    LOperand* current = instr_->InputAt(current_);
-    if (current != NULL && !current->IsConstantOperand()) break;
-    ++current_;
-  }
-}
-
-
-UseIterator::UseIterator(LInstruction* instr)
-    : input_iterator_(instr), env_iterator_(instr->environment()) { }
-
-
-bool UseIterator::Done() {
-  return input_iterator_.Done() && env_iterator_.Done();
-}
-
-
-LOperand* UseIterator::Current() {
-  ASSERT(!Done());
-  LOperand* result = input_iterator_.Done()
-      ? env_iterator_.Current()
-      : input_iterator_.Current();
-  ASSERT(result != NULL);
-  return result;
-}
-
-
-void UseIterator::Advance() {
-  input_iterator_.Done()
-      ? env_iterator_.Advance()
-      : input_iterator_.Advance();
-}
-
-
 void LAllocator::SetLiveRangeAssignedRegister(LiveRange* range, int reg) {
   if (range->Kind() == DOUBLE_REGISTERS) {
     assigned_double_registers_->Add(reg);
index 10a34d144fe17841c6f6f1f0d582ee10d3c06abe..0ccb77399b8198d43053df1d479a8c326df0c5ec 100644 (file)
@@ -5,27 +5,10 @@
 #include "src/v8.h"
 
 #include "src/hydrogen.h"
+#include "src/lithium-inl.h"
 #include "src/lithium-allocator-inl.h"
 #include "src/string-stream.h"
 
-#if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-ia32.h"  // NOLINT
-#elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-x64.h"  // NOLINT
-#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-arm64.h"  // NOLINT
-#elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-arm.h"  // NOLINT
-#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-mips.h"  // NOLINT
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/lithium-mips64.h"  // NOLINT
-#elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-x87.h"  // NOLINT
-#else
-#error "Unknown architecture."
-#endif
-
 namespace v8 {
 namespace internal {
 
index 1d313a5a54825e890981cd0ddab90317fb2d37a4..b7d6d09dbd1ad1e482a9f5c467f8f1b528baef94 100644 (file)
@@ -17,7 +17,6 @@ namespace internal {
 // Forward declarations.
 class HBasicBlock;
 class HGraph;
-class HInstruction;
 class HPhi;
 class HTracer;
 class HValue;
@@ -118,64 +117,6 @@ class LifetimePosition {
 };
 
 
-enum RegisterKind {
-  UNALLOCATED_REGISTERS,
-  GENERAL_REGISTERS,
-  DOUBLE_REGISTERS
-};
-
-
-// A register-allocator view of a Lithium instruction. It contains the id of
-// the output operand and a list of input operand uses.
-
-class LInstruction;
-class LEnvironment;
-
-// Iterator for non-null temp operands.
-class TempIterator BASE_EMBEDDED {
- public:
-  inline explicit TempIterator(LInstruction* instr);
-  inline bool Done();
-  inline LOperand* Current();
-  inline void Advance();
-
- private:
-  inline void SkipUninteresting();
-  LInstruction* instr_;
-  int limit_;
-  int current_;
-};
-
-
-// Iterator for non-constant input operands.
-class InputIterator BASE_EMBEDDED {
- public:
-  inline explicit InputIterator(LInstruction* instr);
-  inline bool Done();
-  inline LOperand* Current();
-  inline void Advance();
-
- private:
-  inline void SkipUninteresting();
-  LInstruction* instr_;
-  int limit_;
-  int current_;
-};
-
-
-class UseIterator BASE_EMBEDDED {
- public:
-  inline explicit UseIterator(LInstruction* instr);
-  inline bool Done();
-  inline LOperand* Current();
-  inline void Advance();
-
- private:
-  InputIterator input_iterator_;
-  DeepIterator env_iterator_;
-};
-
-
 // Representation of the non-empty interval [start,end[.
 class UseInterval: public ZoneObject {
  public:
diff --git a/src/lithium-inl.h b/src/lithium-inl.h
new file mode 100644 (file)
index 0000000..27b9292
--- /dev/null
@@ -0,0 +1,110 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LITHIUM_INL_H_
+#define V8_LITHIUM_INL_H_
+
+#include "src/lithium.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/ia32/lithium-ia32.h"  // NOLINT
+#elif V8_TARGET_ARCH_X64
+#include "src/x64/lithium-x64.h"  // NOLINT
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/lithium-arm64.h"  // NOLINT
+#elif V8_TARGET_ARCH_ARM
+#include "src/arm/lithium-arm.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS
+#include "src/mips/lithium-mips.h"  // NOLINT
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/lithium-x87.h"  // NOLINT
+#else
+#error "Unknown architecture."
+#endif
+
+namespace v8 {
+namespace internal {
+
+TempIterator::TempIterator(LInstruction* instr)
+    : instr_(instr), limit_(instr->TempCount()), current_(0) {
+  SkipUninteresting();
+}
+
+
+bool TempIterator::Done() { return current_ >= limit_; }
+
+
+LOperand* TempIterator::Current() {
+  ASSERT(!Done());
+  return instr_->TempAt(current_);
+}
+
+
+void TempIterator::SkipUninteresting() {
+  while (current_ < limit_ && instr_->TempAt(current_) == NULL) ++current_;
+}
+
+
+void TempIterator::Advance() {
+  ++current_;
+  SkipUninteresting();
+}
+
+
+InputIterator::InputIterator(LInstruction* instr)
+    : instr_(instr), limit_(instr->InputCount()), current_(0) {
+  SkipUninteresting();
+}
+
+
+bool InputIterator::Done() { return current_ >= limit_; }
+
+
+LOperand* InputIterator::Current() {
+  ASSERT(!Done());
+  ASSERT(instr_->InputAt(current_) != NULL);
+  return instr_->InputAt(current_);
+}
+
+
+void InputIterator::Advance() {
+  ++current_;
+  SkipUninteresting();
+}
+
+
+void InputIterator::SkipUninteresting() {
+  while (current_ < limit_) {
+    LOperand* current = instr_->InputAt(current_);
+    if (current != NULL && !current->IsConstantOperand()) break;
+    ++current_;
+  }
+}
+
+
+UseIterator::UseIterator(LInstruction* instr)
+    : input_iterator_(instr), env_iterator_(instr->environment()) {}
+
+
+bool UseIterator::Done() {
+  return input_iterator_.Done() && env_iterator_.Done();
+}
+
+
+LOperand* UseIterator::Current() {
+  ASSERT(!Done());
+  LOperand* result = input_iterator_.Done() ? env_iterator_.Current()
+                                            : input_iterator_.Current();
+  ASSERT(result != NULL);
+  return result;
+}
+
+
+void UseIterator::Advance() {
+  input_iterator_.Done() ? env_iterator_.Advance() : input_iterator_.Advance();
+}
+} }  // namespace v8::internal
+
+#endif  // V8_LITHIUM_INL_H_
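
The iterators moved into this new header all follow the same Done/Current/Advance protocol, with SkipUninteresting filtering out NULL temps and constant inputs. A usage sketch, assuming the declarations above are in scope:

    // Sketch: walking an instruction's operands with the iterator protocol.
    #include "src/lithium-inl.h"

    namespace v8 {
    namespace internal {

    static void CountOperands(LInstruction* instr, int* uses, int* temps) {
      // Non-constant inputs plus environment (deopt) uses, in that order.
      for (UseIterator it(instr); !it.Done(); it.Advance()) ++*uses;
      // Only non-NULL temps; NULL entries are skipped by the iterator.
      for (TempIterator it(instr); !it.Done(); it.Advance()) ++*temps;
    }

    } }  // namespace v8::internal
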
index 61b7e6702c4dd5fb930e02e7e4cdde338bea94d7..89e10796d8ff23d5357b1342c9d10e52e46c4956 100644 (file)
@@ -55,16 +55,26 @@ void LOperand::PrintTo(StringStream* stream) {
           break;
         case LUnallocated::FIXED_REGISTER: {
           int reg_index = unalloc->fixed_register_index();
-          const char* register_name =
-              Register::AllocationIndexToString(reg_index);
-          stream->Add("(=%s)", register_name);
+          if (reg_index < 0 ||
+              reg_index >= Register::kMaxNumAllocatableRegisters) {
+            stream->Add("(=invalid_reg#%d)", reg_index);
+          } else {
+            const char* register_name =
+                Register::AllocationIndexToString(reg_index);
+            stream->Add("(=%s)", register_name);
+          }
           break;
         }
         case LUnallocated::FIXED_DOUBLE_REGISTER: {
           int reg_index = unalloc->fixed_register_index();
-          const char* double_register_name =
-              DoubleRegister::AllocationIndexToString(reg_index);
-          stream->Add("(=%s)", double_register_name);
+          if (reg_index < 0 ||
+              reg_index >= DoubleRegister::kMaxNumAllocatableRegisters) {
+            stream->Add("(=invalid_double_reg#%d)", reg_index);
+          } else {
+            const char* double_register_name =
+                DoubleRegister::AllocationIndexToString(reg_index);
+            stream->Add("(=%s)", double_register_name);
+          }
           break;
         }
         case LUnallocated::MUST_HAVE_REGISTER:
@@ -93,12 +103,26 @@ void LOperand::PrintTo(StringStream* stream) {
     case DOUBLE_STACK_SLOT:
       stream->Add("[double_stack:%d]", index());
       break;
-    case REGISTER:
-      stream->Add("[%s|R]", Register::AllocationIndexToString(index()));
+    case REGISTER: {
+      int reg_index = index();
+      if (reg_index < 0 || reg_index >= Register::kMaxNumAllocatableRegisters) {
+        stream->Add("(=invalid_reg#%d|R)", reg_index);
+      } else {
+        stream->Add("[%s|R]", Register::AllocationIndexToString(reg_index));
+      }
       break;
-    case DOUBLE_REGISTER:
-      stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
+    }
+    case DOUBLE_REGISTER: {
+      int reg_index = index();
+      if (reg_index < 0 ||
+          reg_index >= DoubleRegister::kMaxNumAllocatableRegisters) {
+        stream->Add("(=invalid_double_reg#%d|R)", reg_index);
+      } else {
+        stream->Add("[%s|R]",
+                    DoubleRegister::AllocationIndexToString(reg_index));
+      }
       break;
+    }
   }
 }
 
@@ -242,12 +266,11 @@ LChunk::LChunk(CompilationInfo* info, HGraph* graph)
     : spill_slot_count_(0),
       info_(info),
       graph_(graph),
-      instructions_(32, graph->zone()),
-      pointer_maps_(8, graph->zone()),
-      inlined_closures_(1, graph->zone()),
-      deprecation_dependencies_(MapLess(), MapAllocator(graph->zone())),
-      stability_dependencies_(MapLess(), MapAllocator(graph->zone())) {
-}
+      instructions_(32, info->zone()),
+      pointer_maps_(8, info->zone()),
+      inlined_closures_(1, info->zone()),
+      deprecation_dependencies_(MapLess(), MapAllocator(info->zone())),
+      stability_dependencies_(MapLess(), MapAllocator(info->zone())) {}
 
 
 LLabel* LChunk::GetLabel(int block_id) const {
@@ -308,7 +331,7 @@ void LChunk::MarkEmptyBlocks() {
 
 
 void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
-  LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
+  LInstructionGap* gap = new (zone()) LInstructionGap(block);
   gap->set_hydrogen_value(instr->hydrogen_value());
   int index = -1;
   if (instr->IsControl()) {
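
The constructor and AddInstruction now draw from the CompilationInfo's zone rather than the graph's, via the zone placement-new idiom. A toy sketch of that idiom (the bump allocator here is illustrative, not V8's Zone):

    // Zone placement-new sketch: objects are allocated in an arena and never
    // deleted individually; the arena's lifetime bounds them all.
    #include <cstddef>
    #include <cstdlib>

    class Zone {
     public:
      void* New(size_t size) { return std::malloc(size); }  // toy: never freed
    };

    inline void* operator new(size_t size, Zone* zone) { return zone->New(size); }

    struct LGapSketch {
      explicit LGapSketch(int block) : block_(block) {}
      int block_;
    };

    int main() {
      Zone zone;
      // Analogous to: new (zone()) LInstructionGap(block)
      LGapSketch* gap = new (&zone) LGapSketch(1);
      return gap->block_ == 1 ? 0 : 1;
    }
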
index 8aeebe6c835190920549efa71433363534c935d5..821b35a59f01d5e694f81c02a69273b55a78f5c2 100644 (file)
@@ -22,7 +22,6 @@ namespace internal {
   V(Register,        REGISTER,          16)   \
   V(DoubleRegister,  DOUBLE_REGISTER,   16)
 
-
 class LOperand : public ZoneObject {
  public:
   enum Kind {
@@ -49,6 +48,7 @@ class LOperand : public ZoneObject {
 
   void PrintTo(StringStream* stream);
   void ConvertTo(Kind kind, int index) {
+    if (kind == REGISTER) ASSERT(index >= 0);
     value_ = KindField::encode(kind);
     value_ |= index << kKindFieldWidth;
     ASSERT(this->index() == index);
@@ -278,9 +278,10 @@ class LMoveOperands V8_FINAL BASE_EMBEDDED {
   }
 
   // A move is redundant if it's been eliminated, if its source and
-  // destination are the same, or if its destination is unneeded.
+  // destination are the same, or if its destination is unneeded or constant.
   bool IsRedundant() const {
-    return IsEliminated() || source_->Equals(destination_) || IsIgnored();
+    return IsEliminated() || source_->Equals(destination_) || IsIgnored() ||
+           (destination_ != NULL && destination_->IsConstantOperand());
   }
 
   bool IsIgnored() const {
@@ -341,9 +342,7 @@ class LParallelMove V8_FINAL : public ZoneObject {
 
   bool IsRedundant() const;
 
-  const ZoneList<LMoveOperands>* move_operands() const {
-    return &move_operands_;
-  }
+  ZoneList<LMoveOperands>* move_operands() { return &move_operands_; }
 
   void PrintDataTo(StringStream* stream) const;
 
@@ -747,6 +746,61 @@ class LPhase : public CompilationPhase {
 };
 
 
+// A register-allocator view of a Lithium instruction. It contains the id of
+// the output operand and a list of input operand uses.
+
+enum RegisterKind {
+  UNALLOCATED_REGISTERS,
+  GENERAL_REGISTERS,
+  DOUBLE_REGISTERS
+};
+
+// Iterator for non-null temp operands.
+class TempIterator BASE_EMBEDDED {
+ public:
+  inline explicit TempIterator(LInstruction* instr);
+  inline bool Done();
+  inline LOperand* Current();
+  inline void Advance();
+
+ private:
+  inline void SkipUninteresting();
+  LInstruction* instr_;
+  int limit_;
+  int current_;
+};
+
+
+// Iterator for non-constant input operands.
+class InputIterator BASE_EMBEDDED {
+ public:
+  inline explicit InputIterator(LInstruction* instr);
+  inline bool Done();
+  inline LOperand* Current();
+  inline void Advance();
+
+ private:
+  inline void SkipUninteresting();
+  LInstruction* instr_;
+  int limit_;
+  int current_;
+};
+
+
+class UseIterator BASE_EMBEDDED {
+ public:
+  inline explicit UseIterator(LInstruction* instr);
+  inline bool Done();
+  inline LOperand* Current();
+  inline void Advance();
+
+ private:
+  InputIterator input_iterator_;
+  DeepIterator env_iterator_;
+};
+
+class LInstruction;
+class LCodeGen;
 } }  // namespace v8::internal
 
 #endif  // V8_LITHIUM_H_
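
The widened IsRedundant predicate adds one case: a move whose destination is a constant operand can never be observed, so the gap resolver may drop it just like a self-move. A standalone restatement (toy operand type, not V8's LOperand):

    // Sketch of the extended redundancy test from LMoveOperands::IsRedundant.
    struct OperandSketch {
      bool is_constant;
      int id;
      bool Equals(const OperandSketch* other) const { return id == other->id; }
    };

    static bool IsRedundantMove(const OperandSketch* src,
                                const OperandSketch* dst) {
      if (dst == nullptr) return true;                      // eliminated/ignored
      if (src != nullptr && src->Equals(dst)) return true;  // self-move
      return dst->is_constant;                              // store-to-constant
    }
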
index 4774c946cbb185b0675d08d2467e777170af4140..0e55fe713ec86574c84485e3c5e4525c7145f19f 100644 (file)
@@ -20,7 +20,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a2 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
 }
 
@@ -28,14 +28,14 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
 void FastNewContextStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a1 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
 void ToNumberStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
@@ -43,7 +43,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a0 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
 }
 
@@ -57,7 +57,7 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
     Representation::Smi(),
     Representation::Tagged() };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
       representations);
 }
@@ -67,7 +67,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a3, a2, a1, a0 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
 }
 
@@ -75,7 +75,25 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a2, a3 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void InstanceofStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  UNIMPLEMENTED();
+}
+
+
+void CallFunctionStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  UNIMPLEMENTED();
+}
+
+
+void CallConstructStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  UNIMPLEMENTED();
 }
 
 
@@ -83,7 +101,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a2, a1, a0 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
 }
 
@@ -93,7 +111,7 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
   Register registers[] = { cp, a0, a1 };
   Address entry =
       Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(entry));
 }
 
@@ -101,7 +119,7 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
 void CompareNilICStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(CompareNilIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
@@ -112,7 +130,7 @@ const Register InterfaceDescriptor::ContextRegister() { return cp; }
 
 
 static void InitializeArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
   // cp -- context
@@ -124,10 +142,8 @@ static void InitializeArrayConstructorDescriptor(
 
   if (constant_stack_parameter_count == 0) {
     Register registers[] = { cp, a1, a2 };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           deopt_handler,
-                           NULL,
-                           constant_stack_parameter_count,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+                           deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)
@@ -137,19 +153,16 @@ static void InitializeArrayConstructorDescriptor(
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           a0,
-                           deopt_handler,
-                           representations,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, a0,
+                           deopt_handler, representations,
                            constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE,
-                           PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
 }
 
 
 static void InitializeInternalArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
   // cp -- context
@@ -160,10 +173,8 @@ static void InitializeInternalArrayConstructorDescriptor(
 
   if (constant_stack_parameter_count == 0) {
     Register registers[] = { cp, a1 };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           deopt_handler,
-                           NULL,
-                           constant_stack_parameter_count,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+                           deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)
@@ -172,39 +183,36 @@ static void InitializeInternalArrayConstructorDescriptor(
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           a0,
-                           deopt_handler,
-                           representations,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, a0,
+                           deopt_handler, representations,
                            constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE,
-                           PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
 }
 
 
 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 0);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0);
 }
 
 
 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 1);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1);
 }
 
 
 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, -1);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1);
 }
 
 
 void ToBooleanStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(ToBooleanIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
@@ -213,26 +221,26 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
 
 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
 }
 
 
 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 1);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
 }
 
 
 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, -1);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
 }
 
 
 void BinaryOpICStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a1, a0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
@@ -242,7 +250,7 @@ void BinaryOpICStub::InitializeInterfaceDescriptor(
 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a2, a1, a0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
 }
 
@@ -250,9 +258,8 @@ void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
 void StringAddStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a1, a0 };
-  descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
-      Runtime::FunctionForId(Runtime::kStringAdd)->entry);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
+                         Runtime::FunctionForId(Runtime::kStringAdd)->entry);
 }
 
 
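The recurring change in this file is mechanical: every InitializeInterfaceDescriptor now passes the stub's MajorKey() as the first Initialize() argument, while the file-static helpers receive a CodeStub::Major parameter instead, since a static helper has no stub instance on which to call the member MajorKey(). A toy sketch of that shape (names suffixed "Sketch" are hypothetical):

    // Why the helpers take the major key as a parameter (toy types).
    #include <cstdio>

    struct DescriptorSketch {
      void Initialize(int major) { std::printf("major=%d\n", major); }
    };

    struct StubSketch {
      int MajorKey() const { return 7; }  // per-stub identity (illustrative)
      void InitializeInterfaceDescriptor(DescriptorSketch* d) {
        InitializeHelper(MajorKey(), d);  // member context: MajorKey() available
      }
      static void InitializeHelper(int major, DescriptorSketch* d) {
        d->Initialize(major);  // the helper must receive the key explicitly
      }
    };

    int main() {
      DescriptorSketch d;
      StubSketch().InitializeInterfaceDescriptor(&d);
      return 0;
    }
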
index 93e7a735d5676eed16c192ee2830a051cadba87b..d6ebfac363bab2f4435cf14e0341d72bec79de1b 100644 (file)
@@ -48,9 +48,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
 
   DeoptimizationInputData* deopt_data =
       DeoptimizationInputData::cast(code->deoptimization_data());
-  SharedFunctionInfo* shared =
-      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
-  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
 #ifdef DEBUG
   Address prev_call_address = NULL;
 #endif
index 36bdff1fc4e1ac514d571324d0ab69de05baa987..141466e8f8ad6e65b809bcf8df2b5de8de4b6f01 100644 (file)
@@ -897,7 +897,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   int length = deoptimizations_.length();
   if (length == 0) return;
   Handle<DeoptimizationInputData> data =
-      DeoptimizationInputData::New(isolate(), length, TENURED);
+      DeoptimizationInputData::New(isolate(), length, 0, TENURED);
 
   Handle<ByteArray> translations =
       translations_.CreateByteArray(isolate()->factory());
index fcdc1e059cdeb0f293315e83dd10415a0c2e7b60..b9efe7920ad5ebc78ff0e6348d03747c1eea8668 100644 (file)
@@ -4,10 +4,11 @@
 
 #include "src/v8.h"
 
+#if V8_TARGET_ARCH_MIPS
+
 #include "src/hydrogen-osr.h"
-#include "src/lithium-allocator-inl.h"
+#include "src/lithium-inl.h"
 #include "src/mips/lithium-codegen-mips.h"
-#include "src/mips/lithium-mips.h"
 
 namespace v8 {
 namespace internal {
@@ -2574,3 +2575,5 @@ LInstruction* LChunkBuilder::DoAllocateBlockContext(
 }
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS
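
Wrapping lithium-mips.cc in a V8_TARGET_ARCH_MIPS guard means the file can be fed to the compiler on every target and simply yields an empty translation unit elsewhere. The skeleton of the pattern:

    // Translation-unit guard: the whole file body is target-conditional.
    #include "src/v8.h"

    #if V8_TARGET_ARCH_MIPS
    namespace v8 {
    namespace internal {
    // MIPS-only LChunkBuilder code lives here.
    } }  // namespace v8::internal
    #endif  // V8_TARGET_ARCH_MIPS
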
index adda4e256de8e1310b100ea85bf8dc6aaa3264d5..101d9c20580a4ce3b57046f744338f79e7f4a984 100644 (file)
@@ -220,6 +220,9 @@ class LInstruction : public ZoneObject {
 
   virtual bool IsControl() const { return false; }
 
+  // Tries to delete this instruction; returns true on success.
+  virtual bool TryDelete() { return false; }
+
   void set_environment(LEnvironment* env) { environment_ = env; }
   LEnvironment* environment() const { return environment_; }
   bool HasEnvironment() const { return environment_ != NULL; }
@@ -258,11 +261,12 @@ class LInstruction : public ZoneObject {
   void VerifyCall();
 #endif
 
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
  private:
   // Iterator interface.
   friend class InputIterator;
-  virtual int InputCount() = 0;
-  virtual LOperand* InputAt(int i) = 0;
 
   friend class TempIterator;
   virtual int TempCount() = 0;
index 3080b32738ec987d3873b07fd456ee4d3bdb4ad5..65f808fc46eb950a2503ccdb1ae33553fda0ec42 100644 (file)
@@ -20,7 +20,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a2 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
 }
 
@@ -28,14 +28,14 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
 void FastNewContextStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a1 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
 void ToNumberStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
@@ -43,7 +43,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a0 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
 }
 
@@ -57,7 +57,7 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
     Representation::Smi(),
     Representation::Tagged() };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
       representations);
 }
@@ -67,7 +67,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a3, a2, a1, a0 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
 }
 
@@ -75,7 +75,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a2, a3 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
@@ -83,7 +83,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a2, a1, a0 };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
 }
 
@@ -93,7 +93,7 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
   Register registers[] = { cp, a0, a1 };
   Address entry =
       Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(entry));
 }
 
@@ -101,7 +101,7 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
 void CompareNilICStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(CompareNilIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
@@ -112,7 +112,7 @@ const Register InterfaceDescriptor::ContextRegister() { return cp; }
 
 
 static void InitializeArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
   // cp -- context
@@ -124,10 +124,8 @@ static void InitializeArrayConstructorDescriptor(
 
   if (constant_stack_parameter_count == 0) {
     Register registers[] = { cp, a1, a2 };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           deopt_handler,
-                           NULL,
-                           constant_stack_parameter_count,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+                           deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)
@@ -137,19 +135,16 @@ static void InitializeArrayConstructorDescriptor(
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           a0,
-                           deopt_handler,
-                           representations,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, a0,
+                           deopt_handler, representations,
                            constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE,
-                           PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
 }
 
 
 static void InitializeInternalArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
   // cp -- context
@@ -160,10 +155,8 @@ static void InitializeInternalArrayConstructorDescriptor(
 
   if (constant_stack_parameter_count == 0) {
     Register registers[] = { cp, a1 };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           deopt_handler,
-                           NULL,
-                           constant_stack_parameter_count,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+                           deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)
@@ -172,39 +165,36 @@ static void InitializeInternalArrayConstructorDescriptor(
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           a0,
-                           deopt_handler,
-                           representations,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, a0,
+                           deopt_handler, representations,
                            constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE,
-                           PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
 }
 
 
 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 0);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0);
 }
 
 
 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 1);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1);
 }
 
 
 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, -1);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1);
 }
 
 
 void ToBooleanStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(ToBooleanIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
@@ -213,26 +203,26 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
 
 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
 }
 
 
 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 1);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
 }
 
 
 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, -1);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
 }
 
 
 void BinaryOpICStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a1, a0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
@@ -242,7 +232,7 @@ void BinaryOpICStub::InitializeInterfaceDescriptor(
 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a2, a1, a0 };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
 }
 
@@ -250,9 +240,8 @@ void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
 void StringAddStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { cp, a1, a0 };
-  descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
-      Runtime::FunctionForId(Runtime::kStringAdd)->entry);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
+                         Runtime::FunctionForId(Runtime::kStringAdd)->entry);
 }
 
 
index 6d9419b3311114eade56006c83220e9a676c6a67..390348090b79ae3545d2740d1659f0be7f637f51 100644 (file)
@@ -47,9 +47,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
 
   DeoptimizationInputData* deopt_data =
       DeoptimizationInputData::cast(code->deoptimization_data());
-  SharedFunctionInfo* shared =
-      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
-  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
 #ifdef DEBUG
   Address prev_call_address = NULL;
 #endif
index 8cfdd0f073daacb273156cc86a224bb2379a3b9c..d3baf671d67e64da98f3ab92880eb55976b660bb 100644 (file)
@@ -852,7 +852,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   int length = deoptimizations_.length();
   if (length == 0) return;
   Handle<DeoptimizationInputData> data =
-      DeoptimizationInputData::New(isolate(), length, TENURED);
+      DeoptimizationInputData::New(isolate(), length, 0, TENURED);
 
   Handle<ByteArray> translations =
       translations_.CreateByteArray(isolate()->factory());
index df71c0b5baf5ce181a4d0e07ce798c4866dcd1f8..50347c571245625ba17097cf691e506825ac7e4c 100644 (file)
@@ -9,6 +9,7 @@
 #include "src/jsregexp.h"
 #include "src/macro-assembler.h"
 #include "src/objects-visiting.h"
+#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
index e14c1af6db83bbb880df0768ed2c5fe726cf0a23..5ef23c5789f121c957f98a5853450a98475ff716 100644 (file)
@@ -731,10 +731,19 @@ bool Object::IsDeoptimizationInputData() const {
   // the entry size.
   int length = FixedArray::cast(this)->length();
   if (length == 0) return true;
+  if (length < DeoptimizationInputData::kFirstDeoptEntryIndex) return false;
 
-  length -= DeoptimizationInputData::kFirstDeoptEntryIndex;
-  return length >= 0 &&
-      length % DeoptimizationInputData::kDeoptEntrySize == 0;
+  FixedArray* self = FixedArray::cast(const_cast<Object*>(this));
+  int deopt_count =
+      Smi::cast(self->get(DeoptimizationInputData::kDeoptEntryCountIndex))
+          ->value();
+  int patch_count =
+      Smi::cast(
+          self->get(
+              DeoptimizationInputData::kReturnAddressPatchEntryCountIndex))
+          ->value();
+
+  return length == DeoptimizationInputData::LengthFor(deopt_count, patch_count);
 }
 
 
@@ -1082,6 +1091,12 @@ bool Object::IsNaN() const {
 }
 
 
+bool Object::IsMinusZero() const {
+  return this->IsHeapNumber() &&
+         i::IsMinusZero(HeapNumber::cast(this)->value());
+}
+
+
 MaybeHandle<Smi> Object::ToSmi(Isolate* isolate, Handle<Object> object) {
   if (object->IsSmi()) return Handle<Smi>::cast(object);
   if (object->IsHeapNumber()) {
@@ -4703,6 +4718,21 @@ inline void Code::set_is_crankshafted(bool value) {
 }
 
 
+inline bool Code::is_turbofanned() {
+  ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == STUB);
+  return IsTurbofannedField::decode(
+      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+}
+
+
+inline void Code::set_is_turbofanned(bool value) {
+  ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == STUB);
+  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+  int updated = IsTurbofannedField::update(previous, value);
+  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+}
+
+
 bool Code::optimizable() {
   ASSERT_EQ(FUNCTION, kind());
   return READ_BYTE_FIELD(this, kOptimizableOffset) == 1;
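
is_turbofanned() is a one-bit accessor in kKindSpecificFlags1, decoded and updated through a BitField. From the layout constants later in this diff the flag lands at bit 28 (24 stack-slot bits, then the has-function-cache, marked-for-deoptimization, weak-stub, and invalidated-weak-stub bits). A toy reimplementation of the BitField mechanics (not V8's template):

    // BitField encode/decode/update sketch backing is_turbofanned().
    #include <cassert>
    #include <cstdint>

    template <class T, int shift, int size>
    struct BitFieldSketch {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t bits) {
        return static_cast<T>((bits & kMask) >> shift);
      }
      static uint32_t update(uint32_t previous, T value) {
        return (previous & ~kMask) | encode(value);
      }
    };

    typedef BitFieldSketch<bool, 28, 1> IsTurbofannedFieldSketch;

    int main() {
      uint32_t flags1 = 0;
      flags1 = IsTurbofannedFieldSketch::update(flags1, true);   // setter
      assert(IsTurbofannedFieldSketch::decode(flags1) == true);  // getter
      return 0;
    }
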
index 02944df0e4eb4ef68c28ffacbcb830c66c34ccf4..0a07db7605e3b1c5cca9f598c0bde35d980b722e 100644 (file)
@@ -8,6 +8,7 @@
 #include "src/disassembler.h"
 #include "src/jsregexp.h"
 #include "src/objects-visiting.h"
+#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
index f7341c98e3e7ae869c0d67bf049b35f8ea3511df..7f888456ba2417441668b48aca738950bdf9664f 100644 (file)
@@ -703,6 +703,9 @@ void StaticMarkingVisitor<StaticVisitor>::MarkTransitionArray(
 template<typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::MarkInlinedFunctionsCode(
     Heap* heap, Code* code) {
+  // Skip in absence of inlining.
+  // TODO(turbofan): Revisit once we support inlining.
+  if (code->is_turbofanned()) return;
   // For optimized functions we should retain both non-optimized version
   // of its code and non-optimized version of all inlined functions.
   // This is required to support bailing out from inlined code.
index 79b2ffc150dcd79f1768270a3adc036304204215..4256ba6a2de37c40570c739e920876975deaba53 100644 (file)
@@ -1511,9 +1511,11 @@ void HeapObject::HeapObjectShortPrint(OStream& os) {  // NOLINT
     break;
   STRUCT_LIST(MAKE_STRUCT_CASE)
 #undef MAKE_STRUCT_CASE
-    case CODE_TYPE:
-      os << "<Code>";
+    case CODE_TYPE: {
+      Code* code = Code::cast(this);
+      os << "<Code: " << Code::Kind2String(code->kind()) << ">";
       break;
+    }
     case ODDBALL_TYPE: {
       if (IsUndefined()) {
         os << "<undefined>";
@@ -1735,7 +1737,7 @@ void HeapNumber::HeapNumberPrint(OStream& os) {  // NOLINT
 
 
 String* JSReceiver::class_name() {
-  if (IsJSFunction() && IsJSFunctionProxy()) {
+  if (IsJSFunction() || IsJSFunctionProxy()) {
     return GetHeap()->function_class_string();
   }
   if (map()->constructor()->IsJSFunction()) {
@@ -8418,13 +8420,17 @@ Object* AccessorPair::GetComponent(AccessorComponent component) {
 
 
 Handle<DeoptimizationInputData> DeoptimizationInputData::New(
-    Isolate* isolate,
-    int deopt_entry_count,
+    Isolate* isolate, int deopt_entry_count, int return_patch_address_count,
     PretenureFlag pretenure) {
-  ASSERT(deopt_entry_count > 0);
-  return Handle<DeoptimizationInputData>::cast(
-      isolate->factory()->NewFixedArray(
-          LengthFor(deopt_entry_count), pretenure));
+  ASSERT(deopt_entry_count + return_patch_address_count > 0);
+  Handle<FixedArray> deoptimization_data =
+      Handle<FixedArray>::cast(isolate->factory()->NewFixedArray(
+          LengthFor(deopt_entry_count, return_patch_address_count), pretenure));
+  deoptimization_data->set(kDeoptEntryCountIndex,
+                           Smi::FromInt(deopt_entry_count));
+  deoptimization_data->set(kReturnAddressPatchEntryCountIndex,
+                           Smi::FromInt(return_patch_address_count));
+  return Handle<DeoptimizationInputData>::cast(deoptimization_data);
 }
 
 
@@ -10162,6 +10168,7 @@ Context* JSFunction::NativeContextFromLiterals(FixedArray* literals) {
 //   ""       only the top-level function
 //   "name"   only the function "name"
 //   "name*"  only functions starting with "name"
+//   "~"      none; the tilde is not an identifier
 bool JSFunction::PassesFilter(const char* raw_filter) {
   if (*raw_filter == '*') return true;
   String* name = shared()->DebugName();
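
The filter grammar documented above is small; expected outcomes for a function named "foo" (read off the comment, not independently verified against the matcher):

    // PassesFilter expectations for shared()->DebugName() == "foo":
    //   "*"    -> true   (matches every function)
    //   "foo"  -> true   (exact name match)
    //   "fo*"  -> true   (prefix match)
    //   ""     -> false  (matches only the unnamed top-level function)
    //   "~"    -> false  (matches nothing; '~' cannot appear in an identifier)
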
@@ -11241,11 +11248,11 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
   disasm::NameConverter converter;
   int deopt_count = DeoptCount();
   os << "Deoptimization Input Data (deopt points = " << deopt_count << ")\n";
-  if (0 == deopt_count) return;
-
-  os << " index  ast id    argc     pc";
-  if (FLAG_print_code_verbose) os << "commands";
-  os << "\n";
+  if (0 != deopt_count) {
+    os << " index  ast id    argc     pc";
+    if (FLAG_print_code_verbose) os << "commands";
+    os << "\n";
+  }
   for (int i = 0; i < deopt_count; i++) {
     // TODO(svenpanne) Add some basic formatting to our streams.
     Vector<char> buf1 = Vector<char>::New(128);
@@ -11394,6 +11401,19 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
       os << "\n";
     }
   }
+
+  int return_address_patch_count = ReturnAddressPatchCount();
+  if (return_address_patch_count != 0) {
+    os << "Return address patch data (count = " << return_address_patch_count
+       << ")\n";
+    os << "index pc    patched_pc\n";
+  }
+  for (int i = 0; i < return_address_patch_count; i++) {
+    Vector<char> buf = Vector<char>::New(128);
+    SNPrintF(buf, "%6d  %6d  %10d", i, ReturnAddressPc(i)->value(),
+             PatchedAddressPc(i)->value());
+    os << buf.start() << "\n";
+  }
 }
 
 
index 1c0fa6462e0802414c5b8eff2489d5628c1efe15..cd596e9a8bb87b6a512a5e53bbf6132468a6cc80 100644 (file)
@@ -13,7 +13,6 @@
 #include "src/field-index.h"
 #include "src/flags.h"
 #include "src/list.h"
-#include "src/ostreams.h"
 #include "src/property-details.h"
 #include "src/smart-pointers.h"
 #include "src/unicode-inl.h"
 namespace v8 {
 namespace internal {
 
+class OStream;
+
 enum KeyedAccessStoreMode {
   STANDARD_STORE,
   STORE_TRANSITION_SMI_TO_OBJECT,
@@ -1418,6 +1419,7 @@ class Object {
   // Extract the number.
   inline double Number();
   INLINE(bool IsNaN() const);
+  INLINE(bool IsMinusZero() const);
   bool ToInt32(int32_t* value);
   bool ToUint32(uint32_t* value);
 
@@ -4620,6 +4622,9 @@ class ScopeInfo : public FixedArray {
   // Return the initialization flag of the given context local.
   InitializationFlag ContextLocalInitFlag(int var);
 
+  // Return the maybe-assigned flag of the given context local.
+  MaybeAssignedFlag ContextLocalMaybeAssignedFlag(int var);
+
   // Return true if this local was introduced by the compiler, and should not be
   // exposed to the user in a debugger.
   bool LocalIsSynthetic(int var);
@@ -4635,10 +4640,9 @@ class ScopeInfo : public FixedArray {
   // returns a value < 0. The name must be an internalized string.
   // If the slot is present and mode != NULL, sets *mode to the corresponding
   // mode for that variable.
-  static int ContextSlotIndex(Handle<ScopeInfo> scope_info,
-                              Handle<String> name,
-                              VariableMode* mode,
-                              InitializationFlag* init_flag);
+  static int ContextSlotIndex(Handle<ScopeInfo> scope_info, Handle<String> name,
+                              VariableMode* mode, InitializationFlag* init_flag,
+                              MaybeAssignedFlag* maybe_assigned_flag);
 
   // Lookup support for serialized scope info. Returns the
   // parameter index for a given parameter name if the parameter is present;
@@ -4756,6 +4760,8 @@ class ScopeInfo : public FixedArray {
   // ContextLocalInfoEntries part.
   class ContextLocalMode:      public BitField<VariableMode,         0, 3> {};
   class ContextLocalInitFlag:  public BitField<InitializationFlag,   3, 1> {};
+  class ContextLocalMaybeAssignedFlag
+      : public BitField<MaybeAssignedFlag, 4, 1> {};
 };
 
 
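Each context-local info entry now packs three fields into one small integer: three mode bits at [0,3), the initialization flag at bit 3, and the new maybe-assigned flag at bit 4. A worked packing with illustrative enum values (the real VariableMode constants differ):

    // Packing one ContextLocalInfoEntries slot per the BitFields above.
    #include <cassert>
    #include <cstdint>

    enum VariableModeSketch { VAR = 0, LET = 1 };      // 3 bits at position 0
    enum InitFlagSketch { kNeedsInitialization = 0 };  // 1 bit at position 3
    enum MaybeAssignedSketch { kMaybeAssigned = 1 };   // 1 bit at position 4

    int main() {
      uint32_t info = static_cast<uint32_t>(LET) |
                      (static_cast<uint32_t>(kNeedsInitialization) << 3) |
                      (static_cast<uint32_t>(kMaybeAssigned) << 4);
      assert(info == 0x11);  // bit 4 set (maybe assigned), mode bits = 001 (LET)
      return 0;
    }
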
@@ -5238,14 +5244,16 @@ TYPED_ARRAYS(FIXED_TYPED_ARRAY_TRAITS)
 class DeoptimizationInputData: public FixedArray {
  public:
   // Layout description.  Indices in the array.
-  static const int kTranslationByteArrayIndex = 0;
-  static const int kInlinedFunctionCountIndex = 1;
-  static const int kLiteralArrayIndex = 2;
-  static const int kOsrAstIdIndex = 3;
-  static const int kOsrPcOffsetIndex = 4;
-  static const int kOptimizationIdIndex = 5;
-  static const int kSharedFunctionInfoIndex = 6;
-  static const int kFirstDeoptEntryIndex = 7;
+  static const int kDeoptEntryCountIndex = 0;
+  static const int kReturnAddressPatchEntryCountIndex = 1;
+  static const int kTranslationByteArrayIndex = 2;
+  static const int kInlinedFunctionCountIndex = 3;
+  static const int kLiteralArrayIndex = 4;
+  static const int kOsrAstIdIndex = 5;
+  static const int kOsrPcOffsetIndex = 6;
+  static const int kOptimizationIdIndex = 7;
+  static const int kSharedFunctionInfoIndex = 8;
+  static const int kFirstDeoptEntryIndex = 9;
 
   // Offsets of deopt entry elements relative to the start of the entry.
   static const int kAstIdRawOffset = 0;
@@ -5254,6 +5262,12 @@ class DeoptimizationInputData: public FixedArray {
   static const int kPcOffset = 3;
   static const int kDeoptEntrySize = 4;
 
+  // Offsets of return address patch entry elements relative to the start of the
+  // entry.
+  static const int kReturnAddressPcOffset = 0;
+  static const int kPatchedAddressPcOffset = 1;
+  static const int kReturnAddressPatchEntrySize = 2;
+
   // Simple element accessors.
 #define DEFINE_ELEMENT_ACCESSORS(name, type)      \
   type* name() {                                  \
@@ -5274,20 +5288,35 @@ class DeoptimizationInputData: public FixedArray {
 #undef DEFINE_ELEMENT_ACCESSORS
 
   // Accessors for elements of the ith deoptimization entry.
-#define DEFINE_ENTRY_ACCESSORS(name, type)                       \
-  type* name(int i) {                                            \
-    return type::cast(get(IndexForEntry(i) + k##name##Offset));  \
-  }                                                              \
-  void Set##name(int i, type* value) {                           \
-    set(IndexForEntry(i) + k##name##Offset, value);              \
+#define DEFINE_DEOPT_ENTRY_ACCESSORS(name, type)                \
+  type* name(int i) {                                           \
+    return type::cast(get(IndexForEntry(i) + k##name##Offset)); \
+  }                                                             \
+  void Set##name(int i, type* value) {                          \
+    set(IndexForEntry(i) + k##name##Offset, value);             \
+  }
+
+  DEFINE_DEOPT_ENTRY_ACCESSORS(AstIdRaw, Smi)
+  DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi)
+  DEFINE_DEOPT_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
+  DEFINE_DEOPT_ENTRY_ACCESSORS(Pc, Smi)
+
+#undef DEFINE_DEOPT_ENTRY_ACCESSORS
+
+// Accessors for elements of the ith return address patch entry.
+#define DEFINE_PATCH_ENTRY_ACCESSORS(name, type)                      \
+  type* name(int i) {                                                 \
+    return type::cast(                                                \
+        get(IndexForReturnAddressPatchEntry(i) + k##name##Offset));   \
+  }                                                                   \
+  void Set##name(int i, type* value) {                                \
+    set(IndexForReturnAddressPatchEntry(i) + k##name##Offset, value); \
   }
 
-  DEFINE_ENTRY_ACCESSORS(AstIdRaw, Smi)
-  DEFINE_ENTRY_ACCESSORS(TranslationIndex, Smi)
-  DEFINE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
-  DEFINE_ENTRY_ACCESSORS(Pc, Smi)
+  DEFINE_PATCH_ENTRY_ACCESSORS(ReturnAddressPc, Smi)
+  DEFINE_PATCH_ENTRY_ACCESSORS(PatchedAddressPc, Smi)
 
-#undef DEFINE_ENTRY_ACCESSORS
+#undef DEFINE_PATCH_ENTRY_ACCESSORS
 
   BailoutId AstId(int i) {
     return BailoutId(AstIdRaw(i)->value());
@@ -5298,12 +5327,19 @@ class DeoptimizationInputData: public FixedArray {
   }
 
   int DeoptCount() {
-    return (length() - kFirstDeoptEntryIndex) / kDeoptEntrySize;
+    return length() == 0 ? 0 : Smi::cast(get(kDeoptEntryCountIndex))->value();
+  }
+
+  int ReturnAddressPatchCount() {
+    return length() == 0
+               ? 0
+               : Smi::cast(get(kReturnAddressPatchEntryCountIndex))->value();
   }
 
   // Allocates a DeoptimizationInputData.
   static Handle<DeoptimizationInputData> New(Isolate* isolate,
                                              int deopt_entry_count,
+                                             int return_address_patch_count,
                                              PretenureFlag pretenure);
 
   DECLARE_CAST(DeoptimizationInputData)
@@ -5313,12 +5349,20 @@ class DeoptimizationInputData: public FixedArray {
 #endif
 
  private:
+  friend class Object;  // For accessing LengthFor.
+
   static int IndexForEntry(int i) {
     return kFirstDeoptEntryIndex + (i * kDeoptEntrySize);
   }
 
-  static int LengthFor(int entry_count) {
-    return IndexForEntry(entry_count);
+  int IndexForReturnAddressPatchEntry(int i) {
+    return kFirstDeoptEntryIndex + (DeoptCount() * kDeoptEntrySize) +
+           (i * kReturnAddressPatchEntrySize);
+  }
+
+  static int LengthFor(int deopt_count, int return_address_patch_count) {
+    return kFirstDeoptEntryIndex + (deopt_count * kDeoptEntrySize) +
+           (return_address_patch_count * kReturnAddressPatchEntrySize);
   }
 };
 
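With the two counts stored in the array header, the total length is fully determined by LengthFor (which is also what the IsDeoptimizationInputData check earlier in this diff validates): nine header slots, four slots per deopt entry, two per return-address patch entry. A worked check of the arithmetic:

    // LengthFor(deopt_count, return_address_patch_count) worked example,
    // using the constants declared above (header = 9, entry sizes 4 and 2).
    #include <cassert>

    static int LengthForSketch(int deopt_count, int patch_count) {
      const int kFirstDeoptEntryIndex = 9;
      const int kDeoptEntrySize = 4;
      const int kReturnAddressPatchEntrySize = 2;
      return kFirstDeoptEntryIndex + deopt_count * kDeoptEntrySize +
             patch_count * kReturnAddressPatchEntrySize;
    }

    int main() {
      assert(LengthForSketch(0, 0) == 9);   // header only
      assert(LengthForSketch(3, 1) == 23);  // 9 + 3*4 + 1*2
      return 0;
    }
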
@@ -5519,12 +5563,18 @@ class Code: public HeapObject {
   inline void set_raw_kind_specific_flags1(int value);
   inline void set_raw_kind_specific_flags2(int value);
 
-  // For kind STUB or ICs, tells whether or not a code object was generated by
-  // the optimizing compiler (but it may not be an optimized function).
-  bool is_crankshafted();
-  bool is_hydrogen_stub();  // Crankshafted, but not a function.
+  // [is_crankshafted]: For kind STUB or ICs, tells whether or not a code
+  // object was generated by either the hydrogen or the TurboFan optimizing
+  // compiler (but it may not be an optimized function).
+  inline bool is_crankshafted();
+  inline bool is_hydrogen_stub();  // Crankshafted, but not a function.
   inline void set_is_crankshafted(bool value);
 
+  // [is_turbofanned]: For kind STUB or OPTIMIZED_FUNCTION, tells whether the
+  // code object was generated by the TurboFan optimizing compiler.
+  inline bool is_turbofanned();
+  inline void set_is_turbofanned(bool value);
+
   // [optimizable]: For FUNCTION kind, tells if it is optimizable.
   inline bool optimizable();
   inline void set_optimizable(bool value);
@@ -5565,7 +5615,7 @@ class Code: public HeapObject {
   inline unsigned stack_slots();
   inline void set_stack_slots(unsigned slots);
 
-  // [safepoint_table_start]: For kind OPTIMIZED_CODE, the offset in
+  // [safepoint_table_start]: For kind OPTIMIZED_FUNCTION, the offset in
   // the instruction stream where the safepoint table starts.
   inline unsigned safepoint_table_offset();
   inline void set_safepoint_table_offset(unsigned offset);
@@ -5772,7 +5822,8 @@ class Code: public HeapObject {
   }
 
   inline bool IsWeakObject(Object* object) {
-    return (is_optimized_code() && IsWeakObjectInOptimizedCode(object)) ||
+    return (is_optimized_code() && !is_turbofanned() &&
+            IsWeakObjectInOptimizedCode(object)) ||
            (is_weak_stub() && IsWeakObjectInIC(object));
   }
 
@@ -5834,37 +5885,27 @@ class Code: public HeapObject {
   // KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
   static const int kStackSlotsFirstBit = 0;
   static const int kStackSlotsBitCount = 24;
-  static const int kHasFunctionCacheFirstBit =
+  static const int kHasFunctionCacheBit =
       kStackSlotsFirstBit + kStackSlotsBitCount;
-  static const int kHasFunctionCacheBitCount = 1;
-  static const int kMarkedForDeoptimizationFirstBit =
-      kStackSlotsFirstBit + kStackSlotsBitCount + 1;
-  static const int kMarkedForDeoptimizationBitCount = 1;
-  static const int kWeakStubFirstBit =
-      kMarkedForDeoptimizationFirstBit + kMarkedForDeoptimizationBitCount;
-  static const int kWeakStubBitCount = 1;
-  static const int kInvalidatedWeakStubFirstBit =
-      kWeakStubFirstBit + kWeakStubBitCount;
-  static const int kInvalidatedWeakStubBitCount = 1;
+  static const int kMarkedForDeoptimizationBit = kHasFunctionCacheBit + 1;
+  static const int kWeakStubBit = kMarkedForDeoptimizationBit + 1;
+  static const int kInvalidatedWeakStubBit = kWeakStubBit + 1;
+  static const int kIsTurbofannedBit = kInvalidatedWeakStubBit + 1;
 
   STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
-  STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32);
-  STATIC_ASSERT(kInvalidatedWeakStubFirstBit +
-                kInvalidatedWeakStubBitCount <= 32);
+  STATIC_ASSERT(kIsTurbofannedBit + 1 <= 32);
 
   class StackSlotsField: public BitField<int,
       kStackSlotsFirstBit, kStackSlotsBitCount> {};  // NOLINT
-  class HasFunctionCacheField: public BitField<bool,
-      kHasFunctionCacheFirstBit, kHasFunctionCacheBitCount> {};  // NOLINT
-  class MarkedForDeoptimizationField: public BitField<bool,
-      kMarkedForDeoptimizationFirstBit,
-      kMarkedForDeoptimizationBitCount> {};  // NOLINT
-  class WeakStubField: public BitField<bool,
-      kWeakStubFirstBit,
-      kWeakStubBitCount> {};  // NOLINT
-  class InvalidatedWeakStubField: public BitField<bool,
-      kInvalidatedWeakStubFirstBit,
-      kInvalidatedWeakStubBitCount> {};  // NOLINT
+  class HasFunctionCacheField : public BitField<bool, kHasFunctionCacheBit, 1> {
+  };  // NOLINT
+  class MarkedForDeoptimizationField
+      : public BitField<bool, kMarkedForDeoptimizationBit, 1> {};   // NOLINT
+  class WeakStubField : public BitField<bool, kWeakStubBit, 1> {};  // NOLINT
+  class InvalidatedWeakStubField
+      : public BitField<bool, kInvalidatedWeakStubBit, 1> {};  // NOLINT
+  class IsTurbofannedField : public BitField<bool, kIsTurbofannedBit, 1> {
+  };  // NOLINT
 
   // KindSpecificFlags2 layout (ALL)
   static const int kIsCrankshaftedBit = 0;
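A minimal sketch of how the BitField declarations above are used (assuming
V8's BitField template with its encode/decode/update helpers; the flags word
below is illustrative, not from the patch):

    // Pack and unpack single-bit flags within one 32-bit flags word.
    uint32_t flags = 0;
    flags = IsTurbofannedField::update(flags, true);       // set the bit
    flags = WeakStubField::update(flags, false);           // clear another
    bool turbofanned = IsTurbofannedField::decode(flags);  // read it back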
index 170b1f5a3523f1bc0eab16a81fef3fbc98f46f3c..d2925c0b02e9058f48f5024d7028c5832ccebd4d 100644 (file)
@@ -1442,6 +1442,19 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
       return NULL;
   }
 
+  // Every export of a module may be assigned to from outside the module,
+  // so mark each mutable export as maybe assigned.
+  for (int i = 0; i < names.length(); ++i) {
+    Variable* var = scope_->Lookup(names[i]);
+    if (var == NULL) {
+      // TODO(sigurds) This is an export that has no definition yet;
+      // it is unclear what should happen in this case.
+      continue;
+    }
+    if (!IsImmutableVariableMode(var->mode())) {
+      var->set_maybe_assigned();
+    }
+  }
+
   // Extract declared names into export declarations and interface.
   Interface* interface = scope_->interface();
   for (int i = 0; i < names.length(); ++i) {
@@ -1655,8 +1668,9 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
         : declaration_scope->LookupLocal(name);
     if (var == NULL) {
       // Declare the name.
-      var = declaration_scope->DeclareLocal(
-          name, mode, declaration->initialization(), proxy->interface());
+      var = declaration_scope->DeclareLocal(name, mode,
+                                            declaration->initialization(),
+                                            kNotAssigned, proxy->interface());
     } else if (IsLexicalVariableMode(mode) || IsLexicalVariableMode(var->mode())
                || ((mode == CONST_LEGACY || var->mode() == CONST_LEGACY) &&
                    !declaration_scope->is_global_scope())) {
@@ -1711,18 +1725,19 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
     // For global const variables we bind the proxy to a variable.
     ASSERT(resolve);  // should be set by all callers
     Variable::Kind kind = Variable::NORMAL;
-    var = new(zone()) Variable(
-        declaration_scope, name, mode, true, kind,
-        kNeedsInitialization, proxy->interface());
+    var = new (zone())
+        Variable(declaration_scope, name, mode, true, kind,
+                 kNeedsInitialization, kNotAssigned, proxy->interface());
   } else if (declaration_scope->is_eval_scope() &&
              declaration_scope->strict_mode() == SLOPPY) {
     // For variable declarations in a sloppy eval scope the proxy is bound
     // to a lookup variable to force a dynamic declaration using the
     // DeclareLookupSlot runtime function.
     Variable::Kind kind = Variable::NORMAL;
-    var = new(zone()) Variable(
-        declaration_scope, name, mode, true, kind,
-        declaration->initialization(), proxy->interface());
+    // TODO(sigurds) figure out if kNotAssigned is OK here
+    var = new (zone()) Variable(declaration_scope, name, mode, true, kind,
+                                declaration->initialization(), kNotAssigned,
+                                proxy->interface());
     var->AllocateTo(Variable::LOOKUP, -1);
     resolve = true;
   }
@@ -2625,9 +2640,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
     Target target(&this->target_stack_, &catch_collector);
     VariableMode mode =
         allow_harmony_scoping() && strict_mode() == STRICT ? LET : VAR;
-    catch_variable =
-        catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
-
+    catch_variable = catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
     BlockState block_state(&scope_, catch_scope);
     catch_block = ParseBlock(NULL, CHECK_OK);
 
@@ -3454,7 +3467,12 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
         dupe_error_loc = scanner()->location();
       }
 
-      scope_->DeclareParameter(param_name, VAR);
+      Variable* var = scope_->DeclareParameter(param_name, VAR);
+      // TODO(sigurds) Mark every parameter as maybe assigned. This is a
+      // conservative approximation necessary to account for parameters
+      // that are assigned via the arguments array.
+      var->set_maybe_assigned();
+
       num_parameters++;
       if (num_parameters > Code::kMaxArguments) {
         ReportMessage("too_many_parameters");
@@ -3485,9 +3503,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
           allow_harmony_scoping() && strict_mode() == STRICT
               ? CONST : CONST_LEGACY;
       ASSERT(function_name != NULL);
-      fvar = new(zone()) Variable(scope_,
-         function_name, fvar_mode, true /* is valid LHS */,
-         Variable::NORMAL, kCreatedInitialized, Interface::NewConst());
+      fvar = new (zone())
+          Variable(scope_, function_name, fvar_mode, true /* is valid LHS */,
+                   Variable::NORMAL, kCreatedInitialized, kNotAssigned,
+                   Interface::NewConst());
       VariableProxy* proxy = factory()->NewVariableProxy(fvar);
       VariableDeclaration* fvar_declaration = factory()->NewVariableDeclaration(
           proxy, fvar_mode, scope_, RelocInfo::kNoPosition);
index 1c42c4929669c89077a6d18a3ef1a21387a82224..f1378ecdf939fb161a0dac63e3437be8f0a45591 100644 (file)
@@ -5,6 +5,7 @@
 #include "src/property.h"
 
 #include "src/handles-inl.h"
+#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
index 191b45789fbec659d2f687612d50b2c93a069de2..624e16fb1011b014f3a45c66c55dbc427ac81549 100644 (file)
@@ -9,12 +9,13 @@
 #include "src/field-index.h"
 #include "src/field-index-inl.h"
 #include "src/isolate.h"
-#include "src/ostreams.h"
 #include "src/types.h"
 
 namespace v8 {
 namespace internal {
 
+class OStream;
+
 // Abstraction for elements in instance-descriptor arrays.
 //
 // Each descriptor has a key, property attributes, property type,
index 518e96a99bfc0c1cf6f636793a5784d5e0373a5a..2a0234bd7c05511c11c1e8272a4f8643b9bec489 100644 (file)
@@ -1821,15 +1821,6 @@ RUNTIME_FUNCTION(Runtime_GetWeakSetValues) {
 }
 
 
-RUNTIME_FUNCTION(Runtime_ClassOf) {
-  SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(Object, obj, 0);
-  if (!obj->IsJSObject()) return isolate->heap()->null_value();
-  return JSObject::cast(obj)->class_name();
-}
-
-
 RUNTIME_FUNCTION(Runtime_GetPrototype) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
@@ -6078,6 +6069,35 @@ RUNTIME_FUNCTION(Runtime_Typeof) {
 }
 
 
+RUNTIME_FUNCTION(Runtime_Booleanize) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(Object, value_raw, 0);
+  CONVERT_SMI_ARG_CHECKED(token_raw, 1);
+  intptr_t value = reinterpret_cast<intptr_t>(value_raw);
+  Token::Value token = static_cast<Token::Value>(token_raw);
+  switch (token) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      return isolate->heap()->ToBoolean(value == 0);
+    case Token::NE:
+    case Token::NE_STRICT:
+      return isolate->heap()->ToBoolean(value != 0);
+    case Token::LT:
+      return isolate->heap()->ToBoolean(value < 0);
+    case Token::GT:
+      return isolate->heap()->ToBoolean(value > 0);
+    case Token::LTE:
+      return isolate->heap()->ToBoolean(value <= 0);
+    case Token::GTE:
+      return isolate->heap()->ToBoolean(value >= 0);
+    default:
+      // This should only happen during natives fuzzing.
+      return isolate->heap()->undefined_value();
+  }
+}
+
+
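A quick worked example of the mapping implemented by Runtime_Booleanize above
(values illustrative; the raw intptr_t is presumably the result of a
comparison stub):

    //   value = -1, Token::LT  ->  true   (-1 <  0)
    //   value = -1, Token::GTE ->  false  (-1 >= 0)
    //   value =  0, Token::EQ  ->  true   ( 0 == 0)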
 static bool AreDigits(const uint8_t*s, int from, int to) {
   for (int i = from; i < to; i++) {
     if (s[i] < '0' || s[i] > '9') return false;
@@ -7870,16 +7890,13 @@ RUNTIME_FUNCTION(Runtime_DateSetValue) {
 }
 
 
-RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
-  Object** parameters = reinterpret_cast<Object**>(args[1]);
-  CONVERT_SMI_ARG_CHECKED(argument_count, 2);
-
+static Handle<JSObject> NewSloppyArguments(Isolate* isolate,
+                                           Handle<JSFunction> callee,
+                                           Object** parameters,
+                                           int argument_count) {
   Handle<JSObject> result =
       isolate->factory()->NewArgumentsObject(callee, argument_count);
+
   // Allocate the elements if needed.
   int parameter_count = callee->shared()->formal_parameter_count();
   if (argument_count > 0) {
@@ -7960,31 +7977,67 @@ RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
       }
     }
   }
-  return *result;
+  return result;
 }
 
 
-RUNTIME_FUNCTION(Runtime_NewStrictArguments) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0)
-  Object** parameters = reinterpret_cast<Object**>(args[1]);
-  CONVERT_SMI_ARG_CHECKED(length, 2);
-
+static Handle<JSObject> NewStrictArguments(Isolate* isolate,
+                                           Handle<JSFunction> callee,
+                                           Object** parameters,
+                                           int argument_count) {
   Handle<JSObject> result =
-        isolate->factory()->NewArgumentsObject(callee, length);
+      isolate->factory()->NewArgumentsObject(callee, argument_count);
 
-  if (length > 0) {
+  if (argument_count > 0) {
     Handle<FixedArray> array =
-        isolate->factory()->NewUninitializedFixedArray(length);
+        isolate->factory()->NewUninitializedFixedArray(argument_count);
     DisallowHeapAllocation no_gc;
     WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
-    for (int i = 0; i < length; i++) {
+    for (int i = 0; i < argument_count; i++) {
       array->set(i, *--parameters, mode);
     }
     result->set_elements(*array);
   }
-  return *result;
+  return result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_NewArguments) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
+  JavaScriptFrameIterator it(isolate);
+
+  // Find the frame that holds the actual arguments passed to the function.
+  it.AdvanceToArgumentsFrame();
+  JavaScriptFrame* frame = it.frame();
+
+  // Determine parameter location on the stack and dispatch on language mode.
+  int argument_count = frame->GetArgumentsLength();
+  Object** parameters = reinterpret_cast<Object**>(frame->GetParameterSlot(-1));
+  return callee->shared()->strict_mode() == STRICT
+             ? *NewStrictArguments(isolate, callee, parameters, argument_count)
+             : *NewSloppyArguments(isolate, callee, parameters, argument_count);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
+  Object** parameters = reinterpret_cast<Object**>(args[1]);
+  CONVERT_SMI_ARG_CHECKED(argument_count, 2);
+  return *NewSloppyArguments(isolate, callee, parameters, argument_count);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NewStrictArguments) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0)
+  Object** parameters = reinterpret_cast<Object**>(args[1]);
+  CONVERT_SMI_ARG_CHECKED(argument_count, 2);
+  return *NewStrictArguments(isolate, callee, parameters, argument_count);
 }
 
 
@@ -8461,6 +8514,11 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   if (!function->IsOptimized()) return isolate->heap()->undefined_value();
 
+  // TODO(turbofan): Deoptimization is not supported yet.
+  if (function->code()->is_turbofanned() && !FLAG_turbo_deoptimization) {
+    return isolate->heap()->undefined_value();
+  }
+
   Deoptimizer::DeoptimizeFunction(*function);
 
   return isolate->heap()->undefined_value();
@@ -8570,6 +8628,9 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
   if (FLAG_deopt_every_n_times) {
     return Smi::FromInt(6);  // 6 == "maybe deopted".
   }
+  if (function->IsOptimized() && function->code()->is_turbofanned()) {
+    return Smi::FromInt(7);  // 7 == "TurboFan compiler".
+  }
   return function->IsOptimized() ? Smi::FromInt(1)   // 1 == "yes".
                                  : Smi::FromInt(2);  // 2 == "no".
 }
@@ -8877,6 +8938,8 @@ RUNTIME_FUNCTION(Runtime_NewFunctionContext) {
   ASSERT(args.length() == 1);
 
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
+  ASSERT(function->context() == isolate->context());
   int length = function->shared()->scope_info()->ContextLength();
   return *isolate->factory()->NewFunctionContext(length, function);
 }
@@ -9345,6 +9408,36 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot) {
 }
 
 
+RUNTIME_FUNCTION(Runtime_LoadContextRelative) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(Context, context, 0);
+  CONVERT_SMI_ARG_CHECKED(depth, 1);
+  CONVERT_SMI_ARG_CHECKED(index, 2);
+  while (depth-- > 0) {
+    context = context->previous();
+    ASSERT(context->IsContext());
+  }
+  return context->get(index);
+}
+
+
+RUNTIME_FUNCTION(Runtime_StoreContextRelative) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 4);
+  CONVERT_ARG_CHECKED(Context, context, 0);
+  CONVERT_SMI_ARG_CHECKED(depth, 1);
+  CONVERT_SMI_ARG_CHECKED(index, 2);
+  CONVERT_ARG_CHECKED(Object, value, 3);
+  while (depth-- > 0) {
+    context = context->previous();
+    ASSERT(context->IsContext());
+  }
+  context->set(index, value);
+  return isolate->heap()->undefined_value();
+}
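A small sketch of the addressing scheme the two helpers above implement
(hypothetical starting context; previous() and get() as used in the bodies):
depth counts hops up the context chain, index selects a slot in the context
reached.

    // Load slot 3 from the context two levels up (depth = 2, index = 3):
    Context* ctx = current_context;  // hypothetical starting point
    for (int depth = 2; depth-- > 0;) ctx = ctx->previous();
    Object* value = ctx->get(3);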
+
+
 RUNTIME_FUNCTION(Runtime_Throw) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
@@ -11105,6 +11198,15 @@ static SaveContext* FindSavedContextForFrame(Isolate* isolate,
 }
 
 
+RUNTIME_FUNCTION(Runtime_IsOptimized) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 0);
+  JavaScriptFrameIterator it(isolate);
+  JavaScriptFrame* frame = it.frame();
+  return isolate->heap()->ToBoolean(frame->is_optimized());
+}
+
+
 // Return an array with frame details
 // args[0]: number: break id
 // args[1]: number: frame index
@@ -11207,9 +11309,10 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
       Handle<String> name(scope_info->LocalName(i));
       VariableMode mode;
       InitializationFlag init_flag;
+      MaybeAssignedFlag maybe_assigned_flag;
       locals->set(local * 2, *name);
-      int context_slot_index =
-          ScopeInfo::ContextSlotIndex(scope_info, name, &mode, &init_flag);
+      int context_slot_index = ScopeInfo::ContextSlotIndex(
+          scope_info, name, &mode, &init_flag, &maybe_assigned_flag);
       Object* value = context->get(context_slot_index);
       locals->set(local * 2 + 1, value);
       local++;
@@ -11382,8 +11485,10 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
 static bool ParameterIsShadowedByContextLocal(Handle<ScopeInfo> info,
                                               Handle<String> parameter_name) {
   VariableMode mode;
-  InitializationFlag flag;
-  return ScopeInfo::ContextSlotIndex(info, parameter_name, &mode, &flag) != -1;
+  InitializationFlag init_flag;
+  MaybeAssignedFlag maybe_assigned_flag;
+  return ScopeInfo::ContextSlotIndex(info, parameter_name, &mode, &init_flag,
+                                     &maybe_assigned_flag) != -1;
 }
 
 
@@ -11555,8 +11660,9 @@ static bool SetContextLocalValue(Isolate* isolate,
     if (String::Equals(variable_name, next_name)) {
       VariableMode mode;
       InitializationFlag init_flag;
-      int context_index =
-          ScopeInfo::ContextSlotIndex(scope_info, next_name, &mode, &init_flag);
+      MaybeAssignedFlag maybe_assigned_flag;
+      int context_index = ScopeInfo::ContextSlotIndex(
+          scope_info, next_name, &mode, &init_flag, &maybe_assigned_flag);
       context->set(context_index, *new_value);
       return true;
     }
@@ -15022,27 +15128,438 @@ RUNTIME_FUNCTION(Runtime_NormalizeElements) {
 
 
 RUNTIME_FUNCTION(Runtime_MaxSmi) {
+  SealHandleScope shs(isolate);
   ASSERT(args.length() == 0);
   return Smi::FromInt(Smi::kMaxValue);
 }
 
 
+// TODO(dcarney): remove this function when TurboFan supports it.
+// Takes the object to be iterated over and the result of GetPropertyNamesFast.
+// Returns the pair (cache_array, cache_type).
+RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ForInInit) {
+  SealHandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+  // This simulates CONVERT_ARG_HANDLE_CHECKED for calls returning pairs.
+  // Not worth creating a macro for now, as this function should be removed.
+  if (!args[0]->IsJSReceiver() || !args[1]->IsObject()) {
+    return MakePair(isolate->ThrowIllegalOperation(),
+                    isolate->heap()->undefined_value());
+  }
+  Handle<JSReceiver> object = args.at<JSReceiver>(0);
+  Handle<Object> cache_type = args.at<Object>(1);
+  if (cache_type->IsMap()) {
+    // Enum cache case.
+    if (Map::EnumLengthBits::decode(Map::cast(*cache_type)->bit_field3()) ==
+        0) {
+      // 0 length enum.
+      // Can't handle this case in the graph builder,
+      // so transform it into the empty fixed array case.
+      return MakePair(isolate->heap()->empty_fixed_array(), Smi::FromInt(1));
+    }
+    return MakePair(object->map()->instance_descriptors()->GetEnumCache(),
+                    *cache_type);
+  } else {
+    // FixedArray case.
+    Smi* new_cache_type = Smi::FromInt(object->IsJSProxy() ? 0 : 1);
+    return MakePair(*Handle<FixedArray>::cast(cache_type), new_cache_type);
+  }
+}
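The three outcomes of the pair returned above, summarized (derived directly
from the body):

    // (cache_array, cache_type) cases:
    //   enum cache, length > 0   -> (enum cache array, the Map passed in)
    //   enum cache, length == 0  -> (empty_fixed_array, Smi 1)
    //   fixed-array of names     -> (the array, Smi 0 for a proxy, else Smi 1)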
+
+
+// TODO(dcarney): remove this function when TurboFan supports it.
+RUNTIME_FUNCTION(Runtime_ForInCacheArrayLength) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(Object, cache_type, 0);
+  CONVERT_ARG_HANDLE_CHECKED(FixedArray, array, 1);
+  int length = 0;
+  if (cache_type->IsMap()) {
+    length = Map::cast(*cache_type)->EnumLength();
+  } else {
+    ASSERT(cache_type->IsSmi());
+    length = array->length();
+  }
+  return Smi::FromInt(length);
+}
+
+
+// TODO(dcarney): remove this function when TurboFan supports it.
+// Takes (the object to be iterated over,
+//        cache_array from ForInInit,
+//        cache_type from ForInInit,
+//        the current index)
+// Returns the pair (array[index], needs_filtering).
+RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ForInNext) {
+  SealHandleScope scope(isolate);
+  ASSERT(args.length() == 4);
+  // This simulates CONVERT_ARG_HANDLE_CHECKED for calls returning pairs.
+  // Not worth creating a macro for now, as this function should be removed.
+  if (!args[0]->IsJSReceiver() || !args[1]->IsFixedArray() ||
+      !args[2]->IsObject() || !args[3]->IsSmi()) {
+    return MakePair(isolate->ThrowIllegalOperation(),
+                    isolate->heap()->undefined_value());
+  }
+  Handle<JSReceiver> object = args.at<JSReceiver>(0);
+  Handle<FixedArray> array = args.at<FixedArray>(1);
+  Handle<Object> cache_type = args.at<Object>(2);
+  int index = args.smi_at(3);
+  // Figure out first if a slow check is needed for this object.
+  bool slow_check_needed = false;
+  if (cache_type->IsMap()) {
+    if (object->map() != Map::cast(*cache_type)) {
+      // Object transitioned.  Need slow check.
+      slow_check_needed = true;
+    }
+  } else {
+    // Smi cache_type: 1 means a slow check is needed, 0 (a proxy) means not.
+    slow_check_needed = Smi::cast(*cache_type)->value() == 1;
+  }
+  return MakePair(array->get(index),
+                  isolate->heap()->ToBoolean(slow_check_needed));
+}
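The filtering decision above as a truth table (derived directly from the
code):

    // needs_filtering:
    //   cache_type is a Map, object->map() == cache_type  -> false
    //   cache_type is a Map, object->map() != cache_type  -> true
    //   cache_type is Smi 1 (plain fast case)             -> true
    //   cache_type is Smi 0 (proxy)                       -> false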
+
+
+// ----------------------------------------------------------------------------
+// Reference implementation for inlined runtime functions.  Only used when the
+// compiler does not support a certain intrinsic.  Don't optimize these, but
+// implement the intrinsic in the respective compiler instead.
+
+// TODO(mstarzinger): These are placeholder stubs for TurboFan. Eventually
+// they will all have a real C++ implementation and this macro will be gone.
+#define U(name)                               \
+  RUNTIME_FUNCTION(RuntimeReference_##name) { \
+    UNIMPLEMENTED();                          \
+    return NULL;                              \
+  }
+
+U(IsStringWrapperSafeForDefaultValueOf)
+U(GeneratorNext)
+U(GeneratorThrow)
+U(DebugBreakInOptimizedCode)
+
+#undef U
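For reference, one instantiation of the U macro above expands literally to:

    RUNTIME_FUNCTION(RuntimeReference_GeneratorNext) {
      UNIMPLEMENTED();
      return NULL;
    }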
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsSmi) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  return isolate->heap()->ToBoolean(obj->IsSmi());
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsNonNegativeSmi) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  return isolate->heap()->ToBoolean(obj->IsSmi() &&
+                                    Smi::cast(obj)->value() >= 0);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsArray) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  return isolate->heap()->ToBoolean(obj->IsJSArray());
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsRegExp) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  return isolate->heap()->ToBoolean(obj->IsJSRegExp());
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsConstructCall) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 0);
+  JavaScriptFrameIterator it(isolate);
+  JavaScriptFrame* frame = it.frame();
+  return isolate->heap()->ToBoolean(frame->IsConstructor());
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_CallFunction) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_Call(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_ArgumentsLength) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 0);
+  JavaScriptFrameIterator it(isolate);
+  JavaScriptFrame* frame = it.frame();
+  return Smi::FromInt(frame->GetArgumentsLength());
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_Arguments) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_GetArgumentsProperty(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_ValueOf) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  if (!obj->IsJSValue()) return obj;
+  return JSValue::cast(obj)->value();
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_SetValueOf) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  CONVERT_ARG_CHECKED(Object, value, 1);
+  if (!obj->IsJSValue()) return value;
+  JSValue::cast(obj)->set_value(value);
+  return value;
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_DateField) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  CONVERT_SMI_ARG_CHECKED(index, 1);
+  if (!obj->IsJSDate()) {
+    HandleScope scope(isolate);
+    return isolate->Throw(*isolate->factory()->NewTypeError(
+        "not_date_object", HandleVector<Object>(NULL, 0)));
+  }
+  JSDate* date = JSDate::cast(obj);
+  if (index == 0) return date->value();
+  return JSDate::GetField(date, Smi::FromInt(index));
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_StringCharFromCode) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_CharFromCode(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_StringCharAt) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 2);
+  if (!args[0]->IsString()) return Smi::FromInt(0);
+  if (!args[1]->IsNumber()) return Smi::FromInt(0);
+  if (std::isinf(args.number_at(1))) return isolate->heap()->empty_string();
+  Object* code = __RT_impl_Runtime_StringCharCodeAtRT(args, isolate);
+  if (code->IsNaN()) return isolate->heap()->empty_string();
+  return __RT_impl_Runtime_CharFromCode(Arguments(1, &code), isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_OneByteSeqStringSetChar) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(SeqOneByteString, string, 0);
+  CONVERT_SMI_ARG_CHECKED(index, 1);
+  CONVERT_SMI_ARG_CHECKED(value, 2);
+  string->SeqOneByteStringSet(index, value);
+  return string;
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_TwoByteSeqStringSetChar) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(SeqTwoByteString, string, 0);
+  CONVERT_SMI_ARG_CHECKED(index, 1);
+  CONVERT_SMI_ARG_CHECKED(value, 2);
+  string->SeqTwoByteStringSet(index, value);
+  return string;
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_ObjectEquals) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(Object, obj1, 0);
+  CONVERT_ARG_CHECKED(Object, obj2, 1);
+  return isolate->heap()->ToBoolean(obj1 == obj2);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsObject) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  if (!obj->IsHeapObject()) return isolate->heap()->false_value();
+  if (obj->IsNull()) return isolate->heap()->true_value();
+  if (obj->IsUndetectableObject()) return isolate->heap()->false_value();
+  Map* map = HeapObject::cast(obj)->map();
+  bool is_non_callable_spec_object =
+      map->instance_type() >= FIRST_NONCALLABLE_SPEC_OBJECT_TYPE &&
+      map->instance_type() <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE;
+  return isolate->heap()->ToBoolean(is_non_callable_spec_object);
+}
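The checks above, summarized (derived from the body; function objects fall in
the callable range and therefore report false):

    // RuntimeReference_IsObject:
    //   Smi or other non-heap value  -> false
    //   null                         -> true
    //   undetectable object          -> false
    //   non-callable spec object     -> true
    //   anything else (e.g. JSFunction, String) -> false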
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsFunction) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  return isolate->heap()->ToBoolean(obj->IsJSFunction());
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsUndetectableObject) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  return isolate->heap()->ToBoolean(obj->IsUndetectableObject());
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsSpecObject) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  return isolate->heap()->ToBoolean(obj->IsSpecObject());
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_MathPow) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_MathPowSlow(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsMinusZero) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  if (!obj->IsHeapNumber()) return isolate->heap()->false_value();
+  HeapNumber* number = HeapNumber::cast(obj);
+  return isolate->heap()->ToBoolean(IsMinusZero(number->value()));
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_HasCachedArrayIndex) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 1);
+  return isolate->heap()->false_value();
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_GetCachedArrayIndex) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 1);
+  return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_FastAsciiArrayJoin) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 2);
+  return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_ClassOf) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  if (!obj->IsJSReceiver()) return isolate->heap()->null_value();
+  return JSReceiver::cast(obj)->class_name();
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_StringCharCodeAt) {
+  SealHandleScope shs(isolate);
+  ASSERT(args.length() == 2);
+  if (!args[0]->IsString()) return isolate->heap()->undefined_value();
+  if (!args[1]->IsNumber()) return isolate->heap()->undefined_value();
+  if (std::isinf(args.number_at(1))) return isolate->heap()->nan_value();
+  return __RT_impl_Runtime_StringCharCodeAtRT(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_StringAdd) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_StringAdd(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_SubString) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_SubString(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_StringCompare) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_StringCompare(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_RegExpExec) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_RegExpExecRT(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_RegExpConstructResult) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_RegExpConstructResult(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_GetFromCache) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+  CONVERT_SMI_ARG_CHECKED(id, 0);
+  args[0] = isolate->native_context()->jsfunction_result_caches()->get(id);
+  return __RT_impl_Runtime_GetFromCache(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_NumberToString) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_NumberToStringRT(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_DebugIsActive) {
+  SealHandleScope shs(isolate);
+  return Smi::FromInt(isolate->debug()->is_active());
+}
+
+
 // ----------------------------------------------------------------------------
 // Implementation of Runtime
 
-#define F(name, number_of_args, result_size)                             \
-  { Runtime::k##name, Runtime::RUNTIME, #name,   \
-    FUNCTION_ADDR(Runtime_##name), number_of_args, result_size },
+#define F(name, number_of_args, result_size)                                  \
+  {                                                                           \
+    Runtime::k##name, Runtime::RUNTIME, #name, FUNCTION_ADDR(Runtime_##name), \
+        number_of_args, result_size                                           \
+  }                                                                           \
+  ,
 
 
-#define I(name, number_of_args, result_size)                             \
-  { Runtime::kInline##name, Runtime::INLINE,     \
-    "_" #name, NULL, number_of_args, result_size },
+#define I(name, number_of_args, result_size)                                \
+  {                                                                         \
+    Runtime::kInline##name, Runtime::INLINE, "_" #name,                     \
+        FUNCTION_ADDR(RuntimeReference_##name), number_of_args, result_size \
+  }                                                                         \
+  ,
 
 
-#define IO(name, number_of_args, result_size) \
-  { Runtime::kInlineOptimized##name, Runtime::INLINE_OPTIMIZED, \
-    "_" #name, FUNCTION_ADDR(Runtime_##name), number_of_args, result_size },
+#define IO(name, number_of_args, result_size)                              \
+  {                                                                        \
+    Runtime::kInlineOptimized##name, Runtime::INLINE_OPTIMIZED, "_" #name, \
+        FUNCTION_ADDR(Runtime_##name), number_of_args, result_size         \
+  }                                                                        \
+  ,
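A sketch of the table entry each macro produces, using F with an identifier
from RUNTIME_FUNCTION_LIST_ALWAYS_1 as the example (literal expansion,
reformatted):

    // F(GetProperty, 2, 1) expands to the initializer:
    { Runtime::kGetProperty, Runtime::RUNTIME, "GetProperty",
      FUNCTION_ADDR(Runtime_GetProperty), 2, 1 },

    // I(...) entries differ in using Runtime::kInline##name, Runtime::INLINE,
    // a "_"-prefixed name, and FUNCTION_ADDR(RuntimeReference_##name).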
 
 
 static const Runtime::Function kIntrinsicFunctions[] = {
@@ -15087,6 +15604,16 @@ const Runtime::Function* Runtime::FunctionForName(Handle<String> name) {
 }
 
 
+const Runtime::Function* Runtime::FunctionForEntry(Address entry) {
+  // Iterate over the number of entries, not the table's size in bytes.
+  static const size_t kCount =
+      sizeof(kIntrinsicFunctions) / sizeof(kIntrinsicFunctions[0]);
+  for (size_t i = 0; i < kCount; ++i) {
+    if (entry == kIntrinsicFunctions[i].entry) {
+      return &(kIntrinsicFunctions[i]);
+    }
+  }
+  return NULL;
+}
+
+
 const Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) {
   return &(kIntrinsicFunctions[static_cast<int>(id)]);
 }
index 3224a282b70772bbee7c2ad574d2e9176277e780..ca5a3f15389c62a830badd878ba5912a68a7307c 100644 (file)
@@ -21,472 +21,481 @@ namespace internal {
 // WARNING: RUNTIME_FUNCTION_LIST_ALWAYS_* is a very large macro that caused
 // MSVC Intellisense to crash.  It was broken into two macros to work around
 // this problem. Please avoid large recursive macros whenever possible.
-#define RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
-  /* Property access */ \
-  F(GetProperty, 2, 1) \
-  F(KeyedGetProperty, 2, 1) \
-  F(DeleteProperty, 3, 1) \
-  F(HasOwnProperty, 2, 1) \
-  F(HasProperty, 2, 1) \
-  F(HasElement, 2, 1) \
-  F(IsPropertyEnumerable, 2, 1) \
-  F(GetPropertyNames, 1, 1) \
-  F(GetPropertyNamesFast, 1, 1) \
-  F(GetOwnPropertyNames, 2, 1) \
-  F(GetOwnElementNames, 1, 1) \
-  F(GetInterceptorInfo, 1, 1) \
-  F(GetNamedInterceptorPropertyNames, 1, 1) \
-  F(GetIndexedInterceptorElementNames, 1, 1) \
-  F(GetArgumentsProperty, 1, 1) \
-  F(ToFastProperties, 1, 1) \
-  F(FinishArrayPrototypeSetup, 1, 1) \
-  F(SpecialArrayFunctions, 0, 1) \
-  F(IsSloppyModeFunction, 1, 1) \
-  F(GetDefaultReceiver, 1, 1) \
-  \
-  F(GetPrototype, 1, 1) \
-  F(SetPrototype, 2, 1) \
-  F(IsInPrototypeChain, 2, 1) \
-  \
-  F(GetOwnProperty, 2, 1) \
-  \
-  F(IsExtensible, 1, 1) \
-  F(PreventExtensions, 1, 1)\
-  \
-  /* Utilities */ \
-  F(CheckIsBootstrapping, 0, 1) \
-  F(GetRootNaN, 0, 1) \
-  F(Call, -1 /* >= 2 */, 1) \
-  F(Apply, 5, 1) \
-  F(GetFunctionDelegate, 1, 1) \
-  F(GetConstructorDelegate, 1, 1) \
-  F(DeoptimizeFunction, 1, 1) \
-  F(ClearFunctionTypeFeedback, 1, 1) \
-  F(RunningInSimulator, 0, 1) \
-  F(IsConcurrentRecompilationSupported, 0, 1) \
-  F(OptimizeFunctionOnNextCall, -1, 1) \
-  F(NeverOptimizeFunction, 1, 1) \
-  F(GetOptimizationStatus, -1, 1) \
-  F(GetOptimizationCount, 1, 1) \
-  F(UnblockConcurrentRecompilation, 0, 1) \
-  F(CompileForOnStackReplacement, 1, 1) \
-  F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
-  F(SetNativeFlag, 1, 1) \
-  F(SetInlineBuiltinFlag, 1, 1) \
-  F(StoreArrayLiteralElement, 5, 1) \
-  F(DebugPrepareStepInIfStepping, 1, 1) \
-  F(DebugPromiseHandlePrologue, 1, 1) \
-  F(DebugPromiseHandleEpilogue, 0, 1) \
-  F(DebugPromiseEvent, 1, 1) \
-  F(DebugAsyncTaskEvent, 1, 1) \
-  F(FlattenString, 1, 1) \
-  F(LoadMutableDouble, 2, 1) \
-  F(TryMigrateInstance, 1, 1) \
-  F(NotifyContextDisposed, 0, 1) \
-  \
-  /* Array join support */ \
-  F(PushIfAbsent, 2, 1) \
-  F(ArrayConcat, 1, 1) \
-  \
-  /* Conversions */ \
-  F(ToBool, 1, 1) \
-  F(Typeof, 1, 1) \
-  \
-  F(StringToNumber, 1, 1) \
-  F(StringParseInt, 2, 1) \
-  F(StringParseFloat, 1, 1) \
-  F(StringToLowerCase, 1, 1) \
-  F(StringToUpperCase, 1, 1) \
-  F(StringSplit, 3, 1) \
-  F(CharFromCode, 1, 1) \
-  F(URIEscape, 1, 1) \
-  F(URIUnescape, 1, 1) \
-  \
-  F(NumberToInteger, 1, 1) \
-  F(NumberToIntegerMapMinusZero, 1, 1) \
-  F(NumberToJSUint32, 1, 1) \
-  F(NumberToJSInt32, 1, 1) \
-  \
-  /* Arithmetic operations */ \
-  F(NumberAdd, 2, 1) \
-  F(NumberSub, 2, 1) \
-  F(NumberMul, 2, 1) \
-  F(NumberDiv, 2, 1) \
-  F(NumberMod, 2, 1) \
-  F(NumberUnaryMinus, 1, 1) \
-  F(NumberImul, 2, 1) \
-  \
-  F(StringBuilderConcat, 3, 1) \
-  F(StringBuilderJoin, 3, 1) \
-  F(SparseJoinWithSeparator, 3, 1) \
-  \
-  /* Bit operations */ \
-  F(NumberOr, 2, 1) \
-  F(NumberAnd, 2, 1) \
-  F(NumberXor, 2, 1) \
-  \
-  F(NumberShl, 2, 1) \
-  F(NumberShr, 2, 1) \
-  F(NumberSar, 2, 1) \
-  \
-  /* Comparisons */ \
-  F(NumberEquals, 2, 1) \
-  F(StringEquals, 2, 1) \
-  \
-  F(NumberCompare, 3, 1) \
-  F(SmiLexicographicCompare, 2, 1) \
-  \
-  /* Math */ \
-  F(MathAcos, 1, 1) \
-  F(MathAsin, 1, 1) \
-  F(MathAtan, 1, 1) \
-  F(MathFloorRT, 1, 1) \
-  F(MathAtan2, 2, 1) \
-  F(MathExpRT, 1, 1) \
-  F(RoundNumber, 1, 1) \
-  F(MathFround, 1, 1) \
-  \
-  /* Regular expressions */ \
-  F(RegExpCompile, 3, 1) \
-  F(RegExpExecMultiple, 4, 1) \
-  F(RegExpInitializeObject, 5, 1) \
-  \
-  /* JSON */ \
-  F(ParseJson, 1, 1) \
-  F(BasicJSONStringify, 1, 1) \
-  F(QuoteJSONString, 1, 1) \
-  \
-  /* Strings */ \
-  F(StringIndexOf, 3, 1) \
-  F(StringLastIndexOf, 3, 1) \
-  F(StringLocaleCompare, 2, 1) \
-  F(StringReplaceGlobalRegExpWithString, 4, 1) \
-  F(StringReplaceOneCharWithString, 3, 1) \
-  F(StringMatch, 3, 1) \
-  F(StringTrim, 3, 1) \
-  F(StringToArray, 2, 1) \
-  F(NewStringWrapper, 1, 1) \
-  F(NewString, 2, 1) \
-  F(TruncateString, 2, 1) \
-  \
-  /* Numbers */ \
-  F(NumberToRadixString, 2, 1) \
-  F(NumberToFixed, 2, 1) \
-  F(NumberToExponential, 2, 1) \
-  F(NumberToPrecision, 2, 1) \
+#define RUNTIME_FUNCTION_LIST_ALWAYS_1(F)                   \
+  /* Property access */                                     \
+  F(GetProperty, 2, 1)                                      \
+  F(KeyedGetProperty, 2, 1)                                 \
+  F(DeleteProperty, 3, 1)                                   \
+  F(HasOwnProperty, 2, 1)                                   \
+  F(HasProperty, 2, 1)                                      \
+  F(HasElement, 2, 1)                                       \
+  F(IsPropertyEnumerable, 2, 1)                             \
+  F(GetPropertyNames, 1, 1)                                 \
+  F(GetPropertyNamesFast, 1, 1)                             \
+  F(GetOwnPropertyNames, 2, 1)                              \
+  F(GetOwnElementNames, 1, 1)                               \
+  F(GetInterceptorInfo, 1, 1)                               \
+  F(GetNamedInterceptorPropertyNames, 1, 1)                 \
+  F(GetIndexedInterceptorElementNames, 1, 1)                \
+  F(GetArgumentsProperty, 1, 1)                             \
+  F(ToFastProperties, 1, 1)                                 \
+  F(FinishArrayPrototypeSetup, 1, 1)                        \
+  F(SpecialArrayFunctions, 0, 1)                            \
+  F(IsSloppyModeFunction, 1, 1)                             \
+  F(GetDefaultReceiver, 1, 1)                               \
+                                                            \
+  F(GetPrototype, 1, 1)                                     \
+  F(SetPrototype, 2, 1)                                     \
+  F(IsInPrototypeChain, 2, 1)                               \
+                                                            \
+  F(GetOwnProperty, 2, 1)                                   \
+                                                            \
+  F(IsExtensible, 1, 1)                                     \
+  F(PreventExtensions, 1, 1)                                \
+                                                            \
+  /* Utilities */                                           \
+  F(CheckIsBootstrapping, 0, 1)                             \
+  F(GetRootNaN, 0, 1)                                       \
+  F(Call, -1 /* >= 2 */, 1)                                 \
+  F(Apply, 5, 1)                                            \
+  F(GetFunctionDelegate, 1, 1)                              \
+  F(GetConstructorDelegate, 1, 1)                           \
+  F(DeoptimizeFunction, 1, 1)                               \
+  F(ClearFunctionTypeFeedback, 1, 1)                        \
+  F(RunningInSimulator, 0, 1)                               \
+  F(IsConcurrentRecompilationSupported, 0, 1)               \
+  F(OptimizeFunctionOnNextCall, -1, 1)                      \
+  F(NeverOptimizeFunction, 1, 1)                            \
+  F(GetOptimizationStatus, -1, 1)                           \
+  F(IsOptimized, 0, 1) /* TODO(turbofan): Only temporary */ \
+  F(GetOptimizationCount, 1, 1)                             \
+  F(UnblockConcurrentRecompilation, 0, 1)                   \
+  F(CompileForOnStackReplacement, 1, 1)                     \
+  F(SetAllocationTimeout, -1 /* 2 || 3 */, 1)               \
+  F(SetNativeFlag, 1, 1)                                    \
+  F(SetInlineBuiltinFlag, 1, 1)                             \
+  F(StoreArrayLiteralElement, 5, 1)                         \
+  F(DebugPrepareStepInIfStepping, 1, 1)                     \
+  F(DebugPromiseHandlePrologue, 1, 1)                       \
+  F(DebugPromiseHandleEpilogue, 0, 1)                       \
+  F(DebugPromiseEvent, 1, 1)                                \
+  F(DebugAsyncTaskEvent, 1, 1)                              \
+  F(FlattenString, 1, 1)                                    \
+  F(LoadMutableDouble, 2, 1)                                \
+  F(TryMigrateInstance, 1, 1)                               \
+  F(NotifyContextDisposed, 0, 1)                            \
+                                                            \
+  /* Array join support */                                  \
+  F(PushIfAbsent, 2, 1)                                     \
+  F(ArrayConcat, 1, 1)                                      \
+                                                            \
+  /* Conversions */                                         \
+  F(ToBool, 1, 1)                                           \
+  F(Typeof, 1, 1)                                           \
+                                                            \
+  F(Booleanize, 2, 1) /* TODO(turbofan): Only temporary */  \
+                                                            \
+  F(StringToNumber, 1, 1)                                   \
+  F(StringParseInt, 2, 1)                                   \
+  F(StringParseFloat, 1, 1)                                 \
+  F(StringToLowerCase, 1, 1)                                \
+  F(StringToUpperCase, 1, 1)                                \
+  F(StringSplit, 3, 1)                                      \
+  F(CharFromCode, 1, 1)                                     \
+  F(URIEscape, 1, 1)                                        \
+  F(URIUnescape, 1, 1)                                      \
+                                                            \
+  F(NumberToInteger, 1, 1)                                  \
+  F(NumberToIntegerMapMinusZero, 1, 1)                      \
+  F(NumberToJSUint32, 1, 1)                                 \
+  F(NumberToJSInt32, 1, 1)                                  \
+                                                            \
+  /* Arithmetic operations */                               \
+  F(NumberAdd, 2, 1)                                        \
+  F(NumberSub, 2, 1)                                        \
+  F(NumberMul, 2, 1)                                        \
+  F(NumberDiv, 2, 1)                                        \
+  F(NumberMod, 2, 1)                                        \
+  F(NumberUnaryMinus, 1, 1)                                 \
+  F(NumberImul, 2, 1)                                       \
+                                                            \
+  F(StringBuilderConcat, 3, 1)                              \
+  F(StringBuilderJoin, 3, 1)                                \
+  F(SparseJoinWithSeparator, 3, 1)                          \
+                                                            \
+  /* Bit operations */                                      \
+  F(NumberOr, 2, 1)                                         \
+  F(NumberAnd, 2, 1)                                        \
+  F(NumberXor, 2, 1)                                        \
+                                                            \
+  F(NumberShl, 2, 1)                                        \
+  F(NumberShr, 2, 1)                                        \
+  F(NumberSar, 2, 1)                                        \
+                                                            \
+  /* Comparisons */                                         \
+  F(NumberEquals, 2, 1)                                     \
+  F(StringEquals, 2, 1)                                     \
+                                                            \
+  F(NumberCompare, 3, 1)                                    \
+  F(SmiLexicographicCompare, 2, 1)                          \
+                                                            \
+  /* Math */                                                \
+  F(MathAcos, 1, 1)                                         \
+  F(MathAsin, 1, 1)                                         \
+  F(MathAtan, 1, 1)                                         \
+  F(MathFloorRT, 1, 1)                                      \
+  F(MathAtan2, 2, 1)                                        \
+  F(MathExpRT, 1, 1)                                        \
+  F(RoundNumber, 1, 1)                                      \
+  F(MathFround, 1, 1)                                       \
+                                                            \
+  /* Regular expressions */                                 \
+  F(RegExpCompile, 3, 1)                                    \
+  F(RegExpExecMultiple, 4, 1)                               \
+  F(RegExpInitializeObject, 5, 1)                           \
+                                                            \
+  /* JSON */                                                \
+  F(ParseJson, 1, 1)                                        \
+  F(BasicJSONStringify, 1, 1)                               \
+  F(QuoteJSONString, 1, 1)                                  \
+                                                            \
+  /* Strings */                                             \
+  F(StringIndexOf, 3, 1)                                    \
+  F(StringLastIndexOf, 3, 1)                                \
+  F(StringLocaleCompare, 2, 1)                              \
+  F(StringReplaceGlobalRegExpWithString, 4, 1)              \
+  F(StringReplaceOneCharWithString, 3, 1)                   \
+  F(StringMatch, 3, 1)                                      \
+  F(StringTrim, 3, 1)                                       \
+  F(StringToArray, 2, 1)                                    \
+  F(NewStringWrapper, 1, 1)                                 \
+  F(NewString, 2, 1)                                        \
+  F(TruncateString, 2, 1)                                   \
+                                                            \
+  /* Numbers */                                             \
+  F(NumberToRadixString, 2, 1)                              \
+  F(NumberToFixed, 2, 1)                                    \
+  F(NumberToExponential, 2, 1)                              \
+  F(NumberToPrecision, 2, 1)                                \
   F(IsValidSmi, 1, 1)
 
 
-#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F)              \
-  /* Reflection */                                     \
-  F(FunctionSetInstanceClassName, 2, 1)                \
-  F(FunctionSetLength, 2, 1)                           \
-  F(FunctionSetPrototype, 2, 1)                        \
-  F(FunctionGetName, 1, 1)                             \
-  F(FunctionSetName, 2, 1)                             \
-  F(FunctionNameShouldPrintAsAnonymous, 1, 1)          \
-  F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1)      \
-  F(FunctionIsGenerator, 1, 1)                         \
-  F(FunctionIsArrow, 1, 1)                             \
-  F(FunctionBindArguments, 4, 1)                       \
-  F(BoundFunctionGetBindings, 1, 1)                    \
-  F(FunctionRemovePrototype, 1, 1)                     \
-  F(FunctionGetSourceCode, 1, 1)                       \
-  F(FunctionGetScript, 1, 1)                           \
-  F(FunctionGetScriptSourcePosition, 1, 1)             \
-  F(FunctionGetPositionForOffset, 2, 1)                \
-  F(FunctionIsAPIFunction, 1, 1)                       \
-  F(FunctionIsBuiltin, 1, 1)                           \
-  F(GetScript, 1, 1)                                   \
-  F(CollectStackTrace, 2, 1)                           \
-  F(GetV8Version, 0, 1)                                \
-                                                       \
-  F(SetCode, 2, 1)                                     \
-                                                       \
-  F(CreateApiFunction, 2, 1)                           \
-  F(IsTemplate, 1, 1)                                  \
-  F(GetTemplateField, 2, 1)                            \
-  F(DisableAccessChecks, 1, 1)                         \
-  F(EnableAccessChecks, 1, 1)                          \
-                                                       \
-  /* Dates */                                          \
-  F(DateCurrentTime, 0, 1)                             \
-  F(DateParseString, 2, 1)                             \
-  F(DateLocalTimezone, 1, 1)                           \
-  F(DateToUTC, 1, 1)                                   \
-  F(DateMakeDay, 2, 1)                                 \
-  F(DateSetValue, 3, 1)                                \
-  F(DateCacheVersion, 0, 1)                            \
-                                                       \
-  /* Globals */                                        \
-  F(CompileString, 2, 1)                               \
-                                                       \
-  /* Eval */                                           \
-  F(GlobalProxy, 1, 1)                                 \
-  F(IsAttachedGlobal, 1, 1)                            \
-                                                       \
-  F(AddNamedProperty, 4, 1)                            \
-  F(AddPropertyForTemplate, 4, 1)                      \
-  F(SetProperty, 4, 1)                                 \
-  F(DefineApiAccessorProperty, 5, 1)                   \
-  F(DefineDataPropertyUnchecked, 4, 1)                 \
-  F(DefineAccessorPropertyUnchecked, 5, 1)             \
-  F(GetDataProperty, 2, 1)                             \
-  F(SetHiddenProperty, 3, 1)                           \
-                                                       \
-  /* Arrays */                                         \
-  F(RemoveArrayHoles, 2, 1)                            \
-  F(GetArrayKeys, 2, 1)                                \
-  F(MoveArrayContents, 2, 1)                           \
-  F(EstimateNumberOfElements, 1, 1)                    \
-  F(NormalizeElements, 1, 1)                           \
-                                                       \
-  /* Getters and Setters */                            \
-  F(LookupAccessor, 3, 1)                              \
-                                                       \
-  /* ES5 */                                            \
-  F(ObjectFreeze, 1, 1)                                \
-                                                       \
-  /* Harmony modules */                                \
-  F(IsJSModule, 1, 1)                                  \
-                                                       \
-  /* Harmony symbols */                                \
-  F(CreateSymbol, 1, 1)                                \
-  F(CreatePrivateSymbol, 1, 1)                         \
-  F(CreateGlobalPrivateSymbol, 1, 1)                   \
-  F(NewSymbolWrapper, 1, 1)                            \
-  F(SymbolDescription, 1, 1)                           \
-  F(SymbolRegistry, 0, 1)                              \
-  F(SymbolIsPrivate, 1, 1)                             \
-                                                       \
-  /* Harmony proxies */                                \
-  F(CreateJSProxy, 2, 1)                               \
-  F(CreateJSFunctionProxy, 4, 1)                       \
-  F(IsJSProxy, 1, 1)                                   \
-  F(IsJSFunctionProxy, 1, 1)                           \
-  F(GetHandler, 1, 1)                                  \
-  F(GetCallTrap, 1, 1)                                 \
-  F(GetConstructTrap, 1, 1)                            \
-  F(Fix, 1, 1)                                         \
-                                                       \
-  /* Harmony sets */                                   \
-  F(SetInitialize, 1, 1)                               \
-  F(SetAdd, 2, 1)                                      \
-  F(SetHas, 2, 1)                                      \
-  F(SetDelete, 2, 1)                                   \
-  F(SetClear, 1, 1)                                    \
-  F(SetGetSize, 1, 1)                                  \
-                                                       \
-  F(SetIteratorInitialize, 3, 1)                       \
-  F(SetIteratorNext, 2, 1)                             \
-                                                       \
-  /* Harmony maps */                                   \
-  F(MapInitialize, 1, 1)                               \
-  F(MapGet, 2, 1)                                      \
-  F(MapHas, 2, 1)                                      \
-  F(MapDelete, 2, 1)                                   \
-  F(MapClear, 1, 1)                                    \
-  F(MapSet, 3, 1)                                      \
-  F(MapGetSize, 1, 1)                                  \
-                                                       \
-  F(MapIteratorInitialize, 3, 1)                       \
-  F(MapIteratorNext, 2, 1)                             \
-                                                       \
-  /* Harmony weak maps and sets */                     \
-  F(WeakCollectionInitialize, 1, 1)                    \
-  F(WeakCollectionGet, 2, 1)                           \
-  F(WeakCollectionHas, 2, 1)                           \
-  F(WeakCollectionDelete, 2, 1)                        \
-  F(WeakCollectionSet, 3, 1)                           \
-                                                       \
-  F(GetWeakMapEntries, 1, 1)                           \
-  F(GetWeakSetValues, 1, 1)                            \
-                                                       \
-  /* Harmony events */                                 \
-  F(EnqueueMicrotask, 1, 1)                            \
-  F(RunMicrotasks, 0, 1)                               \
-                                                       \
-  /* Harmony observe */                                \
-  F(IsObserved, 1, 1)                                  \
-  F(SetIsObserved, 1, 1)                               \
-  F(GetObservationState, 0, 1)                         \
-  F(ObservationWeakMapCreate, 0, 1)                    \
-  F(ObserverObjectAndRecordHaveSameOrigin, 3, 1)       \
-  F(ObjectWasCreatedInCurrentOrigin, 1, 1)             \
-  F(GetObjectContextObjectObserve, 1, 1)               \
-  F(GetObjectContextObjectGetNotifier, 1, 1)           \
-  F(GetObjectContextNotifierPerformChange, 1, 1)       \
-                                                       \
-  /* Harmony typed arrays */                           \
-  F(ArrayBufferInitialize, 2, 1)                       \
-  F(ArrayBufferSliceImpl, 3, 1)                        \
-  F(ArrayBufferIsView, 1, 1)                           \
-  F(ArrayBufferNeuter, 1, 1)                           \
-                                                       \
-  F(TypedArrayInitializeFromArrayLike, 4, 1)           \
-  F(TypedArrayGetBuffer, 1, 1)                         \
-  F(TypedArraySetFastCases, 3, 1)                      \
-                                                       \
-  F(DataViewGetBuffer, 1, 1)                           \
-  F(DataViewGetInt8, 3, 1)                             \
-  F(DataViewGetUint8, 3, 1)                            \
-  F(DataViewGetInt16, 3, 1)                            \
-  F(DataViewGetUint16, 3, 1)                           \
-  F(DataViewGetInt32, 3, 1)                            \
-  F(DataViewGetUint32, 3, 1)                           \
-  F(DataViewGetFloat32, 3, 1)                          \
-  F(DataViewGetFloat64, 3, 1)                          \
-                                                       \
-  F(DataViewSetInt8, 4, 1)                             \
-  F(DataViewSetUint8, 4, 1)                            \
-  F(DataViewSetInt16, 4, 1)                            \
-  F(DataViewSetUint16, 4, 1)                           \
-  F(DataViewSetInt32, 4, 1)                            \
-  F(DataViewSetUint32, 4, 1)                           \
-  F(DataViewSetFloat32, 4, 1)                          \
-  F(DataViewSetFloat64, 4, 1)                          \
-                                                       \
-  /* Statements */                                     \
-  F(NewObjectFromBound, 1, 1)                          \
-                                                       \
-  /* Declarations and initialization */                \
-  F(InitializeVarGlobal, 3, 1)                         \
-  F(OptimizeObjectForAddingMultipleProperties, 2, 1)   \
-                                                       \
-  /* Debugging */                                      \
-  F(DebugPrint, 1, 1)                                  \
-  F(GlobalPrint, 1, 1)                                 \
-  F(DebugTrace, 0, 1)                                  \
-  F(TraceEnter, 0, 1)                                  \
-  F(TraceExit, 1, 1)                                   \
-  F(Abort, 1, 1)                                       \
-  F(AbortJS, 1, 1)                                     \
-  /* ES5 */                                            \
-  F(OwnKeys, 1, 1)                                     \
-                                                       \
-  /* Message objects */                                \
-  F(MessageGetStartPosition, 1, 1)                     \
-  F(MessageGetScript, 1, 1)                            \
-                                                       \
-  /* Pseudo functions - handled as macros by parser */ \
-  F(IS_VAR, 1, 1)                                      \
-                                                       \
-  /* expose boolean functions from objects-inl.h */    \
-  F(HasFastSmiElements, 1, 1)                          \
-  F(HasFastSmiOrObjectElements, 1, 1)                  \
-  F(HasFastObjectElements, 1, 1)                       \
-  F(HasFastDoubleElements, 1, 1)                       \
-  F(HasFastHoleyElements, 1, 1)                        \
-  F(HasDictionaryElements, 1, 1)                       \
-  F(HasSloppyArgumentsElements, 1, 1)                  \
-  F(HasExternalUint8ClampedElements, 1, 1)             \
-  F(HasExternalArrayElements, 1, 1)                    \
-  F(HasExternalInt8Elements, 1, 1)                     \
-  F(HasExternalUint8Elements, 1, 1)                    \
-  F(HasExternalInt16Elements, 1, 1)                    \
-  F(HasExternalUint16Elements, 1, 1)                   \
-  F(HasExternalInt32Elements, 1, 1)                    \
-  F(HasExternalUint32Elements, 1, 1)                   \
-  F(HasExternalFloat32Elements, 1, 1)                  \
-  F(HasExternalFloat64Elements, 1, 1)                  \
-  F(HasFixedUint8ClampedElements, 1, 1)                \
-  F(HasFixedInt8Elements, 1, 1)                        \
-  F(HasFixedUint8Elements, 1, 1)                       \
-  F(HasFixedInt16Elements, 1, 1)                       \
-  F(HasFixedUint16Elements, 1, 1)                      \
-  F(HasFixedInt32Elements, 1, 1)                       \
-  F(HasFixedUint32Elements, 1, 1)                      \
-  F(HasFixedFloat32Elements, 1, 1)                     \
-  F(HasFixedFloat64Elements, 1, 1)                     \
-  F(HasFastProperties, 1, 1)                           \
-  F(TransitionElementsKind, 2, 1)                      \
-  F(HaveSameMap, 2, 1)                                 \
-  F(IsJSGlobalProxy, 1, 1)
-
-
-#define RUNTIME_FUNCTION_LIST_ALWAYS_3(F)  \
-  /* String and Regexp */                  \
-  F(NumberToStringRT, 1, 1)                \
-  F(RegExpConstructResult, 3, 1)           \
-  F(RegExpExecRT, 4, 1)                    \
-  F(StringAdd, 2, 1)                       \
-  F(SubString, 3, 1)                       \
-  F(InternalizeString, 1, 1)               \
-  F(StringCompare, 2, 1)                   \
-  F(StringCharCodeAtRT, 2, 1)              \
-  F(GetFromCache, 2, 1)                    \
-                                           \
-  /* Compilation */                        \
-  F(CompileUnoptimized, 1, 1)              \
-  F(CompileOptimized, 2, 1)                \
-  F(TryInstallOptimizedCode, 1, 1)         \
-  F(NotifyDeoptimized, 1, 1)               \
-  F(NotifyStubFailure, 0, 1)               \
-                                           \
-  /* Utilities */                          \
-  F(AllocateInNewSpace, 1, 1)              \
-  F(AllocateInTargetSpace, 2, 1)           \
-  F(AllocateHeapNumber, 0, 1)              \
-  F(NumberToSmi, 1, 1)                     \
-  F(NumberToStringSkipCache, 1, 1)         \
-                                           \
-  F(NewSloppyArguments, 3, 1)              \
-  F(NewStrictArguments, 3, 1)              \
-                                           \
-  /* Harmony generators */                 \
-  F(CreateJSGeneratorObject, 0, 1)         \
-  F(SuspendJSGeneratorObject, 1, 1)        \
-  F(ResumeJSGeneratorObject, 3, 1)         \
-  F(ThrowGeneratorStateError, 1, 1)        \
-                                           \
-  /* Arrays */                             \
-  F(ArrayConstructor, -1, 1)               \
-  F(InternalArrayConstructor, -1, 1)       \
-                                           \
-  /* Literals */                           \
-  F(MaterializeRegExpLiteral, 4, 1)        \
-  F(CreateObjectLiteral, 4, 1)             \
-  F(CreateArrayLiteral, 4, 1)              \
-  F(CreateArrayLiteralStubBailout, 3, 1)   \
-                                           \
-  /* Statements */                         \
-  F(NewClosure, 3, 1)                      \
-  F(NewClosureFromStubFailure, 1, 1)       \
-  F(NewObject, 1, 1)                       \
-  F(NewObjectWithAllocationSite, 2, 1)     \
-  F(FinalizeInstanceSize, 1, 1)            \
-  F(Throw, 1, 1)                           \
-  F(ReThrow, 1, 1)                         \
-  F(ThrowReferenceError, 1, 1)             \
-  F(ThrowNotDateError, 0, 1)               \
-  F(StackGuard, 0, 1)                      \
-  F(Interrupt, 0, 1)                       \
-  F(PromoteScheduledException, 0, 1)       \
-                                           \
-  /* Contexts */                           \
-  F(NewGlobalContext, 2, 1)                \
-  F(NewFunctionContext, 1, 1)              \
-  F(PushWithContext, 2, 1)                 \
-  F(PushCatchContext, 3, 1)                \
-  F(PushBlockContext, 2, 1)                \
-  F(PushModuleContext, 2, 1)               \
-  F(DeleteLookupSlot, 2, 1)                \
-  F(LoadLookupSlot, 2, 2)                  \
-  F(LoadLookupSlotNoReferenceError, 2, 2)  \
-  F(StoreLookupSlot, 4, 1)                 \
-                                           \
-  /* Declarations and initialization */    \
-  F(DeclareGlobals, 3, 1)                  \
-  F(DeclareModules, 1, 1)                  \
-  F(DeclareLookupSlot, 4, 1)               \
-  F(InitializeConstGlobal, 2, 1)           \
-  F(InitializeLegacyConstLookupSlot, 3, 1) \
-                                           \
-  /* Eval */                               \
-  F(ResolvePossiblyDirectEval, 5, 2)       \
-                                           \
-  /* Maths */                              \
-  F(MathPowSlow, 2, 1)                     \
+#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F)                             \
+  /* Reflection */                                                    \
+  F(FunctionSetInstanceClassName, 2, 1)                               \
+  F(FunctionSetLength, 2, 1)                                          \
+  F(FunctionSetPrototype, 2, 1)                                       \
+  F(FunctionGetName, 1, 1)                                            \
+  F(FunctionSetName, 2, 1)                                            \
+  F(FunctionNameShouldPrintAsAnonymous, 1, 1)                         \
+  F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1)                     \
+  F(FunctionIsGenerator, 1, 1)                                        \
+  F(FunctionIsArrow, 1, 1)                                            \
+  F(FunctionBindArguments, 4, 1)                                      \
+  F(BoundFunctionGetBindings, 1, 1)                                   \
+  F(FunctionRemovePrototype, 1, 1)                                    \
+  F(FunctionGetSourceCode, 1, 1)                                      \
+  F(FunctionGetScript, 1, 1)                                          \
+  F(FunctionGetScriptSourcePosition, 1, 1)                            \
+  F(FunctionGetPositionForOffset, 2, 1)                               \
+  F(FunctionIsAPIFunction, 1, 1)                                      \
+  F(FunctionIsBuiltin, 1, 1)                                          \
+  F(GetScript, 1, 1)                                                  \
+  F(CollectStackTrace, 2, 1)                                          \
+  F(GetV8Version, 0, 1)                                               \
+                                                                      \
+  F(SetCode, 2, 1)                                                    \
+                                                                      \
+  F(CreateApiFunction, 2, 1)                                          \
+  F(IsTemplate, 1, 1)                                                 \
+  F(GetTemplateField, 2, 1)                                           \
+  F(DisableAccessChecks, 1, 1)                                        \
+  F(EnableAccessChecks, 1, 1)                                         \
+                                                                      \
+  /* Dates */                                                         \
+  F(DateCurrentTime, 0, 1)                                            \
+  F(DateParseString, 2, 1)                                            \
+  F(DateLocalTimezone, 1, 1)                                          \
+  F(DateToUTC, 1, 1)                                                  \
+  F(DateMakeDay, 2, 1)                                                \
+  F(DateSetValue, 3, 1)                                               \
+  F(DateCacheVersion, 0, 1)                                           \
+                                                                      \
+  /* Globals */                                                       \
+  F(CompileString, 2, 1)                                              \
+                                                                      \
+  /* Eval */                                                          \
+  F(GlobalProxy, 1, 1)                                                \
+  F(IsAttachedGlobal, 1, 1)                                           \
+                                                                      \
+  F(AddNamedProperty, 4, 1)                                           \
+  F(AddPropertyForTemplate, 4, 1)                                     \
+  F(SetProperty, 4, 1)                                                \
+  F(DefineApiAccessorProperty, 5, 1)                                  \
+  F(DefineDataPropertyUnchecked, 4, 1)                                \
+  F(DefineAccessorPropertyUnchecked, 5, 1)                            \
+  F(GetDataProperty, 2, 1)                                            \
+  F(SetHiddenProperty, 3, 1)                                          \
+                                                                      \
+  /* Arrays */                                                        \
+  F(RemoveArrayHoles, 2, 1)                                           \
+  F(GetArrayKeys, 2, 1)                                               \
+  F(MoveArrayContents, 2, 1)                                          \
+  F(EstimateNumberOfElements, 1, 1)                                   \
+  F(NormalizeElements, 1, 1)                                          \
+                                                                      \
+  /* Getters and Setters */                                           \
+  F(LookupAccessor, 3, 1)                                             \
+                                                                      \
+  /* ES5 */                                                           \
+  F(ObjectFreeze, 1, 1)                                               \
+                                                                      \
+  /* Harmony modules */                                               \
+  F(IsJSModule, 1, 1)                                                 \
+                                                                      \
+  /* Harmony symbols */                                               \
+  F(CreateSymbol, 1, 1)                                               \
+  F(CreatePrivateSymbol, 1, 1)                                        \
+  F(CreateGlobalPrivateSymbol, 1, 1)                                  \
+  F(NewSymbolWrapper, 1, 1)                                           \
+  F(SymbolDescription, 1, 1)                                          \
+  F(SymbolRegistry, 0, 1)                                             \
+  F(SymbolIsPrivate, 1, 1)                                            \
+                                                                      \
+  /* Harmony proxies */                                               \
+  F(CreateJSProxy, 2, 1)                                              \
+  F(CreateJSFunctionProxy, 4, 1)                                      \
+  F(IsJSProxy, 1, 1)                                                  \
+  F(IsJSFunctionProxy, 1, 1)                                          \
+  F(GetHandler, 1, 1)                                                 \
+  F(GetCallTrap, 1, 1)                                                \
+  F(GetConstructTrap, 1, 1)                                           \
+  F(Fix, 1, 1)                                                        \
+                                                                      \
+  /* Harmony sets */                                                  \
+  F(SetInitialize, 1, 1)                                              \
+  F(SetAdd, 2, 1)                                                     \
+  F(SetHas, 2, 1)                                                     \
+  F(SetDelete, 2, 1)                                                  \
+  F(SetClear, 1, 1)                                                   \
+  F(SetGetSize, 1, 1)                                                 \
+                                                                      \
+  F(SetIteratorInitialize, 3, 1)                                      \
+  F(SetIteratorNext, 2, 1)                                            \
+                                                                      \
+  /* Harmony maps */                                                  \
+  F(MapInitialize, 1, 1)                                              \
+  F(MapGet, 2, 1)                                                     \
+  F(MapHas, 2, 1)                                                     \
+  F(MapDelete, 2, 1)                                                  \
+  F(MapClear, 1, 1)                                                   \
+  F(MapSet, 3, 1)                                                     \
+  F(MapGetSize, 1, 1)                                                 \
+                                                                      \
+  F(MapIteratorInitialize, 3, 1)                                      \
+  F(MapIteratorNext, 2, 1)                                            \
+                                                                      \
+  /* Harmony weak maps and sets */                                    \
+  F(WeakCollectionInitialize, 1, 1)                                   \
+  F(WeakCollectionGet, 2, 1)                                          \
+  F(WeakCollectionHas, 2, 1)                                          \
+  F(WeakCollectionDelete, 2, 1)                                       \
+  F(WeakCollectionSet, 3, 1)                                          \
+                                                                      \
+  F(GetWeakMapEntries, 1, 1)                                          \
+  F(GetWeakSetValues, 1, 1)                                           \
+                                                                      \
+  /* Harmony events */                                                \
+  F(EnqueueMicrotask, 1, 1)                                           \
+  F(RunMicrotasks, 0, 1)                                              \
+                                                                      \
+  /* Harmony observe */                                               \
+  F(IsObserved, 1, 1)                                                 \
+  F(SetIsObserved, 1, 1)                                              \
+  F(GetObservationState, 0, 1)                                        \
+  F(ObservationWeakMapCreate, 0, 1)                                   \
+  F(ObserverObjectAndRecordHaveSameOrigin, 3, 1)                      \
+  F(ObjectWasCreatedInCurrentOrigin, 1, 1)                            \
+  F(GetObjectContextObjectObserve, 1, 1)                              \
+  F(GetObjectContextObjectGetNotifier, 1, 1)                          \
+  F(GetObjectContextNotifierPerformChange, 1, 1)                      \
+                                                                      \
+  /* Harmony typed arrays */                                          \
+  F(ArrayBufferInitialize, 2, 1)                                      \
+  F(ArrayBufferSliceImpl, 3, 1)                                       \
+  F(ArrayBufferIsView, 1, 1)                                          \
+  F(ArrayBufferNeuter, 1, 1)                                          \
+                                                                      \
+  F(TypedArrayInitializeFromArrayLike, 4, 1)                          \
+  F(TypedArrayGetBuffer, 1, 1)                                        \
+  F(TypedArraySetFastCases, 3, 1)                                     \
+                                                                      \
+  F(DataViewGetBuffer, 1, 1)                                          \
+  F(DataViewGetInt8, 3, 1)                                            \
+  F(DataViewGetUint8, 3, 1)                                           \
+  F(DataViewGetInt16, 3, 1)                                           \
+  F(DataViewGetUint16, 3, 1)                                          \
+  F(DataViewGetInt32, 3, 1)                                           \
+  F(DataViewGetUint32, 3, 1)                                          \
+  F(DataViewGetFloat32, 3, 1)                                         \
+  F(DataViewGetFloat64, 3, 1)                                         \
+                                                                      \
+  F(DataViewSetInt8, 4, 1)                                            \
+  F(DataViewSetUint8, 4, 1)                                           \
+  F(DataViewSetInt16, 4, 1)                                           \
+  F(DataViewSetUint16, 4, 1)                                          \
+  F(DataViewSetInt32, 4, 1)                                           \
+  F(DataViewSetUint32, 4, 1)                                          \
+  F(DataViewSetFloat32, 4, 1)                                         \
+  F(DataViewSetFloat64, 4, 1)                                         \
+                                                                      \
+  /* Statements */                                                    \
+  F(NewObjectFromBound, 1, 1)                                         \
+                                                                      \
+  /* Declarations and initialization */                               \
+  F(InitializeVarGlobal, 3, 1)                                        \
+  F(OptimizeObjectForAddingMultipleProperties, 2, 1)                  \
+                                                                      \
+  /* Debugging */                                                     \
+  F(DebugPrint, 1, 1)                                                 \
+  F(GlobalPrint, 1, 1)                                                \
+  F(DebugTrace, 0, 1)                                                 \
+  F(TraceEnter, 0, 1)                                                 \
+  F(TraceExit, 1, 1)                                                  \
+  F(Abort, 1, 1)                                                      \
+  F(AbortJS, 1, 1)                                                    \
+  /* ES5 */                                                           \
+  F(OwnKeys, 1, 1)                                                    \
+                                                                      \
+  /* Message objects */                                               \
+  F(MessageGetStartPosition, 1, 1)                                    \
+  F(MessageGetScript, 1, 1)                                           \
+                                                                      \
+  /* Pseudo functions - handled as macros by parser */                \
+  F(IS_VAR, 1, 1)                                                     \
+                                                                      \
+  /* expose boolean functions from objects-inl.h */                   \
+  F(HasFastSmiElements, 1, 1)                                         \
+  F(HasFastSmiOrObjectElements, 1, 1)                                 \
+  F(HasFastObjectElements, 1, 1)                                      \
+  F(HasFastDoubleElements, 1, 1)                                      \
+  F(HasFastHoleyElements, 1, 1)                                       \
+  F(HasDictionaryElements, 1, 1)                                      \
+  F(HasSloppyArgumentsElements, 1, 1)                                 \
+  F(HasExternalUint8ClampedElements, 1, 1)                            \
+  F(HasExternalArrayElements, 1, 1)                                   \
+  F(HasExternalInt8Elements, 1, 1)                                    \
+  F(HasExternalUint8Elements, 1, 1)                                   \
+  F(HasExternalInt16Elements, 1, 1)                                   \
+  F(HasExternalUint16Elements, 1, 1)                                  \
+  F(HasExternalInt32Elements, 1, 1)                                   \
+  F(HasExternalUint32Elements, 1, 1)                                  \
+  F(HasExternalFloat32Elements, 1, 1)                                 \
+  F(HasExternalFloat64Elements, 1, 1)                                 \
+  F(HasFixedUint8ClampedElements, 1, 1)                               \
+  F(HasFixedInt8Elements, 1, 1)                                       \
+  F(HasFixedUint8Elements, 1, 1)                                      \
+  F(HasFixedInt16Elements, 1, 1)                                      \
+  F(HasFixedUint16Elements, 1, 1)                                     \
+  F(HasFixedInt32Elements, 1, 1)                                      \
+  F(HasFixedUint32Elements, 1, 1)                                     \
+  F(HasFixedFloat32Elements, 1, 1)                                    \
+  F(HasFixedFloat64Elements, 1, 1)                                    \
+  F(HasFastProperties, 1, 1)                                          \
+  F(TransitionElementsKind, 2, 1)                                     \
+  F(HaveSameMap, 2, 1)                                                \
+  F(IsJSGlobalProxy, 1, 1)                                            \
+  F(ForInInit, 2, 2)             /* TODO(turbofan): Only temporary */ \
+  F(ForInNext, 4, 2)             /* TODO(turbofan): Only temporary */ \
+  F(ForInCacheArrayLength, 2, 1) /* TODO(turbofan): Only temporary */
+
+
+#define RUNTIME_FUNCTION_LIST_ALWAYS_3(F)                            \
+  /* String and Regexp */                                            \
+  F(NumberToStringRT, 1, 1)                                          \
+  F(RegExpConstructResult, 3, 1)                                     \
+  F(RegExpExecRT, 4, 1)                                              \
+  F(StringAdd, 2, 1)                                                 \
+  F(SubString, 3, 1)                                                 \
+  F(InternalizeString, 1, 1)                                         \
+  F(StringCompare, 2, 1)                                             \
+  F(StringCharCodeAtRT, 2, 1)                                        \
+  F(GetFromCache, 2, 1)                                              \
+                                                                     \
+  /* Compilation */                                                  \
+  F(CompileUnoptimized, 1, 1)                                        \
+  F(CompileOptimized, 2, 1)                                          \
+  F(TryInstallOptimizedCode, 1, 1)                                   \
+  F(NotifyDeoptimized, 1, 1)                                         \
+  F(NotifyStubFailure, 0, 1)                                         \
+                                                                     \
+  /* Utilities */                                                    \
+  F(AllocateInNewSpace, 1, 1)                                        \
+  F(AllocateInTargetSpace, 2, 1)                                     \
+  F(AllocateHeapNumber, 0, 1)                                        \
+  F(NumberToSmi, 1, 1)                                               \
+  F(NumberToStringSkipCache, 1, 1)                                   \
+                                                                     \
+  F(NewArguments, 1, 1) /* TODO(turbofan): Only temporary */         \
+  F(NewSloppyArguments, 3, 1)                                        \
+  F(NewStrictArguments, 3, 1)                                        \
+                                                                     \
+  /* Harmony generators */                                           \
+  F(CreateJSGeneratorObject, 0, 1)                                   \
+  F(SuspendJSGeneratorObject, 1, 1)                                  \
+  F(ResumeJSGeneratorObject, 3, 1)                                   \
+  F(ThrowGeneratorStateError, 1, 1)                                  \
+                                                                     \
+  /* Arrays */                                                       \
+  F(ArrayConstructor, -1, 1)                                         \
+  F(InternalArrayConstructor, -1, 1)                                 \
+                                                                     \
+  /* Literals */                                                     \
+  F(MaterializeRegExpLiteral, 4, 1)                                  \
+  F(CreateObjectLiteral, 4, 1)                                       \
+  F(CreateArrayLiteral, 4, 1)                                        \
+  F(CreateArrayLiteralStubBailout, 3, 1)                             \
+                                                                     \
+  /* Statements */                                                   \
+  F(NewClosure, 3, 1)                                                \
+  F(NewClosureFromStubFailure, 1, 1)                                 \
+  F(NewObject, 1, 1)                                                 \
+  F(NewObjectWithAllocationSite, 2, 1)                               \
+  F(FinalizeInstanceSize, 1, 1)                                      \
+  F(Throw, 1, 1)                                                     \
+  F(ReThrow, 1, 1)                                                   \
+  F(ThrowReferenceError, 1, 1)                                       \
+  F(ThrowNotDateError, 0, 1)                                         \
+  F(StackGuard, 0, 1)                                                \
+  F(Interrupt, 0, 1)                                                 \
+  F(PromoteScheduledException, 0, 1)                                 \
+                                                                     \
+  /* Contexts */                                                     \
+  F(NewGlobalContext, 2, 1)                                          \
+  F(NewFunctionContext, 1, 1)                                        \
+  F(PushWithContext, 2, 1)                                           \
+  F(PushCatchContext, 3, 1)                                          \
+  F(PushBlockContext, 2, 1)                                          \
+  F(PushModuleContext, 2, 1)                                         \
+  F(DeleteLookupSlot, 2, 1)                                          \
+  F(LoadLookupSlot, 2, 2)                                            \
+  F(LoadContextRelative, 3, 1) /* TODO(turbofan): Only temporary */  \
+  F(LoadLookupSlotNoReferenceError, 2, 2)                            \
+  F(StoreLookupSlot, 4, 1)                                           \
+  F(StoreContextRelative, 4, 1) /* TODO(turbofan): Only temporary */ \
+                                                                     \
+  /* Declarations and initialization */                              \
+  F(DeclareGlobals, 3, 1)                                            \
+  F(DeclareModules, 1, 1)                                            \
+  F(DeclareLookupSlot, 4, 1)                                         \
+  F(InitializeConstGlobal, 2, 1)                                     \
+  F(InitializeLegacyConstLookupSlot, 3, 1)                           \
+                                                                     \
+  /* Eval */                                                         \
+  F(ResolvePossiblyDirectEval, 5, 2)                                 \
+                                                                     \
+  /* Maths */                                                        \
+  F(MathPowSlow, 2, 1)                                               \
   F(MathPowRT, 2, 1)
 
 
@@ -790,6 +799,9 @@ class Runtime : public AllStatic {
   // Get the intrinsic function with the given FunctionId.
   static const Function* FunctionForId(FunctionId id);
 
+  // Get the intrinsic function with the given function entry address.
+  static const Function* FunctionForEntry(Address ref);
+
   // General-purpose helper functions for runtime system.
   static int StringMatch(Isolate* isolate,
                          Handle<String> sub,
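
The new reverse lookup complements the existing FunctionForId. A hypothetical
call site (the Function fields and PrintF follow V8's existing declarations;
entry_address is assumed to be a raw entry captured elsewhere):

    // Hypothetical: recover intrinsic metadata from a raw entry address,
    // e.g. when inspecting the target of a runtime call.
    const Runtime::Function* function = Runtime::FunctionForEntry(entry_address);
    if (function != NULL) {
      PrintF("runtime call: %s (%d args)\n", function->name, function->nargs);
    }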
index a619228ab23090e78b41bc3b9755851b2831196a..edc951d5278b4d4e62e75270c3d27e00704196d1 100644 (file)
@@ -9,6 +9,7 @@
 #include "src/deoptimizer.h"
 #include "src/disasm.h"
 #include "src/macro-assembler.h"
+#include "src/ostreams.h"
 #include "src/zone-inl.h"
 
 namespace v8 {
index 787e647267258edc8019d2a07229f9ea1125bff8..b379f5efea7c501d51b8c447e906246eb7692b4d 100644 (file)
@@ -97,8 +97,10 @@ Handle<ScopeInfo> ScopeInfo::Create(Scope* scope, Zone* zone) {
   ASSERT(index == scope_info->ContextLocalInfoEntriesIndex());
   for (int i = 0; i < context_local_count; ++i) {
     Variable* var = context_locals[i];
-    uint32_t value = ContextLocalMode::encode(var->mode()) |
-        ContextLocalInitFlag::encode(var->initialization_flag());
+    uint32_t value =
+        ContextLocalMode::encode(var->mode()) |
+        ContextLocalInitFlag::encode(var->initialization_flag()) |
+        ContextLocalMaybeAssignedFlag::encode(var->maybe_assigned());
     scope_info->set(index++, Smi::FromInt(value));
   }
 
@@ -255,6 +257,14 @@ InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
 }
 
 
+MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) {
+  ASSERT(0 <= var && var < ContextLocalCount());
+  int info_index = ContextLocalInfoEntriesIndex() + var;
+  int value = Smi::cast(get(info_index))->value();
+  return ContextLocalMaybeAssignedFlag::decode(value);
+}
+
+
 bool ScopeInfo::LocalIsSynthetic(int var) {
   ASSERT(0 <= var && var < LocalCount());
   // There's currently no flag stored on the ScopeInfo to indicate that a
@@ -282,17 +292,17 @@ int ScopeInfo::StackSlotIndex(String* name) {
 
 
 int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
-                                Handle<String> name,
-                                VariableMode* mode,
-                                InitializationFlag* init_flag) {
+                                Handle<String> name, VariableMode* mode,
+                                InitializationFlag* init_flag,
+                                MaybeAssignedFlag* maybe_assigned_flag) {
   ASSERT(name->IsInternalizedString());
   ASSERT(mode != NULL);
   ASSERT(init_flag != NULL);
   if (scope_info->length() > 0) {
     ContextSlotCache* context_slot_cache =
         scope_info->GetIsolate()->context_slot_cache();
-    int result =
-        context_slot_cache->Lookup(*scope_info, *name, mode, init_flag);
+    int result = context_slot_cache->Lookup(*scope_info, *name, mode, init_flag,
+                                            maybe_assigned_flag);
     if (result != ContextSlotCache::kNotFound) {
       ASSERT(result < scope_info->ContextLength());
       return result;
@@ -306,15 +316,17 @@ int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
         int var = i - start;
         *mode = scope_info->ContextLocalMode(var);
         *init_flag = scope_info->ContextLocalInitFlag(var);
+        *maybe_assigned_flag = scope_info->ContextLocalMaybeAssignedFlag(var);
         result = Context::MIN_CONTEXT_SLOTS + var;
-        context_slot_cache->Update(scope_info, name, *mode, *init_flag, result);
+        context_slot_cache->Update(scope_info, name, *mode, *init_flag,
+                                   *maybe_assigned_flag, result);
         ASSERT(result < scope_info->ContextLength());
         return result;
       }
     }
-    // Cache as not found. Mode and init flag don't matter.
-    context_slot_cache->Update(
-        scope_info, name, INTERNAL, kNeedsInitialization, -1);
+    // Cache as not found. Mode, init flag and maybe assigned flag don't matter.
+    context_slot_cache->Update(scope_info, name, INTERNAL, kNeedsInitialization,
+                               kNotAssigned, -1);
   }
   return -1;
 }
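
The control flow above is a classic negative cache: probe a small
direct-mapped cache, fall back to scanning the serialized context locals, and
record the answer, including "not found", so repeated failing lookups stay
cheap. A self-contained sketch of the same pattern (hypothetical names,
std::string instead of interned strings):

    #include <functional>
    #include <string>

    // Direct-mapped cache that also remembers misses, mirroring how
    // ContextSlotCache caches "not found" entries.
    class SlotCache {
     public:
      static const int kNotFound = -2;  // probe failed; caller must scan
      int Lookup(const void* data, const std::string& name) const {
        const Entry& e = entries_[Hash(data, name)];
        if (e.data == data && e.name == name) return e.index;  // may be -1
        return kNotFound;
      }
      void Update(const void* data, const std::string& name, int index) {
        Entry& e = entries_[Hash(data, name)];
        e.data = data;
        e.name = name;
        e.index = index;  // index == -1 records a definitive miss
      }

     private:
      struct Entry {
        const void* data = nullptr;
        std::string name;
        int index = -1;
      };
      static size_t Hash(const void* data, const std::string& name) {
        return (reinterpret_cast<size_t>(data) ^
                std::hash<std::string>()(name)) % kLength;
      }
      static const int kLength = 256;
      Entry entries_[kLength];
    };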
@@ -413,26 +425,26 @@ int ContextSlotCache::Hash(Object* data, String* name) {
 }
 
 
-int ContextSlotCache::Lookup(Object* data,
-                             String* name,
-                             VariableMode* mode,
-                             InitializationFlag* init_flag) {
+int ContextSlotCache::Lookup(Object* data, String* name, VariableMode* mode,
+                             InitializationFlag* init_flag,
+                             MaybeAssignedFlag* maybe_assigned_flag) {
   int index = Hash(data, name);
   Key& key = keys_[index];
   if ((key.data == data) && key.name->Equals(name)) {
     Value result(values_[index]);
     if (mode != NULL) *mode = result.mode();
     if (init_flag != NULL) *init_flag = result.initialization_flag();
+    if (maybe_assigned_flag != NULL)
+      *maybe_assigned_flag = result.maybe_assigned_flag();
     return result.index() + kNotFound;
   }
   return kNotFound;
 }
 
 
-void ContextSlotCache::Update(Handle<Object> data,
-                              Handle<String> name,
-                              VariableMode mode,
-                              InitializationFlag init_flag,
+void ContextSlotCache::Update(Handle<Object> data, Handle<String> name,
+                              VariableMode mode, InitializationFlag init_flag,
+                              MaybeAssignedFlag maybe_assigned_flag,
                               int slot_index) {
   DisallowHeapAllocation no_gc;
   Handle<String> internalized_name;
@@ -444,9 +456,10 @@ void ContextSlotCache::Update(Handle<Object> data,
     key.data = *data;
     key.name = *internalized_name;
     // Note that Value only takes an unsigned integer as the index.
-    values_[index] = Value(mode, init_flag, slot_index - kNotFound).raw();
+    values_[index] = Value(mode, init_flag, maybe_assigned_flag,
+                           slot_index - kNotFound).raw();
 #ifdef DEBUG
-    ValidateEntry(data, name, mode, init_flag, slot_index);
+    ValidateEntry(data, name, mode, init_flag, maybe_assigned_flag, slot_index);
 #endif
   }
 }
@@ -459,10 +472,10 @@ void ContextSlotCache::Clear() {
 
 #ifdef DEBUG
 
-void ContextSlotCache::ValidateEntry(Handle<Object> data,
-                                     Handle<String> name,
+void ContextSlotCache::ValidateEntry(Handle<Object> data, Handle<String> name,
                                      VariableMode mode,
                                      InitializationFlag init_flag,
+                                     MaybeAssignedFlag maybe_assigned_flag,
                                      int slot_index) {
   DisallowHeapAllocation no_gc;
   Handle<String> internalized_name;
@@ -475,6 +488,7 @@ void ContextSlotCache::ValidateEntry(Handle<Object> data,
     Value result(values_[index]);
     ASSERT(result.mode() == mode);
     ASSERT(result.initialization_flag() == init_flag);
+    ASSERT(result.maybe_assigned_flag() == maybe_assigned_flag);
     ASSERT(result.index() + kNotFound == slot_index);
   }
 }
index 7b8ed44d5bd6280f5773625ce4f15af2f137c326..4de325939588446f44bf91ed8d59e42e2029877f 100644 (file)
@@ -20,17 +20,14 @@ class ContextSlotCache {
  public:
   // Lookup context slot index for (data, name).
   // If absent, kNotFound is returned.
-  int Lookup(Object* data,
-             String* name,
-             VariableMode* mode,
-             InitializationFlag* init_flag);
+  int Lookup(Object* data, String* name, VariableMode* mode,
+             InitializationFlag* init_flag,
+             MaybeAssignedFlag* maybe_assigned_flag);
 
   // Update an element in the cache.
-  void Update(Handle<Object> data,
-              Handle<String> name,
-              VariableMode mode,
+  void Update(Handle<Object> data, Handle<String> name, VariableMode mode,
               InitializationFlag init_flag,
-              int slot_index);
+              MaybeAssignedFlag maybe_assigned_flag, int slot_index);
 
   // Clear the cache.
   void Clear();
@@ -49,11 +46,9 @@ class ContextSlotCache {
   inline static int Hash(Object* data, String* name);
 
 #ifdef DEBUG
-  void ValidateEntry(Handle<Object> data,
-                     Handle<String> name,
-                     VariableMode mode,
-                     InitializationFlag init_flag,
-                     int slot_index);
+  void ValidateEntry(Handle<Object> data, Handle<String> name,
+                     VariableMode mode, InitializationFlag init_flag,
+                     MaybeAssignedFlag maybe_assigned_flag, int slot_index);
 #endif
 
   static const int kLength = 256;
@@ -63,17 +58,18 @@ class ContextSlotCache {
   };
 
   struct Value {
-    Value(VariableMode mode,
-          InitializationFlag init_flag,
-          int index) {
+    Value(VariableMode mode, InitializationFlag init_flag,
+          MaybeAssignedFlag maybe_assigned_flag, int index) {
       ASSERT(ModeField::is_valid(mode));
       ASSERT(InitField::is_valid(init_flag));
+      ASSERT(MaybeAssignedField::is_valid(maybe_assigned_flag));
       ASSERT(IndexField::is_valid(index));
-      value_ = ModeField::encode(mode) |
-          IndexField::encode(index) |
-          InitField::encode(init_flag);
+      value_ = ModeField::encode(mode) | IndexField::encode(index) |
+               InitField::encode(init_flag) |
+               MaybeAssignedField::encode(maybe_assigned_flag);
       ASSERT(mode == this->mode());
       ASSERT(init_flag == this->initialization_flag());
+      ASSERT(maybe_assigned_flag == this->maybe_assigned_flag());
       ASSERT(index == this->index());
     }
 
@@ -87,13 +83,18 @@ class ContextSlotCache {
       return InitField::decode(value_);
     }
 
+    MaybeAssignedFlag maybe_assigned_flag() {
+      return MaybeAssignedField::decode(value_);
+    }
+
     int index() { return IndexField::decode(value_); }
 
     // Bit fields in value_ (type, shift, size). Must be public so the
     // constants can be embedded in generated code.
-    class ModeField:  public BitField<VariableMode,       0, 4> {};
-    class InitField:  public BitField<InitializationFlag, 4, 1> {};
-    class IndexField: public BitField<int,                5, 32-5> {};
+    class ModeField : public BitField<VariableMode, 0, 4> {};
+    class InitField : public BitField<InitializationFlag, 4, 1> {};
+    class MaybeAssignedField : public BitField<MaybeAssignedFlag, 5, 1> {};
+    class IndexField : public BitField<int, 6, 32 - 6> {};
 
    private:
     uint32_t value_;
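
After this change value_ packs four fields into one uint32_t: the mode in
bits 0-3, the init flag in bit 4, the new maybe-assigned flag in bit 5, and
the slot index in the remaining 26 bits (IndexField shrank from 27 to 26 bits
to make room). A minimal sketch of the same packing with plain shifts,
independent of V8's BitField helper:

    #include <cassert>
    #include <cstdint>

    // | index (bits 6..31) | maybe_assigned (5) | init (4) | mode (3..0) |
    inline uint32_t Encode(uint32_t mode, bool init, bool maybe_assigned,
                           uint32_t index) {
      assert(mode < (1u << 4) && index < (1u << 26));
      return mode | (static_cast<uint32_t>(init) << 4) |
             (static_cast<uint32_t>(maybe_assigned) << 5) | (index << 6);
    }

    inline uint32_t DecodeMode(uint32_t v) { return v & 0xF; }
    inline bool DecodeInit(uint32_t v) { return (v >> 4) & 1; }
    inline bool DecodeMaybeAssigned(uint32_t v) { return (v >> 5) & 1; }
    inline uint32_t DecodeIndex(uint32_t v) { return v >> 6; }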
index 0dfc3a2c426f4a917823cbf13a677940509e1dd0..91ed1debbb45a41dc21b9c0a4f43e3255d8bc3e9 100644 (file)
@@ -30,14 +30,12 @@ VariableMap::VariableMap(Zone* zone)
 VariableMap::~VariableMap() {}
 
 
-Variable* VariableMap::Declare(
-    Scope* scope,
-    const AstRawString* name,
-    VariableMode mode,
-    bool is_valid_lhs,
-    Variable::Kind kind,
-    InitializationFlag initialization_flag,
-    Interface* interface) {
+Variable* VariableMap::Declare(Scope* scope, const AstRawString* name,
+                               VariableMode mode, bool is_valid_lhs,
+                               Variable::Kind kind,
+                               InitializationFlag initialization_flag,
+                               MaybeAssignedFlag maybe_assigned_flag,
+                               Interface* interface) {
   // AstRawStrings are unambiguous, i.e., the same string is always represented
   // by the same AstRawString*.
   // FIXME(marja): fix the type of Lookup.
@@ -46,13 +44,9 @@ Variable* VariableMap::Declare(
   if (p->value == NULL) {
     // The variable has not been declared yet -> insert it.
     ASSERT(p->key == name);
-    p->value = new(zone()) Variable(scope,
-                                    name,
-                                    mode,
-                                    is_valid_lhs,
-                                    kind,
-                                    initialization_flag,
-                                    interface);
+    p->value = new (zone())
+        Variable(scope, name, mode, is_valid_lhs, kind, initialization_flag,
+                 maybe_assigned_flag, interface);
   }
   return reinterpret_cast<Variable*>(p->value);
 }
@@ -392,8 +386,9 @@ Variable* Scope::LookupLocal(const AstRawString* name) {
   VariableMode mode;
   Variable::Location location = Variable::CONTEXT;
   InitializationFlag init_flag;
-  int index =
-      ScopeInfo::ContextSlotIndex(scope_info_, name_handle, &mode, &init_flag);
+  MaybeAssignedFlag maybe_assigned_flag;
+  int index = ScopeInfo::ContextSlotIndex(scope_info_, name_handle, &mode,
+                                          &init_flag, &maybe_assigned_flag);
   if (index < 0) {
     // Check parameters.
     index = scope_info_->ParameterIndex(*name_handle);
@@ -402,10 +397,14 @@ Variable* Scope::LookupLocal(const AstRawString* name) {
     mode = DYNAMIC;
     location = Variable::LOOKUP;
     init_flag = kCreatedInitialized;
+    // Be conservative and flag parameters as maybe assigned. Better
+    // information would require ScopeInfo to serialize the maybe_assigned
+    // bit for parameters as well.
+    maybe_assigned_flag = kMaybeAssigned;
   }
 
   Variable* var = variables_.Declare(this, name, mode, true, Variable::NORMAL,
-                                     init_flag);
+                                     init_flag, maybe_assigned_flag);
   var->AllocateTo(location, index);
   return var;
 }
@@ -446,18 +445,19 @@ Variable* Scope::Lookup(const AstRawString* name) {
 }
 
 
-void Scope::DeclareParameter(const AstRawString* name, VariableMode mode) {
+Variable* Scope::DeclareParameter(const AstRawString* name, VariableMode mode) {
   ASSERT(!already_resolved());
   ASSERT(is_function_scope());
   Variable* var = variables_.Declare(this, name, mode, true, Variable::NORMAL,
                                      kCreatedInitialized);
   params_.Add(var, zone());
+  return var;
 }
 
 
-Variable* Scope::DeclareLocal(const AstRawString* name,
-                              VariableMode mode,
+Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
                               InitializationFlag init_flag,
+                              MaybeAssignedFlag maybe_assigned_flag,
                               Interface* interface) {
   ASSERT(!already_resolved());
   // This function handles VAR, LET, and CONST modes.  DYNAMIC variables are
@@ -465,8 +465,8 @@ Variable* Scope::DeclareLocal(const AstRawString* name,
   // explicitly, and TEMPORARY variables are allocated via NewTemporary().
   ASSERT(IsDeclaredVariableMode(mode));
   ++num_var_or_const_;
-  return variables_.Declare(
-      this, name, mode, true, Variable::NORMAL, init_flag, interface);
+  return variables_.Declare(this, name, mode, true, Variable::NORMAL, init_flag,
+                            maybe_assigned_flag, interface);
 }
 
 
@@ -825,7 +825,7 @@ static void PrintVar(int indent, Variable* var) {
       PrintF("forced context allocation");
       comma = true;
     }
-    if (var->maybe_assigned()) {
+    if (var->maybe_assigned() == kMaybeAssigned) {
       if (comma) PrintF(", ");
       PrintF("maybe assigned");
     }
index f41a088da331eef5a7c9962693fd92b8806d4cc3..f5903aa02a03aeb1a222eaa9a2dfb026fc38acc4 100644 (file)
@@ -21,12 +21,10 @@ class VariableMap: public ZoneHashMap {
 
   virtual ~VariableMap();
 
-  Variable* Declare(Scope* scope,
-                    const AstRawString* name,
-                    VariableMode mode,
-                    bool is_valid_lhs,
-                    Variable::Kind kind,
+  Variable* Declare(Scope* scope, const AstRawString* name, VariableMode mode,
+                    bool is_valid_lhs, Variable::Kind kind,
                     InitializationFlag initialization_flag,
+                    MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
                     Interface* interface = Interface::NewValue());
 
   Variable* Lookup(const AstRawString* name);
@@ -127,13 +125,13 @@ class Scope: public ZoneObject {
   // Declare a parameter in this scope.  When there are duplicated
   // parameters the rightmost one 'wins'.  However, the implementation
   // expects all parameters to be declared, and in order from left to right.
-  void DeclareParameter(const AstRawString* name, VariableMode mode);
+  Variable* DeclareParameter(const AstRawString* name, VariableMode mode);
 
   // Declare a local variable in this scope. If the variable has been
   // declared before, the previously declared variable is returned.
-  Variable* DeclareLocal(const AstRawString* name,
-                         VariableMode mode,
+  Variable* DeclareLocal(const AstRawString* name, VariableMode mode,
                          InitializationFlag init_flag,
+                         MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
                          Interface* interface = Interface::NewValue());
 
   // Declare an implicit global variable in this scope which must be a
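
DeclareLocal takes the new flag as a defaulted parameter (as do
VariableMap::Declare and the Variable constructor), so the many call sites
that do not track assignment keep compiling unchanged. The shape of that
compatibility trick, as a sketch:

    enum MaybeAssignedFlag { kNotAssigned, kMaybeAssigned };

    // Appending a defaulted parameter extends the signature without touching
    // existing callers: Declare("x") still compiles and behaves as if
    // kNotAssigned had been passed explicitly.
    inline MaybeAssignedFlag Declare(const char* /* name */,
                                     MaybeAssignedFlag flag = kNotAssigned) {
      return flag;
    }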
index 196e5bb4a16fac72b43118de8f9f9c6f4754e78b..eae7bc6b1563296f178d3f31d96d7e3b81b22da6 100644 (file)
@@ -129,9 +129,18 @@ void StringStream::Add(Vector<const char> format, Vector<FmtElm> elms) {
     }
     case 'f': case 'g': case 'G': case 'e': case 'E': {
       double value = current.data_.u_double_;
-      EmbeddedVector<char, 28> formatted;
-      SNPrintF(formatted, temp.start(), value);
-      Add(formatted.start());
+      int inf = std::isinf(value);
+      if (inf == -1) {
+        Add("-inf");
+      } else if (inf == 1) {
+        Add("inf");
+      } else if (std::isnan(value)) {
+        Add("nan");
+      } else {
+        EmbeddedVector<char, 28> formatted;
+        SNPrintF(formatted, temp.start(), value);
+        Add(formatted.start());
+      }
       break;
     }
     case 'p': {
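
One portability caveat on the infinity check above: C99's isinf (e.g. in
glibc) may return the sign of the infinity, which the -1/+1 comparison relies
on, but C++11 specifies std::isinf as returning bool. A sign-agnostic sketch
of the same classification using only guaranteed <cmath> behavior:

    #include <cmath>
    #include <string>

    // Classify non-finite doubles without assuming isinf() reports a sign.
    std::string NonFiniteToString(double value) {
      if (std::isinf(value)) return std::signbit(value) ? "-inf" : "inf";
      if (std::isnan(value)) return "nan";
      return std::string();  // finite: caller formats via SNPrintF instead
    }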
index 327eba0679bc9cb9efa5444a839ef02b8b8082e3..0f39e51b822b188b0704b7d79dfe9f6f05ccdf10 100644 (file)
@@ -4,7 +4,7 @@
 
 #include "src/types.h"
 
-#include "src/string-stream.h"
+#include "src/ostreams.h"
 #include "src/types-inl.h"
 
 namespace v8 {
@@ -123,31 +123,21 @@ int TypeImpl<Config>::BitsetType::Lub(double value) {
   DisallowHeapAllocation no_allocation;
   if (i::IsMinusZero(value)) return kMinusZero;
   if (std::isnan(value)) return kNaN;
-  if (IsUint32Double(value)) return Lub(FastD2UI(value));
-  if (IsInt32Double(value)) return Lub(FastD2I(value));
-  return kOtherNumber;
-}
-
-
-template<class Config>
-int TypeImpl<Config>::BitsetType::Lub(int32_t value) {
-  if (value >= 0x40000000) {
-    return i::SmiValuesAre31Bits() ? kOtherUnsigned31 : kUnsignedSmall;
+  if (IsUint32Double(value)) {
+    uint32_t u = FastD2UI(value);
+    if (u < 0x40000000u) return kUnsignedSmall;
+    if (u < 0x80000000u) {
+      return i::SmiValuesAre31Bits() ? kOtherUnsigned31 : kUnsignedSmall;
+    }
+    return kOtherUnsigned32;
   }
-  if (value >= 0) return kUnsignedSmall;
-  if (value >= -0x40000000) return kOtherSignedSmall;
-  return i::SmiValuesAre31Bits() ? kOtherSigned32 : kOtherSignedSmall;
-}
-
-
-template<class Config>
-int TypeImpl<Config>::BitsetType::Lub(uint32_t value) {
-  DisallowHeapAllocation no_allocation;
-  if (value >= 0x80000000u) return kOtherUnsigned32;
-  if (value >= 0x40000000u) {
-    return i::SmiValuesAre31Bits() ? kOtherUnsigned31 : kUnsignedSmall;
+  if (IsInt32Double(value)) {
+    int32_t i = FastD2I(value);
+    ASSERT(i < 0);
+    if (i >= -0x40000000) return kOtherSignedSmall;
+    return i::SmiValuesAre31Bits() ? kOtherSigned32 : kOtherSignedSmall;
   }
-  return kUnsignedSmall;
+  return kOtherNumber;
 }
 
 
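
Merging the integer overloads into Lub(double) preserves the old range
boundaries. With 31-bit Smis: 5.0 maps to kUnsignedSmall, 2^30 to
kOtherUnsigned31, 2^31 to kOtherUnsigned32, -7.0 to kOtherSignedSmall, and
-2^30 - 1 to kOtherSigned32; non-integral values stay kOtherNumber. A
condensed sketch of the case analysis (bitset names as above; the input is
assumed to be an integral double within int32/uint32 range):

    #include <cstdint>

    enum Bitset {
      kUnsignedSmall, kOtherUnsigned31, kOtherUnsigned32,
      kOtherSignedSmall, kOtherSigned32
    };

    // Same boundaries as Lub(double), specialized to SmiValuesAre31Bits().
    Bitset ClassifyIntegral(int64_t value) {
      if (value >= 0) {
        if (value < 0x40000000) return kUnsignedSmall;      // below 2^30
        if (value < 0x80000000LL) return kOtherUnsigned31;  // needs bit 30
        return kOtherUnsigned32;                            // needs bit 31
      }
      if (value >= -0x40000000) return kOtherSignedSmall;   // >= -2^30
      return kOtherSigned32;                                // below -2^30
    }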
@@ -238,6 +228,7 @@ int TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
     case ACCESSOR_PAIR_TYPE:
     case FIXED_ARRAY_TYPE:
     case FOREIGN_TYPE:
+    case CODE_TYPE:
       return kInternal & kTaggedPtr;
     default:
       UNREACHABLE();
index 8d9678cf48ca0c9dd75565e5605a7af73f145be0..849f0f44b607411681342c33ca1b5cd02d3804e8 100644 (file)
@@ -7,11 +7,12 @@
 
 #include "src/factory.h"
 #include "src/handles.h"
-#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
 
+class OStream;
+
 // SUMMARY
 //
 // A simple type system for compiler-internal use. It is based entirely on
@@ -511,8 +512,6 @@ class TypeImpl<Config>::BitsetType : public TypeImpl<Config> {
   static int Lub(TypeImpl* type);  // least upper bound that's a bitset
   static int Lub(i::Object* value);
   static int Lub(double value);
-  static int Lub(int32_t value);
-  static int Lub(uint32_t value);
   static int Lub(i::Map* map);
   static int InherentLub(TypeImpl* type);
 
index 84dbe815e1fed66fe0e538d94310080d3fd33a19..2eca59eeabfc48adfdcfc460899659c7d93c7f3b 100644 (file)
@@ -6,6 +6,7 @@
 
 #include "src/frames.h"
 #include "src/frames-inl.h"
+#include "src/ostreams.h"
 #include "src/parser.h"  // for CompileTimeValue; TODO(rossberg): should move
 #include "src/scopes.h"
 
index 4668128e19c1e4f264e38d15bffec2c2f4291efc..3c1c0aab87aa375b75cba4621461199546f1f15b 100644 (file)
@@ -7,6 +7,7 @@
 
 #include "src/handles.h"
 #include "src/objects.h"
+#include "src/string-stream.h"
 #include "src/utils.h"
 #include "src/zone.h"
 
@@ -29,7 +30,7 @@ class UniqueSet;
 // Careful! Comparison of two Uniques is only correct if both were created
 // in the same "era" of GC or if at least one is a non-movable object.
 template <typename T>
-class Unique V8_FINAL {
+class Unique {
  public:
   // TODO(titzer): make private and introduce a uniqueness scope.
   explicit Unique(Handle<T> handle) {
@@ -117,8 +118,10 @@ class Unique V8_FINAL {
   friend class UniqueSet<T>;  // Uses internal details for speed.
   template <class U>
   friend class Unique;  // For comparing raw_address values.
+  template <class U>
+  friend class PrintableUnique;  // For automatic upcasting.
 
- private:
+ protected:
   Unique<T>() : raw_address_(NULL) { }
 
   Address raw_address_;
@@ -128,6 +131,70 @@ class Unique V8_FINAL {
 };
 
 
+// TODO(danno): If at some point all of the uses of Unique end up using
+// PrintableUnique, then we should merge PrintableUnique into Unique and make
+// generation of the printable string conditional on an "am I tracing" check.
+template <class T>
+class PrintableUnique : public Unique<T> {
+ public:
+  // TODO(titzer): make private and introduce a uniqueness scope.
+  explicit PrintableUnique(Zone* zone, Handle<T> handle) : Unique<T>(handle) {
+    InitializeString(zone);
+  }
+
+  // TODO(titzer): this is a hack to migrate to Unique<T> incrementally.
+  PrintableUnique(Zone* zone, Address raw_address, Handle<T> handle)
+      : Unique<T>(raw_address, handle) {
+    InitializeString(zone);
+  }
+
+  // Constructor for handling automatic upcasting.
+  // E.g. PrintableUnique<JSFunction> can be passed when
+  // PrintableUnique<Object> is expected.
+  template <class S>
+  PrintableUnique(PrintableUnique<S> uniq)  // NOLINT
+      : Unique<T>(Handle<T>()) {
+#ifdef DEBUG
+    T* a = NULL;
+    S* b = NULL;
+    a = b;  // Fake assignment to enforce type checks.
+    USE(a);
+#endif
+    this->raw_address_ = uniq.raw_address_;
+    this->handle_ = uniq.handle_;
+    string_ = uniq.string();
+  }
+
+  // TODO(titzer): this is a hack to migrate to Unique<T> incrementally.
+  static PrintableUnique<T> CreateUninitialized(Zone* zone, Handle<T> handle) {
+    return PrintableUnique<T>(zone, reinterpret_cast<Address>(NULL), handle);
+  }
+
+  static PrintableUnique<T> CreateImmovable(Zone* zone, Handle<T> handle) {
+    return PrintableUnique<T>(zone, reinterpret_cast<Address>(*handle), handle);
+  }
+
+  const char* string() { return string_; }
+
+ private:
+  const char* string_;
+
+  void InitializeString(Zone* zone) {
+    // The stringified version of the handle must be calculated when the
+    // PrintableUnique is constructed, to avoid accessing the heap later.
+    HeapStringAllocator temp_allocator;
+    StringStream stream(&temp_allocator);
+    this->handle_->ShortPrint(&stream);
+    SmartArrayPointer<const char> desc_string = stream.ToCString();
+    const char* desc_chars = desc_string.get();
+    int length = static_cast<int>(strlen(desc_chars));
+    char* desc_copy = zone->NewArray<char>(length + 1);
+    memcpy(desc_copy, desc_chars, length + 1);
+    string_ = desc_copy;
+  }
+};
+
+
 template <typename T>
 class UniqueSet V8_FINAL : public ZoneObject {
  public:
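
The pattern PrintableUnique introduces is to pay for the heap-walking
ShortPrint exactly once, in the constructor, so tracing code can later print
the description even while the heap is in motion. A stand-alone analogue of
that construction-time snapshot (std::string in place of zone allocation):

    #include <sstream>
    #include <string>

    // Capture a printable description eagerly so printing later never needs
    // to inspect (possibly relocated) heap state.
    template <typename T>
    class PrintableValue {
     public:
      explicit PrintableValue(const T& value)
          : value_(value), string_(Describe(value)) {}
      const char* string() const { return string_.c_str(); }

     private:
      static std::string Describe(const T& value) {
        std::ostringstream os;
        os << value;  // stand-in for Handle<T>::ShortPrint(&stream)
        return os.str();
      }
      T value_;
      std::string string_;
    };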
@@ -341,7 +408,6 @@ class UniqueSet V8_FINAL : public ZoneObject {
   }
 };
 
-
 } }  // namespace v8::internal
 
 #endif  // V8_HYDROGEN_UNIQUE_H_
index cc08d2f2bb2187b77a5d62513e75ca0425a7f439..6423c54f00557c4c75971565c0356e483210eb66 100644 (file)
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -8,6 +8,7 @@
 #include "src/base/once.h"
 #include "src/base/platform/platform.h"
 #include "src/bootstrapper.h"
+#include "src/compiler/instruction.h"
 #include "src/debug.h"
 #include "src/deoptimizer.h"
 #include "src/elements.h"
@@ -22,6 +23,7 @@
 #include "src/serialize.h"
 #include "src/store-buffer.h"
 
+
 namespace v8 {
 namespace internal {
 
@@ -46,6 +48,7 @@ void V8::TearDown() {
   Bootstrapper::TearDownExtensions();
   ElementsAccessor::TearDown();
   LOperand::TearDownCaches();
+  compiler::InstructionOperand::TearDownCaches();
   ExternalReference::TearDownMathExpData();
   RegisteredExtension::UnregisterAll();
   Isolate::GlobalTearDown();
@@ -87,6 +90,7 @@ void V8::InitializeOncePerProcessImpl() {
 #endif
   ElementsAccessor::InitializeOncePerProcess();
   LOperand::SetUpCaches();
+  compiler::InstructionOperand::SetUpCaches();
   SetUpJSCallerSavedCodeData();
   ExternalReference::SetUp();
   Bootstrapper::InitializeOncePerProcess();
index 8a234e46a0af9ba785478d185c669ee4f6538334..9916628f42ed85b98cb055877579aed19bed519f 100644 (file)
@@ -32,27 +32,24 @@ const char* Variable::Mode2String(VariableMode mode) {
 }
 
 
-Variable::Variable(Scope* scope,
-                   const AstRawString* name,
-                   VariableMode mode,
-                   bool is_valid_ref,
-                   Kind kind,
+Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
+                   bool is_valid_ref, Kind kind,
                    InitializationFlag initialization_flag,
-                   Interface* interface)
-  : scope_(scope),
-    name_(name),
-    mode_(mode),
-    kind_(kind),
-    location_(UNALLOCATED),
-    index_(-1),
-    initializer_position_(RelocInfo::kNoPosition),
-    local_if_not_shadowed_(NULL),
-    is_valid_ref_(is_valid_ref),
-    force_context_allocation_(false),
-    is_used_(false),
-    maybe_assigned_(false),
-    initialization_flag_(initialization_flag),
-    interface_(interface) {
+                   MaybeAssignedFlag maybe_assigned_flag, Interface* interface)
+    : scope_(scope),
+      name_(name),
+      mode_(mode),
+      kind_(kind),
+      location_(UNALLOCATED),
+      index_(-1),
+      initializer_position_(RelocInfo::kNoPosition),
+      local_if_not_shadowed_(NULL),
+      is_valid_ref_(is_valid_ref),
+      force_context_allocation_(false),
+      is_used_(false),
+      initialization_flag_(initialization_flag),
+      maybe_assigned_(maybe_assigned_flag),
+      interface_(interface) {
   // Var declared variables never need initialization.
   ASSERT(!(mode == VAR && initialization_flag == kNeedsInitialization));
 }
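
One subtlety in the reshuffled initializer list: C++ initializes members in
declaration order, and variable.h (below) now declares initialization_flag_
before maybe_assigned_, so the constructor lists them in that order. Writing
them the other way round would still initialize in declaration order and
typically draws a -Wreorder warning. A minimal illustration:

    // Initializer list matches declaration order, keeping construction and
    // source text in sync and silencing -Wreorder.
    class V {
     public:
      V(int init_flag, bool maybe_assigned)
          : initialization_flag_(init_flag), maybe_assigned_(maybe_assigned) {}

     private:
      int initialization_flag_;
      bool maybe_assigned_;
    };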
index 58089b36ed9699463763c9681fbd2670c313f0a2..2749b7b5b6f9c5613f62746f4034faef6b8b2ae0 100644 (file)
@@ -52,12 +52,9 @@ class Variable: public ZoneObject {
     LOOKUP
   };
 
-  Variable(Scope* scope,
-           const AstRawString* name,
-           VariableMode mode,
-           bool is_valid_ref,
-           Kind kind,
-           InitializationFlag initialization_flag,
+  Variable(Scope* scope, const AstRawString* name, VariableMode mode,
+           bool is_valid_ref, Kind kind, InitializationFlag initialization_flag,
+           MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
            Interface* interface = Interface::NewValue());
 
   // Printing support
@@ -83,8 +80,8 @@ class Variable: public ZoneObject {
   }
   bool is_used() { return is_used_; }
   void set_is_used() { is_used_ = true; }
-  bool maybe_assigned() { return maybe_assigned_; }
-  void set_maybe_assigned() { maybe_assigned_ = true; }
+  MaybeAssignedFlag maybe_assigned() const { return maybe_assigned_; }
+  void set_maybe_assigned() { maybe_assigned_ = kMaybeAssigned; }
 
   int initializer_position() { return initializer_position_; }
   void set_initializer_position(int pos) { initializer_position_ = pos; }
@@ -159,8 +156,8 @@ class Variable: public ZoneObject {
   // Usage info.
   bool force_context_allocation_;  // set by variable resolver
   bool is_used_;
-  bool maybe_assigned_;
   InitializationFlag initialization_flag_;
+  MaybeAssignedFlag maybe_assigned_;
 
   // Module type info.
   Interface* interface_;
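An aside on the change above: replacing the raw bool with a two-state enum makes call sites self-documenting. The enum itself, presumably declared elsewhere in the patch as

enum MaybeAssignedFlag { kNotAssigned, kMaybeAssigned };

means a constructor call now reads `..., kMaybeAssigned, ...` instead of a bare trailing `true`.
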
index 6d3750e3109c95426b9af2ef64116902765c274b..21ffa27b373afd27fd92cac29d680115ae4c4f8d 100644 (file)
@@ -901,6 +901,14 @@ void Assembler::emit_idiv(Register src, int size) {
 }
 
 
+void Assembler::emit_div(Register src, int size) {
+  EnsureSpace ensure_space(this);
+  emit_rex(src, size);
+  emit(0xF7);
+  emit_modrm(0x6, src);
+}
+
+
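For context on emit_div above: the 0xF7 opcode is shared by a family of unary arithmetic instructions (TEST/NOT/NEG/MUL/IMUL/DIV/IDIV), and the operation is selected by the reg field of the ModR/M byte, so /6 encodes unsigned DIV while /7 encodes signed IDIV; the disassembler change further down adds the matching case 6. A standalone illustration of the encoding, not V8 code:

#include <cstdint>
#include <cstdio>

int main() {
  // Hand-encode `div rcx`: REX.W prefix, opcode 0xF7, ModR/M with /6.
  uint8_t rex = 0x48;                   // REX.W selects 64-bit operand size
  uint8_t opcode = 0xF7;                // shared unary-arithmetic opcode
  uint8_t modrm = 0xC0 | (6 << 3) | 1;  // mod=11 (reg), reg=/6 (div), rm=1 (rcx)
  printf("%02x %02x %02x\n", rex, opcode, modrm);  // prints: 48 f7 f1
  return 0;
}
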
 void Assembler::emit_imul(Register src, int size) {
   EnsureSpace ensure_space(this);
   emit_rex(src, size);
@@ -1394,6 +1402,17 @@ void Assembler::emit_movzxb(Register dst, const Operand& src, int size) {
 }
 
 
+void Assembler::emit_movzxb(Register dst, Register src, int size) {
+  EnsureSpace ensure_space(this);
+  // 32 bit operations zero the top 32 bits of 64 bit registers.  Therefore
+  // there is no need to make this a 64 bit operation.
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xB6);
+  emit_modrm(dst, src);
+}
+
+
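The comment inside emit_movzxb leans on an x86-64 architectural rule worth spelling out: any instruction that writes a 32-bit register implicitly clears bits 63..32 of the full 64-bit register, so a movzx with a 32-bit destination already produces a correct 64-bit zero-extension and no REX.W prefix is needed. A tiny standalone illustration, not V8 code:

#include <cstdint>

// Zero-extending a byte through a 32-bit intermediate yields the full
// 64-bit result for free: the 32-bit write clears the upper 32 bits.
uint64_t ZeroExtendByte(uint8_t b) {
  uint32_t w = b;  // typically compiles to movzxbl
  return w;        // widening to 64 bits needs no extra instruction
}
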
 void Assembler::emit_movzxw(Register dst, const Operand& src, int size) {
   EnsureSpace ensure_space(this);
   // 32 bit operations zero the top 32 bits of 64 bit registers.  Therefore
@@ -1688,6 +1707,14 @@ void Assembler::emit_xchg(Register dst, Register src, int size) {
 }
 
 
+void Assembler::emit_xchg(Register dst, const Operand& src, int size) {
+  EnsureSpace ensure_space(this);
+  emit_rex(dst, src, size);
+  emit(0x87);
+  emit_operand(dst, src);
+}
+
+
 void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
   EnsureSpace ensure_space(this);
   if (kPointerSize == kInt64Size) {
index 4259e9b50ef5ccbfd5d5b8f2452b6052697a5543..6f4b7e1a56d1f53bc152e0e6b225af2f2c48617f 100644 (file)
@@ -437,26 +437,27 @@ class Operand BASE_EMBEDDED {
 };
 
 
-#define ASSEMBLER_INSTRUCTION_LIST(V)   \
-  V(add)                                \
-  V(and)                                \
-  V(cmp)                                \
-  V(dec)                                \
-  V(idiv)                               \
-  V(imul)                               \
-  V(inc)                                \
-  V(lea)                                \
-  V(mov)                                \
-  V(movzxb)                             \
-  V(movzxw)                             \
-  V(neg)                                \
-  V(not)                                \
-  V(or)                                 \
-  V(repmovs)                            \
-  V(sbb)                                \
-  V(sub)                                \
-  V(test)                               \
-  V(xchg)                               \
+#define ASSEMBLER_INSTRUCTION_LIST(V) \
+  V(add)                              \
+  V(and)                              \
+  V(cmp)                              \
+  V(dec)                              \
+  V(idiv)                             \
+  V(div)                              \
+  V(imul)                             \
+  V(inc)                              \
+  V(lea)                              \
+  V(mov)                              \
+  V(movzxb)                           \
+  V(movzxw)                           \
+  V(neg)                              \
+  V(not)                              \
+  V(or)                               \
+  V(repmovs)                          \
+  V(sbb)                              \
+  V(sub)                              \
+  V(test)                             \
+  V(xchg)                             \
   V(xor)
 
 
@@ -1435,6 +1436,7 @@ class Assembler : public AssemblerBase {
   // Divide edx:eax by lower 32 bits of src.  Quotient in eax, remainder in edx
   // when size is 32.
   void emit_idiv(Register src, int size);
+  void emit_div(Register src, int size);
 
   // Signed multiply instructions.
   // rdx:rax = rax * src when size is 64 or edx:eax = eax * src when size is 32.
@@ -1455,6 +1457,7 @@ class Assembler : public AssemblerBase {
   void emit_mov(const Operand& dst, Immediate value, int size);
 
   void emit_movzxb(Register dst, const Operand& src, int size);
+  void emit_movzxb(Register dst, Register src, int size);
   void emit_movzxw(Register dst, const Operand& src, int size);
   void emit_movzxw(Register dst, Register src, int size);
 
@@ -1514,9 +1517,12 @@ class Assembler : public AssemblerBase {
   void emit_test(Register reg, Immediate mask, int size);
   void emit_test(const Operand& op, Register reg, int size);
   void emit_test(const Operand& op, Immediate mask, int size);
+  void emit_test(Register reg, const Operand& op, int size) {
+    return emit_test(op, reg, size);
+  }
 
-  // Exchange two registers
   void emit_xchg(Register dst, Register src, int size);
+  void emit_xchg(Register dst, const Operand& src, int size);
 
   void emit_xor(Register dst, Register src, int size) {
     if (size == kInt64Size && dst.code() == src.code()) {
index cb97b7787d4abc4ed0fbd9fea6de32c8e39a0fbf..e3d54206eaac988bdfd1b83aae402c281059fe11 100644 (file)
@@ -20,7 +20,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { rsi, rbx };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
 }
 
@@ -28,14 +28,14 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
 void FastNewContextStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { rsi, rdi };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
 void ToNumberStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { rsi, rax };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
@@ -43,7 +43,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { rsi, rax };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
 }
 
@@ -58,9 +58,8 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
     Representation::Tagged() };
 
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
-      Runtime::FunctionForId(
-          Runtime::kCreateArrayLiteralStubBailout)->entry,
+      MajorKey(), ARRAY_SIZE(registers), registers,
+      Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
       representations);
 }
 
@@ -69,7 +68,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { rsi, rax, rbx, rcx, rdx };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
 }
 
@@ -77,7 +76,35 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { rsi, rbx, rdx };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void InstanceofStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  Register registers[] = {rsi, left(), right()};
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void CallFunctionStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  Register registers[] = {rsi, rdi};
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+}
+
+
+void CallConstructStub::InitializeInterfaceDescriptor(
+    Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
+  // rax : number of arguments
+  // rbx : feedback vector
+  // rdx : (only if rbx is not the megamorphic symbol) slot in feedback
+  //       vector (Smi)
+  // rdi : constructor function
+  // TODO(turbofan): So far we don't gather type feedback and hence skip the
+  // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+  Register registers[] = {rsi, rax, rdi, rbx};
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
@@ -85,7 +112,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { rsi, rcx, rbx, rax };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
 }
 
@@ -94,7 +121,7 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { rsi, rax, rbx };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry);
 }
 
@@ -103,7 +130,7 @@ const Register InterfaceDescriptor::ContextRegister() { return rsi; }
 
 
 static void InitializeArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
   // rax -- number of arguments
@@ -114,10 +141,8 @@ static void InitializeArrayConstructorDescriptor(
 
   if (constant_stack_parameter_count == 0) {
     Register registers[] = { rsi, rdi, rbx };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           deopt_handler,
-                           NULL,
-                           constant_stack_parameter_count,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+                           deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)
@@ -127,19 +152,16 @@ static void InitializeArrayConstructorDescriptor(
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           rax,
-                           deopt_handler,
-                           representations,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, rax,
+                           deopt_handler, representations,
                            constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE,
-                           PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
 }
 
 
 static void InitializeInternalArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
   // rsi -- context
@@ -150,10 +172,8 @@ static void InitializeInternalArrayConstructorDescriptor(
 
   if (constant_stack_parameter_count == 0) {
     Register registers[] = { rsi, rdi };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           deopt_handler,
-                           NULL,
-                           constant_stack_parameter_count,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+                           deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)
@@ -162,57 +182,54 @@ static void InitializeInternalArrayConstructorDescriptor(
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           rax,
-                           deopt_handler,
-                           representations,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, rax,
+                           deopt_handler, representations,
                            constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE,
-                           PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
 }
 
 
 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 0);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0);
 }
 
 
 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 1);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1);
 }
 
 
 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, -1);
+  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1);
 }
 
 
 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
 }
 
 
 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 1);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
 }
 
 
 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, -1);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
 }
 
 
 void CompareNilICStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { rsi, rax };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(CompareNilIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
@@ -222,7 +239,7 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
 void ToBooleanStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { rsi, rax };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(ToBooleanIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
@@ -232,7 +249,7 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
 void BinaryOpICStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { rsi, rdx, rax };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
@@ -242,7 +259,7 @@ void BinaryOpICStub::InitializeInterfaceDescriptor(
 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { rsi, rcx, rdx, rax };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
 }
 
@@ -250,9 +267,8 @@ void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
 void StringAddStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { rsi, rdx, rax };
-  descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
-      Runtime::FunctionForId(Runtime::kStringAdd)->entry);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
+                         Runtime::FunctionForId(Runtime::kStringAdd)->entry);
 }
 
 
@@ -2727,6 +2743,13 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   // is an instance of the function and anything else to
   // indicate that the value is not an instance.
 
+  // Fixed register usage throughout the stub.
+  Register object = rax;     // Object (lhs).
+  Register map = rbx;        // Map of the object.
+  Register function = rdx;   // Function (rhs).
+  Register prototype = rdi;  // Prototype of the function.
+  Register scratch = rcx;
+
   static const int kOffsetToMapCheckValue = 2;
   static const int kOffsetToResultValue = kPointerSize == kInt64Size ? 18 : 14;
   // The last 4 bytes of the instruction sequence
@@ -2741,85 +2764,88 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   // before the offset of the hole value in the root array.
   static const unsigned int kWordBeforeResultValue =
       kPointerSize == kInt64Size ? 0x458B4906 : 0x458B4106;
-  // Only the inline check flag is supported on X64.
-  ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
+
   int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0;
 
-  // Get the object - go slow case if it's a smi.
+  ASSERT_EQ(object.code(), InstanceofStub::left().code());
+  ASSERT_EQ(function.code(), InstanceofStub::right().code());
+
+  // Get the object and function - they are always both needed.
+  // Go slow case if the object is a smi.
   Label slow;
   StackArgumentsAccessor args(rsp, 2 + extra_argument_offset,
                               ARGUMENTS_DONT_CONTAIN_RECEIVER);
-  __ movp(rax, args.GetArgumentOperand(0));
-  __ JumpIfSmi(rax, &slow);
+  if (!HasArgsInRegisters()) {
+    __ movp(object, args.GetArgumentOperand(0));
+    __ movp(function, args.GetArgumentOperand(1));
+  }
+  __ JumpIfSmi(object, &slow);
 
-  // Check that the left hand is a JS object. Leave its map in rax.
+  // Check that the left-hand side is a JS object and leave its map in the
+  // map register.
-  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
+  __ CmpObjectType(object, FIRST_SPEC_OBJECT_TYPE, map);
   __ j(below, &slow);
-  __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
+  __ CmpInstanceType(map, LAST_SPEC_OBJECT_TYPE);
   __ j(above, &slow);
 
-  // Get the prototype of the function.
-  __ movp(rdx, args.GetArgumentOperand(1));
-  // rdx is function, rax is map.
-
   // If there is a call site cache don't look in the global cache, but do the
   // real lookup and update the call site cache.
-  if (!HasCallSiteInlineCheck()) {
+  if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
     // Look up the function and the map in the instanceof cache.
     Label miss;
-    __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
+    __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
     __ j(not_equal, &miss, Label::kNear);
-    __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+    __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
     __ j(not_equal, &miss, Label::kNear);
     __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
-    __ ret(2 * kPointerSize);
+    __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
     __ bind(&miss);
   }
 
-  __ TryGetFunctionPrototype(rdx, rbx, &slow, true);
+  // Get the prototype of the function.
+  __ TryGetFunctionPrototype(function, prototype, &slow, true);
 
   // Check that the function prototype is a JS object.
-  __ JumpIfSmi(rbx, &slow);
-  __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
+  __ JumpIfSmi(prototype, &slow);
+  __ CmpObjectType(prototype, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
   __ j(below, &slow);
   __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE);
   __ j(above, &slow);
 
-  // Register mapping:
-  //   rax is object map.
-  //   rdx is function.
-  //   rbx is function prototype.
+  // Update the global instanceof or call site inlined cache with the current
+  // map and function. The cached answer will be set when it is known below.
   if (!HasCallSiteInlineCheck()) {
-    __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
-    __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+    __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+    __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
   } else {
+    // The constants for the code patching are based on push instructions
+    // at the call site.
+    ASSERT(!HasArgsInRegisters());
     // Get return address and delta to inlined map check.
     __ movq(kScratchRegister, StackOperandForReturnAddress(0));
     __ subp(kScratchRegister, args.GetArgumentOperand(2));
     if (FLAG_debug_code) {
-      __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
-      __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
+      __ movl(scratch, Immediate(kWordBeforeMapCheckValue));
+      __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), scratch);
       __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCheck);
     }
     __ movp(kScratchRegister,
             Operand(kScratchRegister, kOffsetToMapCheckValue));
-    __ movp(Operand(kScratchRegister, 0), rax);
+    __ movp(Operand(kScratchRegister, 0), map);
   }
 
-  __ movp(rcx, FieldOperand(rax, Map::kPrototypeOffset));
-
   // Loop through the prototype chain looking for the function prototype.
+  __ movp(scratch, FieldOperand(map, Map::kPrototypeOffset));
   Label loop, is_instance, is_not_instance;
   __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
   __ bind(&loop);
-  __ cmpp(rcx, rbx);
+  __ cmpp(scratch, prototype);
   __ j(equal, &is_instance, Label::kNear);
-  __ cmpp(rcx, kScratchRegister);
+  __ cmpp(scratch, kScratchRegister);
   // The code at is_not_instance assumes that kScratchRegister contains a
   // non-zero GCable value (the null object in this case).
   __ j(equal, &is_not_instance, Label::kNear);
-  __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
-  __ movp(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
+  __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+  __ movp(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
   __ jmp(&loop);
 
   __ bind(&is_instance);
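To orient readers in the loop above: it implements the heart of JavaScript's instanceof by walking the object's prototype chain until it either finds function.prototype (instance) or reaches null (not an instance). A hedged C++ sketch of the same logic over a deliberately simplified object model (these types are illustrative, not V8's):

struct Map;
struct HeapObject {
  Map* map;
};
struct Map {
  HeapObject* prototype;  // NULL terminates the chain, like null above
};

// True iff fn_prototype appears somewhere on object's prototype chain.
bool IsInstance(HeapObject* object, HeapObject* fn_prototype) {
  for (HeapObject* p = object->map->prototype; p != NULL;
       p = p->map->prototype) {
    if (p == fn_prototype) return true;
  }
  return false;
}
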
@@ -2828,6 +2854,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
     // Store bitwise zero in the cache.  This is a Smi in GC terms.
     STATIC_ASSERT(kSmiTag == 0);
     __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ LoadRoot(rax, Heap::kTrueValueRootIndex);
+    }
   } else {
     // Store offset of true in the root array at the inline check site.
     int true_offset = 0x100 +
@@ -2843,14 +2872,20 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
       __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
       __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
     }
-    __ Set(rax, 0);
+    if (!ReturnTrueFalseObject()) {
+      __ Set(rax, 0);
+    }
   }
-  __ ret((2 + extra_argument_offset) * kPointerSize);
+  __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) *
+         kPointerSize);
 
   __ bind(&is_not_instance);
   if (!HasCallSiteInlineCheck()) {
     // We have to store a non-zero value in the cache.
     __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ LoadRoot(rax, Heap::kFalseValueRootIndex);
+    }
   } else {
     // Store offset of false in the root array at the inline check site.
     int false_offset = 0x100 +
@@ -2867,25 +2902,48 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
       __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
     }
   }
-  __ ret((2 + extra_argument_offset) * kPointerSize);
+  __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) *
+         kPointerSize);
 
   // Slow-case: Go through the JavaScript implementation.
   __ bind(&slow);
-  if (HasCallSiteInlineCheck()) {
-    // Remove extra value from the stack.
-    __ PopReturnAddressTo(rcx);
-    __ Pop(rax);
-    __ PushReturnAddressFrom(rcx);
+  if (!ReturnTrueFalseObject()) {
+    // Tail call the builtin which returns 0 or 1.
+    ASSERT(!HasArgsInRegisters());
+    if (HasCallSiteInlineCheck()) {
+      // Remove extra value from the stack.
+      __ PopReturnAddressTo(rcx);
+      __ Pop(rax);
+      __ PushReturnAddressFrom(rcx);
+    }
+    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+  } else {
+    // Call the builtin and convert 0/1 to true/false.
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(object);
+      __ Push(function);
+      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    }
+    Label true_value, done;
+    __ testq(rax, rax);
+    __ j(zero, &true_value, Label::kNear);
+    __ LoadRoot(rax, Heap::kFalseValueRootIndex);
+    __ jmp(&done, Label::kNear);
+    __ bind(&true_value);
+    __ LoadRoot(rax, Heap::kTrueValueRootIndex);
+    __ bind(&done);
+    __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) *
+           kPointerSize);
   }
-  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
 }
 
 
-// Passing arguments in registers is not supported.
-Register InstanceofStub::left() { return no_reg; }
+Register InstanceofStub::left() { return rax; }
 
 
-Register InstanceofStub::right() { return no_reg; }
+Register InstanceofStub::right() { return rdx; }
 
 
 // -------------------------------------------------------------------------
index 4697749c646d3f8c2dcfe625e848d99f56badb6a..b2ef1b64d76271956ef25ce858ea342f9734883d 100644 (file)
@@ -60,9 +60,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
 #endif
   DeoptimizationInputData* deopt_data =
       DeoptimizationInputData::cast(code->deoptimization_data());
-  SharedFunctionInfo* shared =
-      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
-  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
   deopt_data->SetSharedFunctionInfo(Smi::FromInt(0));
   // For each LLazyBailout instruction insert a call to the corresponding
   // deoptimization entry.
index bd16e92fb09b05ec87d0b741488d9db0b09e2a43..7d78c6fb04079186de82ba70de3bd12c69c925ff 100644 (file)
@@ -680,6 +680,9 @@ int DisassemblerX64::F6F7Instruction(byte* data) {
       case 5:
         mnem = "imul";
         break;
+      case 6:
+        mnem = "div";
+        break;
       case 7:
         mnem = "idiv";
         break;
index d552b31807ec3be5b164da1ce7fcd6bf158da68d..c9001b0008761cfb1cf290ca310710bc4b0741c8 100644 (file)
@@ -811,7 +811,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   int length = deoptimizations_.length();
   if (length == 0) return;
   Handle<DeoptimizationInputData> data =
-      DeoptimizationInputData::New(isolate(), length, TENURED);
+      DeoptimizationInputData::New(isolate(), length, 0, TENURED);
 
   Handle<ByteArray> translations =
       translations_.CreateByteArray(isolate()->factory());
index af57996bbea638e10da47be5b0d077e68d5df8c9..56a6c048f6c7a2c8b0b2dc4eb7f9920bab851ce1 100644 (file)
@@ -7,9 +7,8 @@
 #if V8_TARGET_ARCH_X64
 
 #include "src/hydrogen-osr.h"
-#include "src/lithium-allocator-inl.h"
+#include "src/lithium-inl.h"
 #include "src/x64/lithium-codegen-x64.h"
-#include "src/x64/lithium-x64.h"
 
 namespace v8 {
 namespace internal {
index b2f4697f16173ab240f0629029e5206995b34e84..69d02c0063252c5b46e69e40d4b6ba9ad2bc6114 100644 (file)
@@ -220,6 +220,9 @@ class LInstruction : public ZoneObject {
 
   virtual bool IsControl() const { return false; }
 
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
   void set_environment(LEnvironment* env) { environment_ = env; }
   LEnvironment* environment() const { return environment_; }
   bool HasEnvironment() const { return environment_ != NULL; }
@@ -262,11 +265,12 @@ class LInstruction : public ZoneObject {
   void VerifyCall();
 #endif
 
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
  private:
   // Iterator support.
   friend class InputIterator;
-  virtual int InputCount() = 0;
-  virtual LOperand* InputAt(int i) = 0;
 
   friend class TempIterator;
   virtual int TempCount() = 0;
index 39d78e32ef4632c48cdbf68f4bd02c4a504116a5..fcb28632c499213db292c4b2a9db56a61eee37a4 100644 (file)
@@ -23,7 +23,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, ebx };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
 }
 
@@ -31,7 +31,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
 void FastNewContextStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, edi };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
@@ -39,7 +39,7 @@ void ToNumberStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   // ToNumberStub invokes a function, and therefore needs a context.
   Register registers[] = { esi, eax };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
@@ -47,7 +47,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, eax };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
 }
 
@@ -62,9 +62,8 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
     Representation::Tagged() };
 
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
-      Runtime::FunctionForId(
-          Runtime::kCreateArrayLiteralStubBailout)->entry,
+      MajorKey(), ARRAY_SIZE(registers), registers,
+      Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
       representations);
 }
 
@@ -73,7 +72,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, eax, ebx, ecx, edx };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
 }
 
@@ -81,7 +80,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, ebx, edx };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
@@ -89,7 +88,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, ecx, ebx, eax };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
 }
 
@@ -98,7 +97,7 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, eax, ebx };
   descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
+      MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry);
 }
 
@@ -107,7 +106,7 @@ const Register InterfaceDescriptor::ContextRegister() { return esi; }
 
 
 static void InitializeArrayConstructorDescriptor(
-    Isolate* isolate,
+    Isolate* isolate, CodeStub::Major major,
     CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
@@ -119,10 +118,8 @@ static void InitializeArrayConstructorDescriptor(
 
   if (constant_stack_parameter_count == 0) {
     Register registers[] = { esi, edi, ebx };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           deopt_handler,
-                           NULL,
-                           constant_stack_parameter_count,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+                           deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)
@@ -132,19 +129,16 @@ static void InitializeArrayConstructorDescriptor(
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           eax,
-                           deopt_handler,
-                           representations,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, eax,
+                           deopt_handler, representations,
                            constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE,
-                           PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
 }
 
 
 static void InitializeInternalArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
   // eax -- number of arguments
@@ -154,10 +148,8 @@ static void InitializeInternalArrayConstructorDescriptor(
 
   if (constant_stack_parameter_count == 0) {
     Register registers[] = { esi, edi };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           deopt_handler,
-                           NULL,
-                           constant_stack_parameter_count,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+                           deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)
@@ -166,57 +158,54 @@ static void InitializeInternalArrayConstructorDescriptor(
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           eax,
-                           deopt_handler,
-                           representations,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, eax,
+                           deopt_handler, representations,
                            constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE,
-                           PASS_ARGUMENTS);
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
 }
 
 
 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
+  InitializeArrayConstructorDescriptor(isolate(), MajorKey(), descriptor, 0);
 }
 
 
 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
+  InitializeArrayConstructorDescriptor(isolate(), MajorKey(), descriptor, 1);
 }
 
 
 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
+  InitializeArrayConstructorDescriptor(isolate(), MajorKey(), descriptor, -1);
 }
 
 
 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
 }
 
 
 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 1);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
 }
 
 
 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, -1);
+  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
 }
 
 
 void CompareNilICStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, eax };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(CompareNilIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
@@ -225,7 +214,7 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
 void ToBooleanStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, eax };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(ToBooleanIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
@@ -235,7 +224,7 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
 void BinaryOpICStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, edx, eax };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_Miss));
   descriptor->SetMissHandler(
       ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
@@ -245,7 +234,7 @@ void BinaryOpICStub::InitializeInterfaceDescriptor(
 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, ecx, edx, eax };
-  descriptor->Initialize(ARRAY_SIZE(registers), registers,
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
 }
 
@@ -253,9 +242,8 @@ void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
 void StringAddStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
   Register registers[] = { esi, edx, eax };
-  descriptor->Initialize(
-      ARRAY_SIZE(registers), registers,
-      Runtime::FunctionForId(Runtime::kStringAdd)->entry);
+  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
+                         Runtime::FunctionForId(Runtime::kStringAdd)->entry);
 }
 
 
index 9d8c496651184cb6f19ac755e2970ec8c3a4f58b..9603f32fea0b9cb92ff45da14e0b8916c0eb6aee 100644 (file)
@@ -128,9 +128,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
   // Emit call to lazy deoptimization at all lazy deopt points.
   DeoptimizationInputData* deopt_data =
       DeoptimizationInputData::cast(code->deoptimization_data());
-  SharedFunctionInfo* shared =
-      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
-  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
 #ifdef DEBUG
   Address prev_call_address = NULL;
 #endif
index 4d9365149d5cbab2a3a19c1c2dac9976d4dab0cf..2dfb389642e1ab85656ef54195d4fe014e0219a5 100644 (file)
@@ -1095,7 +1095,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   int length = deoptimizations_.length();
   if (length == 0) return;
   Handle<DeoptimizationInputData> data =
-      DeoptimizationInputData::New(isolate(), length, TENURED);
+      DeoptimizationInputData::New(isolate(), length, 0, TENURED);
 
   Handle<ByteArray> translations =
       translations_.CreateByteArray(isolate()->factory());
index fd07ce22ef79dbe13ac3a7e361d34fb646bdbc81..ab0ae9cf606ddb446b3b79756baa69a72221ae6d 100644 (file)
@@ -62,6 +62,8 @@ class zone_allocator {
   Zone* zone_;
 };
 
+typedef zone_allocator<bool> ZoneBoolAllocator;
+typedef zone_allocator<int> ZoneIntAllocator;
 } }  // namespace v8::internal
 
 #endif  // V8_ZONE_ALLOCATOR_H_
index 0d9c2d76e27f4a200b4d57cf1e4438e8cb2af24f..1295ed7ab950e40f61c9d7d1928bde1166673a17 100644 (file)
@@ -5,15 +5,15 @@
 #ifndef V8_ZONE_CONTAINERS_H_
 #define V8_ZONE_CONTAINERS_H_
 
-#include <set>
 #include <vector>
 
-#include "src/zone.h"
+#include "src/zone-allocator.h"
 
 namespace v8 {
 namespace internal {
 
-typedef zone_allocator<int> ZoneIntAllocator;
+typedef std::vector<bool, ZoneBoolAllocator> BoolVector;
+
 typedef std::vector<int, ZoneIntAllocator> IntVector;
 typedef IntVector::iterator IntVectorIter;
 typedef IntVector::reverse_iterator IntVectorRIter;
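On the typedefs above: the zone allocators let standard containers draw their backing store from a V8 Zone, so the memory is reclaimed wholesale when the zone dies rather than element by element. A hedged usage sketch, assuming a live Zone* and that zone_allocator's constructor takes the zone as in zone-allocator.h:

namespace i = v8::internal;  // alias only for this sketch

void Example(i::Zone* zone) {
  // Extra parentheses avoid the most vexing parse on the allocator argument.
  i::IntVector ints((i::ZoneIntAllocator(zone)));
  ints.push_back(1);
  ints.push_back(2);  // storage comes from the zone, freed with the zone

  i::BoolVector seen(16, false, i::ZoneBoolAllocator(zone));
  seen[3] = true;
}
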
index 090aed2dd1dc67b9c4d9a33788be43561085ccff..bd70313439067654a1389d41f463768ef4cda0bc 100644 (file)
       ],
       'sources': [  ### gcmole(all) ###
         '<(generated_file)',
+        'compiler/codegen-tester.cc',
+        'compiler/codegen-tester.h',
+        'compiler/function-tester.h',
+        'compiler/graph-builder-tester.cc',
+        'compiler/graph-builder-tester.h',
+        'compiler/graph-tester.h',
+        'compiler/simplified-graph-builder.cc',
+        'compiler/simplified-graph-builder.h',
+        'compiler/test-branch-combine.cc',
+        'compiler/test-codegen-deopt.cc',
+        'compiler/test-gap-resolver.cc',
+        'compiler/test-graph-reducer.cc',
+        'compiler/test-instruction-selector.cc',
+        'compiler/test-instruction.cc',
+        'compiler/test-js-context-specialization.cc',
+        'compiler/test-js-constant-cache.cc',
+        'compiler/test-js-typed-lowering.cc',
+        'compiler/test-linkage.cc',
+        'compiler/test-machine-operator-reducer.cc',
+        'compiler/test-node-algorithm.cc',
+        'compiler/test-node-cache.cc',
+        'compiler/test-node.cc',
+        'compiler/test-operator.cc',
+        'compiler/test-phi-reducer.cc',
+        'compiler/test-pipeline.cc',
+        'compiler/test-representation-change.cc',
+        'compiler/test-run-deopt.cc',
+        'compiler/test-run-intrinsics.cc',
+        'compiler/test-run-jsbranches.cc',
+        'compiler/test-run-jscalls.cc',
+        'compiler/test-run-jsexceptions.cc',
+        'compiler/test-run-jsops.cc',
+        'compiler/test-run-machops.cc',
+        'compiler/test-run-variables.cc',
+        'compiler/test-schedule.cc',
+        'compiler/test-scheduler.cc',
+        'compiler/test-simplified-lowering.cc',
+        'compiler/test-structured-ifbuilder-fuzzer.cc',
+        'compiler/test-structured-machine-assembler.cc',
         'cctest.cc',
         'gay-fixed.cc',
         'gay-precision.cc',
@@ -57,6 +96,7 @@
         'test-atomicops.cc',
         'test-bignum.cc',
         'test-bignum-dtoa.cc',
+        'test-checks.cc',
         'test-circular-queue.cc',
         'test-compiler.cc',
         'test-condition-variable.cc',
         }],
         ['v8_target_arch=="arm"', {
           'sources': [  ### gcmole(arch:arm) ###
+            'compiler/test-instruction-selector-arm.cc',
             'test-assembler-arm.cc',
             'test-code-stubs.cc',
             'test-code-stubs-arm.cc',
index fc6ec398fd03a1464a9cb1e618b67612a17b72ff..c99520ba542f8b9b40dfd038b5a313457977ef17 100644 (file)
@@ -30,6 +30,8 @@
 
 #include "src/v8.h"
 
+#include "src/isolate-inl.h"
+
 #ifndef TEST
 #define TEST(Name)                                                             \
   static void Test##Name();                                                    \
@@ -112,6 +114,11 @@ class CcTest {
     return isolate_;
   }
 
+  static i::Isolate* InitIsolateOnce() {
+    if (!initialize_called_) InitializeVM();
+    return i_isolate();
+  }
+
   static i::Isolate* i_isolate() {
     return reinterpret_cast<i::Isolate*>(isolate());
   }
@@ -124,6 +131,10 @@ class CcTest {
     return reinterpret_cast<TestHeap*>(i_isolate()->heap());
   }
 
+  static v8::base::RandomNumberGenerator* random_number_generator() {
+    return InitIsolateOnce()->random_number_generator();
+  }
+
   static v8::Local<v8::Object> global() {
     return isolate()->GetCurrentContext()->Global();
   }
@@ -506,4 +517,30 @@ class HeapObjectsTracker {
 };
 
 
+class InitializedHandleScope {
+ public:
+  InitializedHandleScope()
+      : main_isolate_(CcTest::InitIsolateOnce()),
+        handle_scope_(main_isolate_) {}
+
+  // Prefixing the below with main_ reduces a lot of naming clashes.
+  i::Isolate* main_isolate() { return main_isolate_; }
+
+ private:
+  i::Isolate* main_isolate_;
+  i::HandleScope handle_scope_;
+};
+
+
+class HandleAndZoneScope : public InitializedHandleScope {
+ public:
+  HandleAndZoneScope() : main_zone_(main_isolate()) {}
+
+  // Prefixing the below with main_ reduces a lot of naming clashes.
+  i::Zone* main_zone() { return &main_zone_; }
+
+ private:
+  i::Zone main_zone_;
+};
+
 #endif  // ifndef CCTEST_H_
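A quick illustration of the scaffolding just added: deriving a test's locals from HandleAndZoneScope gives it an initialized isolate, a handle scope, and a zone in a single declaration. The test body below is invented, but uses only names from this header:

// Hypothetical cctest showing the intended use of HandleAndZoneScope.
TEST(UsesHandleAndZoneScope) {
  HandleAndZoneScope scope;
  i::Isolate* isolate = scope.main_isolate();  // isolate set up on demand
  i::Zone* zone = scope.main_zone();           // zone tied to that isolate
  USE(isolate);
  USE(zone);
}
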
index e1d93b230c9d5b3bb1b4da48294fb96e3186f327..43ee6c9273c0e4a070c143c44c34b0ebb05bf494 100644 (file)
   # BUG(3287). (test-cpu-profiler/SampleWhenFrameIsNotSetup)
   'test-cpu-profiler/*': [PASS, FLAKY],
 
+  ##############################################################################
+  # TurboFan compiler failures.
+
+  # TODO(jarin): Lazy deoptimization test.
+  'test-run-deopt/TurboSimpleDeopt': [SKIP],
+
+  # TODO(mstarzinger): These need investigation and are not categorized yet.
+  'test-cpu-profiler/*': [SKIP],
+  'test-heap/NextCodeLinkIsWeak': [PASS, NO_VARIANTS],
+
+  # TODO(mstarzinger/verwaest): This access check API is borked.
+  'test-api/TurnOnAccessCheck': [PASS, NO_VARIANTS],
+  'test-api/TurnOnAccessCheckAndRecompile': [PASS, NO_VARIANTS],
+
+  # TODO(mstarzinger): Sometimes the try-catch blacklist fails.
+  'test-debug/DebugEvaluateWithoutStack': [PASS, NO_VARIANTS],
+  'test-debug/MessageQueues': [PASS, NO_VARIANTS],
+  'test-debug/NestedBreakEventContextData': [PASS, NO_VARIANTS],
+  'test-debug/SendClientDataToHandler': [PASS, NO_VARIANTS],
+
+  # Some tests are just too slow to run for now.
+  'test-api/Threading*': [PASS, NO_VARIANTS],
+  'test-api/ExternalArrays': [PASS, NO_VARIANTS],
+  'test-api/RequestInterruptTestWithMathAbs': [PASS, NO_VARIANTS],
+  'test-heap/IncrementalMarkingStepMakesBigProgressWithLargeObjects': [PASS, NO_VARIANTS],
+  'test-heap-profiler/ManyLocalsInSharedContext': [PASS, NO_VARIANTS],
+  'test-debug/ThreadedDebugging': [PASS, NO_VARIANTS],
+  'test-debug/DebugBreakLoop': [PASS, NO_VARIANTS],
+
+  # Support for lazy deoptimization is missing.
+  'test-deoptimization/DeoptimizeSimple': [PASS, NO_VARIANTS],
+  'test-deoptimization/DeoptimizeSimpleNested': [PASS, NO_VARIANTS],
+  'test-deoptimization/DeoptimizeSimpleWithArguments': [PASS, NO_VARIANTS],
+  'test-deoptimization/DeoptimizeBinaryOperation*': [PASS, NO_VARIANTS],
+  'test-deoptimization/DeoptimizeCompare': [PASS, NO_VARIANTS],
+  'test-deoptimization/DeoptimizeLoadICStoreIC': [PASS, NO_VARIANTS],
+  'test-deoptimization/DeoptimizeLoadICStoreICNested': [PASS, NO_VARIANTS],
+
+  # Support for breakpoints requires using LoadICs and StoreICs.
+  'test-debug/BreakPointICStore': [PASS, NO_VARIANTS],
+  'test-debug/BreakPointICLoad': [PASS, NO_VARIANTS],
+  'test-debug/BreakPointICCall': [PASS, NO_VARIANTS],
+  'test-debug/BreakPointICCallWithGC': [PASS, NO_VARIANTS],
+  'test-debug/BreakPointConstructCallWithGC': [PASS, NO_VARIANTS],
+  'test-debug/BreakPointReturn': [PASS, NO_VARIANTS],
+  'test-debug/BreakPointThroughJavaScript': [PASS, NO_VARIANTS],
+  'test-debug/ScriptBreakPointByNameThroughJavaScript': [PASS, NO_VARIANTS],
+  'test-debug/ScriptBreakPointByIdThroughJavaScript': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepLinear': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepKeyedLoadLoop': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepKeyedStoreLoop': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepNamedLoadLoop': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepNamedStoreLoop': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepLinearMixedICs': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepDeclarations': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepLocals': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepIf': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepSwitch': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepWhile': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepDoWhile': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepFor': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepForContinue': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepForBreak': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepForIn': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepWith': [PASS, NO_VARIANTS],
+  'test-debug/DebugConditional': [PASS, NO_VARIANTS],
+  'test-debug/StepInOutSimple': [PASS, NO_VARIANTS],
+  'test-debug/StepInOutTree': [PASS, NO_VARIANTS],
+  'test-debug/StepInOutBranch': [PASS, NO_VARIANTS],
+  'test-debug/DebugBreak': [PASS, NO_VARIANTS],
+  'test-debug/DebugBreakStackInspection': [PASS, NO_VARIANTS],
+  'test-debug/BreakMessageWhenMessageHandlerIsReset': [PASS, NO_VARIANTS],
+  'test-debug/NoDebugBreakInAfterCompileMessageHandler': [PASS, NO_VARIANTS],
+  'test-debug/DisableBreak': [PASS, NO_VARIANTS],
+  'test-debug/RegExpDebugBreak': [PASS, NO_VARIANTS],
+  'test-debug/DebugBreakFunctionApply': [PASS, NO_VARIANTS],
+  'test-debug/DeoptimizeDuringDebugBreak': [PASS, NO_VARIANTS],
+
+  # Support for %GetFrameDetails is missing and requires checkpoints.
+  'test-api/Regress385349': [PASS, NO_VARIANTS],
+  'test-debug/DebuggerStatement': [PASS, NO_VARIANTS],
+  'test-debug/DebuggerStatementBreakpoint': [PASS, NO_VARIANTS],
+  'test-debug/DebugEvaluateWithCodeGenerationDisallowed': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepNatives': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepFunctionCall': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepFunctionApply': [PASS, NO_VARIANTS],
+  'test-debug/ScriptNameAndData': [PASS, NO_VARIANTS],
+  'test-debug/ContextData': [PASS, NO_VARIANTS],
+  'test-debug/DebugBreakInMessageHandler': [PASS, NO_VARIANTS],
+  'test-debug/CallFunctionInDebugger': [PASS, NO_VARIANTS],
+  'test-debug/CallingContextIsNotDebugContext': [PASS, NO_VARIANTS],
+  'test-debug/DebugEventContext': [PASS, NO_VARIANTS],
+  'test-debug/DebugBreakInline': [PASS, NO_VARIANTS],
+
   ############################################################################
   # Slow tests.
   'test-api/Threading1': [PASS, ['mode == debug', SLOW]],
diff --git a/test/cctest/compiler/call-tester.h b/test/cctest/compiler/call-tester.h
new file mode 100644 (file)
index 0000000..6998f19
--- /dev/null
@@ -0,0 +1,391 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_CALL_TESTER_H_
+#define V8_CCTEST_COMPILER_CALL_TESTER_H_
+
+#include "src/v8.h"
+
+#include "src/simulator.h"
+
+#if V8_TARGET_ARCH_IA32
+#if __GNUC__
+#define V8_CDECL __attribute__((cdecl))
+#else
+#define V8_CDECL __cdecl
+#endif
+#else
+#define V8_CDECL
+#endif
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <typename R>
+struct ReturnValueTraits {
+  static R Cast(uintptr_t r) { return reinterpret_cast<R>(r); }
+  static MachineRepresentation Representation() {
+    // TODO(dcarney): detect when R is a pointer to a subclass of Object
+    // instead of this check; the dead store below never executes, but it
+    // compiles only if R implicitly converts to Object*.
+    while (false) {
+      *(static_cast<Object* volatile*>(0)) = static_cast<R>(0);
+    }
+    return kMachineTagged;
+  }
+};
+
+template <>
+struct ReturnValueTraits<int32_t*> {
+  static int32_t* Cast(uintptr_t r) { return reinterpret_cast<int32_t*>(r); }
+  static MachineRepresentation Representation() {
+    return MachineOperatorBuilder::pointer_rep();
+  }
+};
+
+template <>
+struct ReturnValueTraits<void> {
+  static void Cast(uintptr_t r) {}
+  static MachineRepresentation Representation() {
+    return MachineOperatorBuilder::pointer_rep();
+  }
+};
+
+template <>
+struct ReturnValueTraits<bool> {
+  static bool Cast(uintptr_t r) { return static_cast<bool>(r); }
+  static MachineRepresentation Representation() {
+    return MachineOperatorBuilder::pointer_rep();
+  }
+};
+
+template <>
+struct ReturnValueTraits<int32_t> {
+  static int32_t Cast(uintptr_t r) { return static_cast<int32_t>(r); }
+  static MachineRepresentation Representation() { return kMachineWord32; }
+};
+
+template <>
+struct ReturnValueTraits<uint32_t> {
+  static uint32_t Cast(uintptr_t r) { return static_cast<uint32_t>(r); }
+  static MachineRepresentation Representation() { return kMachineWord32; }
+};
+
+template <>
+struct ReturnValueTraits<int64_t> {
+  static int64_t Cast(uintptr_t r) { return static_cast<int64_t>(r); }
+  static MachineRepresentation Representation() { return kMachineWord64; }
+};
+
+template <>
+struct ReturnValueTraits<uint64_t> {
+  static uint64_t Cast(uintptr_t r) { return static_cast<uint64_t>(r); }
+  static MachineRepresentation Representation() { return kMachineWord64; }
+};
+
+template <>
+struct ReturnValueTraits<int16_t> {
+  static int16_t Cast(uintptr_t r) { return static_cast<int16_t>(r); }
+  static MachineRepresentation Representation() {
+    return MachineOperatorBuilder::pointer_rep();
+  }
+};
+
+template <>
+struct ReturnValueTraits<int8_t> {
+  static int8_t Cast(uintptr_t r) { return static_cast<int8_t>(r); }
+  static MachineRepresentation Representation() {
+    return MachineOperatorBuilder::pointer_rep();
+  }
+};
+
+template <>
+struct ReturnValueTraits<double> {
+  static double Cast(uintptr_t r) {
+    UNREACHABLE();
+    return 0.0;
+  }
+};
+
+
+template <typename R>
+struct ParameterTraits {
+  static uintptr_t Cast(R r) { return static_cast<uintptr_t>(r); }
+};
+
+template <>
+struct ParameterTraits<int*> {
+  static uintptr_t Cast(int* r) { return reinterpret_cast<uintptr_t>(r); }
+};
+
+template <typename T>
+struct ParameterTraits<T*> {
+  static uintptr_t Cast(void* r) { return reinterpret_cast<uintptr_t>(r); }
+};
+
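Between the traits above and CallHelper below, the mechanism is worth a gloss: template specialization maps each C++ type appearing in a test's call signature to the machine representation the generated code must use, and any unsupported type fails at compile time. A reduced standalone sketch of that dispatch (names borrowed from this file, everything else illustrative):

#include <stdint.h>

enum MachineRepresentation { kMachineWord32, kMachineWord64, kMachineTagged };

template <typename T>
struct RepOf;  // deliberately undefined: unsupported types fail to compile

template <>
struct RepOf<int32_t> {
  static const MachineRepresentation value = kMachineWord32;
};
template <>
struct RepOf<int64_t> {
  static const MachineRepresentation value = kMachineWord64;
};
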
+class CallHelper {
+ public:
+  explicit CallHelper(Isolate* isolate) : isolate_(isolate) { USE(isolate_); }
+  virtual ~CallHelper() {}
+
+  static MachineCallDescriptorBuilder* ToCallDescriptorBuilder(
+      Zone* zone, MachineRepresentation return_type,
+      MachineRepresentation p0 = kMachineLast,
+      MachineRepresentation p1 = kMachineLast,
+      MachineRepresentation p2 = kMachineLast,
+      MachineRepresentation p3 = kMachineLast,
+      MachineRepresentation p4 = kMachineLast) {
+    const int kSize = 5;
+    MachineRepresentation* params =
+        zone->NewArray<MachineRepresentation>(kSize);
+    params[0] = p0;
+    params[1] = p1;
+    params[2] = p2;
+    params[3] = p3;
+    params[4] = p4;
+    int parameter_count = 0;
+    for (int i = 0; i < kSize; ++i) {
+      if (params[i] == kMachineLast) {
+        break;
+      }
+      parameter_count++;
+    }
+    return new (zone)
+        MachineCallDescriptorBuilder(return_type, parameter_count, params);
+  }
+
+ protected:
+  virtual void VerifyParameters(int parameter_count,
+                                MachineRepresentation* parameters) = 0;
+  virtual byte* Generate() = 0;
+
+ private:
+#if USE_SIMULATOR && V8_TARGET_ARCH_ARM64
+  uintptr_t CallSimulator(byte* f, Simulator::CallArgument* args) {
+    Simulator* simulator = Simulator::current(isolate_);
+    return static_cast<uintptr_t>(simulator->CallInt64(f, args));
+  }
+
+  template <typename R, typename F>
+  R DoCall(F* f) {
+    Simulator::CallArgument args[] = {Simulator::CallArgument::End()};
+    return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f), args));
+  }
+  template <typename R, typename F, typename P1>
+  R DoCall(F* f, P1 p1) {
+    Simulator::CallArgument args[] = {Simulator::CallArgument(p1),
+                                      Simulator::CallArgument::End()};
+    return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f), args));
+  }
+  template <typename R, typename F, typename P1, typename P2>
+  R DoCall(F* f, P1 p1, P2 p2) {
+    Simulator::CallArgument args[] = {Simulator::CallArgument(p1),
+                                      Simulator::CallArgument(p2),
+                                      Simulator::CallArgument::End()};
+    return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f), args));
+  }
+  template <typename R, typename F, typename P1, typename P2, typename P3>
+  R DoCall(F* f, P1 p1, P2 p2, P3 p3) {
+    Simulator::CallArgument args[] = {
+        Simulator::CallArgument(p1), Simulator::CallArgument(p2),
+        Simulator::CallArgument(p3), Simulator::CallArgument::End()};
+    return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f), args));
+  }
+  template <typename R, typename F, typename P1, typename P2, typename P3,
+            typename P4>
+  R DoCall(F* f, P1 p1, P2 p2, P3 p3, P4 p4) {
+    Simulator::CallArgument args[] = {
+        Simulator::CallArgument(p1), Simulator::CallArgument(p2),
+        Simulator::CallArgument(p3), Simulator::CallArgument(p4),
+        Simulator::CallArgument::End()};
+    return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f), args));
+  }
+#elif USE_SIMULATOR && V8_TARGET_ARCH_ARM
+  uintptr_t CallSimulator(byte* f, int32_t p1 = 0, int32_t p2 = 0,
+                          int32_t p3 = 0, int32_t p4 = 0) {
+    Simulator* simulator = Simulator::current(isolate_);
+    return static_cast<uintptr_t>(simulator->Call(f, 4, p1, p2, p3, p4));
+  }
+  template <typename R, typename F>
+  R DoCall(F* f) {
+    return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f)));
+  }
+  template <typename R, typename F, typename P1>
+  R DoCall(F* f, P1 p1) {
+    return ReturnValueTraits<R>::Cast(
+        CallSimulator(FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1)));
+  }
+  template <typename R, typename F, typename P1, typename P2>
+  R DoCall(F* f, P1 p1, P2 p2) {
+    return ReturnValueTraits<R>::Cast(
+        CallSimulator(FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1),
+                      ParameterTraits<P2>::Cast(p2)));
+  }
+  template <typename R, typename F, typename P1, typename P2, typename P3>
+  R DoCall(F* f, P1 p1, P2 p2, P3 p3) {
+    return ReturnValueTraits<R>::Cast(CallSimulator(
+        FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1),
+        ParameterTraits<P2>::Cast(p2), ParameterTraits<P3>::Cast(p3)));
+  }
+  template <typename R, typename F, typename P1, typename P2, typename P3,
+            typename P4>
+  R DoCall(F* f, P1 p1, P2 p2, P3 p3, P4 p4) {
+    return ReturnValueTraits<R>::Cast(CallSimulator(
+        FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1),
+        ParameterTraits<P2>::Cast(p2), ParameterTraits<P3>::Cast(p3),
+        ParameterTraits<P4>::Cast(p4)));
+  }
+#else
+  template <typename R, typename F>
+  R DoCall(F* f) {
+    return f();
+  }
+  template <typename R, typename F, typename P1>
+  R DoCall(F* f, P1 p1) {
+    return f(p1);
+  }
+  template <typename R, typename F, typename P1, typename P2>
+  R DoCall(F* f, P1 p1, P2 p2) {
+    return f(p1, p2);
+  }
+  template <typename R, typename F, typename P1, typename P2, typename P3>
+  R DoCall(F* f, P1 p1, P2 p2, P3 p3) {
+    return f(p1, p2, p3);
+  }
+  template <typename R, typename F, typename P1, typename P2, typename P3,
+            typename P4>
+  R DoCall(F* f, P1 p1, P2 p2, P3 p3, P4 p4) {
+    return f(p1, p2, p3, p4);
+  }
+#endif
+
+#ifndef DEBUG
+  void VerifyParameters0() {}
+
+  template <typename P1>
+  void VerifyParameters1() {}
+
+  template <typename P1, typename P2>
+  void VerifyParameters2() {}
+
+  template <typename P1, typename P2, typename P3>
+  void VerifyParameters3() {}
+
+  template <typename P1, typename P2, typename P3, typename P4>
+  void VerifyParameters4() {}
+#else
+  void VerifyParameters0() { VerifyParameters(0, NULL); }
+
+  template <typename P1>
+  void VerifyParameters1() {
+    MachineRepresentation parameters[] = {
+        ReturnValueTraits<P1>::Representation()};
+    VerifyParameters(ARRAY_SIZE(parameters), parameters);
+  }
+
+  template <typename P1, typename P2>
+  void VerifyParameters2() {
+    MachineRepresentation parameters[] = {
+        ReturnValueTraits<P1>::Representation(),
+        ReturnValueTraits<P2>::Representation()};
+    VerifyParameters(ARRAY_SIZE(parameters), parameters);
+  }
+
+  template <typename P1, typename P2, typename P3>
+  void VerifyParameters3() {
+    MachineRepresentation parameters[] = {
+        ReturnValueTraits<P1>::Representation(),
+        ReturnValueTraits<P2>::Representation(),
+        ReturnValueTraits<P3>::Representation()};
+    VerifyParameters(ARRAY_SIZE(parameters), parameters);
+  }
+
+  template <typename P1, typename P2, typename P3, typename P4>
+  void VerifyParameters4() {
+    MachineRepresentation parameters[] = {
+        ReturnValueTraits<P1>::Representation(),
+        ReturnValueTraits<P2>::Representation(),
+        ReturnValueTraits<P3>::Representation(),
+        ReturnValueTraits<P4>::Representation()};
+    VerifyParameters(ARRAY_SIZE(parameters), parameters);
+  }
+#endif
+
+  // TODO(dcarney): replace Call() in CallHelper2 with these.
+  template <typename R>
+  R Call0() {
+    typedef R V8_CDECL FType();
+    VerifyParameters0();
+    return DoCall<R>(FUNCTION_CAST<FType*>(Generate()));
+  }
+
+  template <typename R, typename P1>
+  R Call1(P1 p1) {
+    typedef R V8_CDECL FType(P1);
+    VerifyParameters1<P1>();
+    return DoCall<R>(FUNCTION_CAST<FType*>(Generate()), p1);
+  }
+
+  template <typename R, typename P1, typename P2>
+  R Call2(P1 p1, P2 p2) {
+    typedef R V8_CDECL FType(P1, P2);
+    VerifyParameters2<P1, P2>();
+    return DoCall<R>(FUNCTION_CAST<FType*>(Generate()), p1, p2);
+  }
+
+  template <typename R, typename P1, typename P2, typename P3>
+  R Call3(P1 p1, P2 p2, P3 p3) {
+    typedef R V8_CDECL FType(P1, P2, P3);
+    VerifyParameters3<P1, P2, P3>();
+    return DoCall<R>(FUNCTION_CAST<FType*>(Generate()), p1, p2, p3);
+  }
+
+  template <typename R, typename P1, typename P2, typename P3, typename P4>
+  R Call4(P1 p1, P2 p2, P3 p3, P4 p4) {
+    typedef R V8_CDECL FType(P1, P2, P3, P4);
+    VerifyParameters4<P1, P2, P3, P4>();
+    return DoCall<R>(FUNCTION_CAST<FType*>(Generate()), p1, p2, p3, p4);
+  }
+
+  template <typename R, typename C>
+  friend class CallHelper2;
+  Isolate* isolate_;
+};
+
+
+// TODO(dcarney): replace CallHelper with CallHelper2 and rename.
+template <typename R, typename C>
+class CallHelper2 {
+ public:
+  R Call() { return helper()->template Call0<R>(); }
+
+  template <typename P1>
+  R Call(P1 p1) {
+    return helper()->template Call1<R>(p1);
+  }
+
+  template <typename P1, typename P2>
+  R Call(P1 p1, P2 p2) {
+    return helper()->template Call2<R>(p1, p2);
+  }
+
+  template <typename P1, typename P2, typename P3>
+  R Call(P1 p1, P2 p2, P3 p3) {
+    return helper()->template Call3<R>(p1, p2, p3);
+  }
+
+  template <typename P1, typename P2, typename P3, typename P4>
+  R Call(P1 p1, P2 p2, P3 p3, P4 p4) {
+    return helper()->template Call4<R>(p1, p2, p3, p4);
+  }
+
+ private:
+  CallHelper* helper() { return static_cast<C*>(this); }
+};
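+
+// Usage sketch (illustrative, not part of the original file): a concrete
+// tester derives from both helpers so that CallHelper2::Call() can forward
+// through the CRTP downcast to CallHelper::CallN(); the subclass implements
+// the pure virtual Generate() and VerifyParameters(), e.g.:
+//
+//   class MyTester : public CallHelper,
+//                    public CallHelper2<int32_t, MyTester> { ... };
+//   MyTester t(isolate);
+//   int32_t sum = t.Call(1, 2);  // Dispatches to Call2<int32_t>(1, 2).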
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_CALL_TESTER_H_
diff --git a/test/cctest/compiler/codegen-tester.cc b/test/cctest/compiler/codegen-tester.cc
new file mode 100644 (file)
index 0000000..24b2c6e
--- /dev/null
@@ -0,0 +1,578 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(CompareWrapper) {
+  // Who tests the testers?
+  // If CompareWrapper is broken, then test expectations will be broken.
+  RawMachineAssemblerTester<int32_t> m;
+  CompareWrapper wWord32Equal(IrOpcode::kWord32Equal);
+  CompareWrapper wInt32LessThan(IrOpcode::kInt32LessThan);
+  CompareWrapper wInt32LessThanOrEqual(IrOpcode::kInt32LessThanOrEqual);
+  CompareWrapper wUint32LessThan(IrOpcode::kUint32LessThan);
+  CompareWrapper wUint32LessThanOrEqual(IrOpcode::kUint32LessThanOrEqual);
+
+  {
+    FOR_INT32_INPUTS(pl) {
+      FOR_INT32_INPUTS(pr) {
+        int32_t a = *pl;
+        int32_t b = *pr;
+        CHECK_EQ(a == b, wWord32Equal.Int32Compare(a, b));
+        CHECK_EQ(a < b, wInt32LessThan.Int32Compare(a, b));
+        CHECK_EQ(a <= b, wInt32LessThanOrEqual.Int32Compare(a, b));
+      }
+    }
+  }
+
+  {
+    FOR_UINT32_INPUTS(pl) {
+      FOR_UINT32_INPUTS(pr) {
+        uint32_t a = *pl;
+        uint32_t b = *pr;
+        CHECK_EQ(a == b, wWord32Equal.Int32Compare(a, b));
+        CHECK_EQ(a < b, wUint32LessThan.Int32Compare(a, b));
+        CHECK_EQ(a <= b, wUint32LessThanOrEqual.Int32Compare(a, b));
+      }
+    }
+  }
+
+  CHECK_EQ(true, wWord32Equal.Int32Compare(0, 0));
+  CHECK_EQ(true, wWord32Equal.Int32Compare(257, 257));
+  CHECK_EQ(true, wWord32Equal.Int32Compare(65539, 65539));
+  CHECK_EQ(true, wWord32Equal.Int32Compare(-1, -1));
+  CHECK_EQ(true, wWord32Equal.Int32Compare(0xffffffff, 0xffffffff));
+
+  CHECK_EQ(false, wWord32Equal.Int32Compare(0, 1));
+  CHECK_EQ(false, wWord32Equal.Int32Compare(257, 256));
+  CHECK_EQ(false, wWord32Equal.Int32Compare(65539, 65537));
+  CHECK_EQ(false, wWord32Equal.Int32Compare(-1, -2));
+  CHECK_EQ(false, wWord32Equal.Int32Compare(0xffffffff, 0xfffffffe));
+
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(0, 0));
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(357, 357));
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(75539, 75539));
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(-1, -1));
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(0xffffffff, 0xffffffff));
+
+  CHECK_EQ(true, wInt32LessThan.Int32Compare(0, 1));
+  CHECK_EQ(true, wInt32LessThan.Int32Compare(456, 457));
+  CHECK_EQ(true, wInt32LessThan.Int32Compare(85537, 85539));
+  CHECK_EQ(true, wInt32LessThan.Int32Compare(-2, -1));
+  CHECK_EQ(true, wInt32LessThan.Int32Compare(0xfffffffe, 0xffffffff));
+
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(1, 0));
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(457, 456));
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(85539, 85537));
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(-1, -2));
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(0xffffffff, 0xfffffffe));
+
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(0, 0));
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(357, 357));
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(75539, 75539));
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(-1, -1));
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(0xffffffff, 0xffffffff));
+
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(0, 1));
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(456, 457));
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(85537, 85539));
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(-2, -1));
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(0xfffffffe, 0xffffffff));
+
+  CHECK_EQ(false, wInt32LessThanOrEqual.Int32Compare(1, 0));
+  CHECK_EQ(false, wInt32LessThanOrEqual.Int32Compare(457, 456));
+  CHECK_EQ(false, wInt32LessThanOrEqual.Int32Compare(85539, 85537));
+  CHECK_EQ(false, wInt32LessThanOrEqual.Int32Compare(-1, -2));
+  CHECK_EQ(false, wInt32LessThanOrEqual.Int32Compare(0xffffffff, 0xfffffffe));
+
+  // Unsigned comparisons.
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(0, 0));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(357, 357));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(75539, 75539));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(-1, -1));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(0xffffffff, 0xffffffff));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(0xffffffff, 0));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(-2999, 0));
+
+  CHECK_EQ(true, wUint32LessThan.Int32Compare(0, 1));
+  CHECK_EQ(true, wUint32LessThan.Int32Compare(456, 457));
+  CHECK_EQ(true, wUint32LessThan.Int32Compare(85537, 85539));
+  CHECK_EQ(true, wUint32LessThan.Int32Compare(-11, -10));
+  CHECK_EQ(true, wUint32LessThan.Int32Compare(0xfffffffe, 0xffffffff));
+  CHECK_EQ(true, wUint32LessThan.Int32Compare(0, 0xffffffff));
+  CHECK_EQ(true, wUint32LessThan.Int32Compare(0, -2996));
+
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(1, 0));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(457, 456));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(85539, 85537));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(-10, -21));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(0xffffffff, 0xfffffffe));
+
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0, 0));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(357, 357));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(75539, 75539));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(-1, -1));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0xffffffff, 0xffffffff));
+
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0, 1));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(456, 457));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(85537, 85539));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(-300, -299));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(-300, -300));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0xfffffffe, 0xffffffff));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0, -2995));
+
+  CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(1, 0));
+  CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(457, 456));
+  CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(85539, 85537));
+  CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(-130, -170));
+  CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(0xffffffff, 0xfffffffe));
+  CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(-2997, 0));
+
+  CompareWrapper wFloat64Equal(IrOpcode::kFloat64Equal);
+  CompareWrapper wFloat64LessThan(IrOpcode::kFloat64LessThan);
+  CompareWrapper wFloat64LessThanOrEqual(IrOpcode::kFloat64LessThanOrEqual);
+
+  // Check NaN handling.
+  double nan = v8::base::OS::nan_value();
+  double inf = V8_INFINITY;
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, 0.0));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, 1.0));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, -inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, nan));
+
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(0.0, nan));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(1.0, nan));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(inf, nan));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(-inf, nan));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, nan));
+
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(nan, 0.0));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(nan, 1.0));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(nan, inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(nan, -inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(nan, nan));
+
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(0.0, nan));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(1.0, nan));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, nan));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(-inf, nan));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(nan, nan));
+
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(nan, 0.0));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(nan, 1.0));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(nan, inf));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(nan, -inf));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(nan, nan));
+
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(0.0, nan));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(1.0, nan));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(inf, nan));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(-inf, nan));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(nan, nan));
+
+  // Check inf handling.
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(inf, 0.0));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(inf, 1.0));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(inf, inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(inf, -inf));
+
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(0.0, inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(1.0, inf));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(inf, inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(-inf, inf));
+
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, 0.0));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, 1.0));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, -inf));
+
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(0.0, inf));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(1.0, inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, inf));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(-inf, inf));
+
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(inf, 0.0));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(inf, 1.0));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(inf, inf));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(inf, -inf));
+
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(0.0, inf));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(1.0, inf));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(inf, inf));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-inf, inf));
+
+  // Check -inf handling.
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(-inf, 0.0));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(-inf, 1.0));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(-inf, inf));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(-inf, -inf));
+
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(0.0, -inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(1.0, -inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(inf, -inf));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(-inf, -inf));
+
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(-inf, 0.0));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(-inf, 1.0));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(-inf, inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(-inf, -inf));
+
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(0.0, -inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(1.0, -inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, -inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(-inf, -inf));
+
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-inf, 0.0));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-inf, 1.0));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-inf, inf));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-inf, -inf));
+
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(0.0, -inf));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(1.0, -inf));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(inf, -inf));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-inf, -inf));
+
+  // Check basic values.
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(0, 0));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(257.1, 257.1));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(65539.1, 65539.1));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(-1.1, -1.1));
+
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(0, 1));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(257.2, 256.2));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(65539.2, 65537.2));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(-1.2, -2.2));
+
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(0, 0));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(357.3, 357.3));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(75539.3, 75539.3));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(-1.3, -1.3));
+
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(0, 1));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(456.4, 457.4));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(85537.4, 85539.4));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(-2.4, -1.4));
+
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(1, 0));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(457.5, 456.5));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(85539.5, 85537.5));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(-1.5, -2.5));
+
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(0, 0));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(357.6, 357.6));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(75539.6, 75539.6));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-1.6, -1.6));
+
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(0, 1));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(456.7, 457.7));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(85537.7, 85539.7));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-2.7, -1.7));
+
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(1, 0));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(457.8, 456.8));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(85539.8, 85537.8));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(-1.8, -2.8));
+}
+
+
+void Int32BinopInputShapeTester::TestAllInputShapes() {
+  std::vector<int32_t> inputs = ValueHelper::int32_vector();
+  int num_int_inputs = static_cast<int>(inputs.size());
+  if (num_int_inputs > 16) num_int_inputs = 16;  // limit to 16 inputs
+
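+  // Operand shape encoding: -2 selects the raw Parameter node, -1 a Load
+  // from memory, and 0..num_int_inputs-1 an Int32Constant of inputs[i].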
+  for (int i = -2; i < num_int_inputs; i++) {    // for all left shapes
+    for (int j = -2; j < num_int_inputs; j++) {  // for all right shapes
+      if (i >= 0 && j >= 0) break;               // No constant/constant combos
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+      Node* p0 = m.Parameter(0);
+      Node* p1 = m.Parameter(1);
+      Node* n0;
+      Node* n1;
+
+      // left = Parameter | Load | Constant
+      if (i == -2) {
+        n0 = p0;
+      } else if (i == -1) {
+        n0 = m.LoadFromPointer(&input_a, kMachineWord32);
+      } else {
+        n0 = m.Int32Constant(inputs[i]);
+      }
+
+      // right = Parameter | Load | Constant
+      if (j == -2) {
+        n1 = p1;
+      } else if (j == -1) {
+        n1 = m.LoadFromPointer(&input_b, kMachineWord32);
+      } else {
+        n1 = m.Int32Constant(inputs[j]);
+      }
+
+      gen->gen(&m, n0, n1);
+
+      if (false) printf("Int32BinopInputShapeTester i=%d, j=%d\n", i, j);
+      if (i >= 0) {
+        input_a = inputs[i];
+        RunRight(&m);
+      } else if (j >= 0) {
+        input_b = inputs[j];
+        RunLeft(&m);
+      } else {
+        Run(&m);
+      }
+    }
+  }
+}
+
+
+void Int32BinopInputShapeTester::Run(RawMachineAssemblerTester<int32_t>* m) {
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      input_a = *pl;
+      input_b = *pr;
+      int32_t expect = gen->expected(input_a, input_b);
+      if (false) printf("  cmp(a=%d, b=%d) ?== %d\n", input_a, input_b, expect);
+      CHECK_EQ(expect, m->Call(input_a, input_b));
+    }
+  }
+}
+
+
+void Int32BinopInputShapeTester::RunLeft(
+    RawMachineAssemblerTester<int32_t>* m) {
+  FOR_UINT32_INPUTS(i) {
+    input_a = *i;
+    int32_t expect = gen->expected(input_a, input_b);
+    if (false) printf("  cmp(a=%d, b=%d) ?== %d\n", input_a, input_b, expect);
+    CHECK_EQ(expect, m->Call(input_a, input_b));
+  }
+}
+
+
+void Int32BinopInputShapeTester::RunRight(
+    RawMachineAssemblerTester<int32_t>* m) {
+  FOR_UINT32_INPUTS(i) {
+    input_b = *i;
+    int32_t expect = gen->expected(input_a, input_b);
+    if (false) printf("  cmp(a=%d, b=%d) ?== %d\n", input_a, input_b, expect);
+    CHECK_EQ(expect, m->Call(input_a, input_b));
+  }
+}
+
+
+TEST(ParametersEqual) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+  Node* p1 = m.Parameter(1);
+  CHECK_NE(NULL, p1);
+  Node* p0 = m.Parameter(0);
+  CHECK_NE(NULL, p0);
+  CHECK_EQ(p0, m.Parameter(0));
+  CHECK_EQ(p1, m.Parameter(1));
+}
+
+
+#if V8_TURBOFAN_TARGET
+
+void RunSmiConstant(int32_t v) {
+// TODO(dcarney): on x64 Smis are generated with the SmiConstantRegister
+#if !V8_TARGET_ARCH_X64
+  if (Smi::IsValid(v)) {
+    RawMachineAssemblerTester<Object*> m;
+    m.Return(m.NumberConstant(v));
+    CHECK_EQ(Smi::FromInt(v), m.Call());
+  }
+#endif
+}
+
+
+void RunNumberConstant(double v) {
+  RawMachineAssemblerTester<Object*> m;
+#if V8_TARGET_ARCH_X64
+  // TODO(dcarney): on x64 Smis are generated with the SmiConstantRegister
+  Handle<Object> number = m.isolate()->factory()->NewNumber(v);
+  if (number->IsSmi()) return;
+#endif
+  m.Return(m.NumberConstant(v));
+  Object* result = m.Call();
+  m.CheckNumber(v, result);
+}
+
+
+TEST(RunEmpty) {
+  RawMachineAssemblerTester<int32_t> m;
+  m.Return(m.Int32Constant(0));
+  CHECK_EQ(0, m.Call());
+}
+
+
+TEST(RunInt32Constants) {
+  FOR_INT32_INPUTS(i) {
+    RawMachineAssemblerTester<int32_t> m;
+    m.Return(m.Int32Constant(*i));
+    CHECK_EQ(*i, m.Call());
+  }
+}
+
+
+TEST(RunSmiConstants) {
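+  // Walk powers of two (the shift eventually produces 0, ending the loop)
+  // and nearby odd values, then the Smi range endpoints.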
+  for (int32_t i = 1; i < Smi::kMaxValue && i != 0; i = i << 1) {
+    RunSmiConstant(i);
+    RunSmiConstant(3 * i);
+    RunSmiConstant(5 * i);
+    RunSmiConstant(-i);
+    RunSmiConstant(i | 1);
+    RunSmiConstant(i | 3);
+  }
+  RunSmiConstant(Smi::kMaxValue);
+  RunSmiConstant(Smi::kMaxValue - 1);
+  RunSmiConstant(Smi::kMinValue);
+  RunSmiConstant(Smi::kMinValue + 1);
+
+  FOR_INT32_INPUTS(i) { RunSmiConstant(*i); }
+}
+
+
+TEST(RunNumberConstants) {
+  {
+    FOR_FLOAT64_INPUTS(i) { RunNumberConstant(*i); }
+  }
+  {
+    FOR_INT32_INPUTS(i) { RunNumberConstant(*i); }
+  }
+
+  for (int32_t i = 1; i < Smi::kMaxValue && i != 0; i = i << 1) {
+    RunNumberConstant(i);
+    RunNumberConstant(-i);
+    RunNumberConstant(i | 1);
+    RunNumberConstant(i | 3);
+  }
+  RunNumberConstant(Smi::kMaxValue);
+  RunNumberConstant(Smi::kMaxValue - 1);
+  RunNumberConstant(Smi::kMinValue);
+  RunNumberConstant(Smi::kMinValue + 1);
+}
+
+
+TEST(RunEmptyString) {
+  RawMachineAssemblerTester<Object*> m;
+  m.Return(m.StringConstant("empty"));
+  m.CheckString("empty", m.Call());
+}
+
+
+TEST(RunHeapConstant) {
+  RawMachineAssemblerTester<Object*> m;
+  m.Return(m.StringConstant("empty"));
+  m.CheckString("empty", m.Call());
+}
+
+
+TEST(RunHeapNumberConstant) {
+  RawMachineAssemblerTester<Object*> m;
+  Handle<Object> number = m.isolate()->factory()->NewHeapNumber(100.5);
+  m.Return(m.HeapConstant(number));
+  Object* result = m.Call();
+  CHECK_EQ(result, *number);
+}
+
+
+TEST(RunParam1) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  m.Return(m.Parameter(0));
+
+  FOR_INT32_INPUTS(i) {
+    int32_t result = m.Call(*i);
+    CHECK_EQ(*i, result);
+  }
+}
+
+
+TEST(RunParam2_1) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+  Node* p0 = m.Parameter(0);
+  Node* p1 = m.Parameter(1);
+  m.Return(p0);
+  USE(p1);
+
+  FOR_INT32_INPUTS(i) {
+    int32_t result = m.Call(*i, -9999);
+    CHECK_EQ(*i, result);
+  }
+}
+
+
+TEST(RunParam2_2) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+  Node* p0 = m.Parameter(0);
+  Node* p1 = m.Parameter(1);
+  m.Return(p1);
+  USE(p0);
+
+  FOR_INT32_INPUTS(i) {
+    int32_t result = m.Call(-7777, *i);
+    CHECK_EQ(*i, result);
+  }
+}
+
+
+TEST(RunParam3) {
+  for (int i = 0; i < 3; i++) {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    Node* nodes[] = {m.Parameter(0), m.Parameter(1), m.Parameter(2)};
+    m.Return(nodes[i]);
+
+    int p[] = {-99, -77, -88};
+    FOR_INT32_INPUTS(j) {
+      p[i] = *j;
+      int32_t result = m.Call(p[0], p[1], p[2]);
+      CHECK_EQ(*j, result);
+    }
+  }
+}
+
+
+TEST(RunBinopTester) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(bt.param0);
+
+    FOR_INT32_INPUTS(i) { CHECK_EQ(*i, bt.call(*i, 777)); }
+  }
+
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(bt.param1);
+
+    FOR_INT32_INPUTS(i) { CHECK_EQ(*i, bt.call(666, *i)); }
+  }
+
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Float64BinopTester bt(&m);
+    bt.AddReturn(bt.param0);
+
+    FOR_FLOAT64_INPUTS(i) { CHECK_EQ(*i, bt.call(*i, 9.0)); }
+  }
+
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Float64BinopTester bt(&m);
+    bt.AddReturn(bt.param1);
+
+    FOR_FLOAT64_INPUTS(i) { CHECK_EQ(*i, bt.call(-11.25, *i)); }
+  }
+}
+
+#endif  // V8_TURBOFAN_TARGET
diff --git a/test/cctest/compiler/codegen-tester.h b/test/cctest/compiler/codegen-tester.h
new file mode 100644 (file)
index 0000000..2cb9a4e
--- /dev/null
@@ -0,0 +1,323 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_CODEGEN_TESTER_H_
+#define V8_CCTEST_COMPILER_CODEGEN_TESTER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/structured-machine-assembler.h"
+#include "src/simulator.h"
+#include "test/cctest/compiler/call-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
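+// Combines a machine assembler with the call/verification machinery from
+// call-tester.h, so a test can build a graph and directly Call() the
+// generated code.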
+template <typename MachineAssembler>
+class MachineAssemblerTester : public HandleAndZoneScope,
+                               public CallHelper,
+                               public MachineAssembler {
+ public:
+  MachineAssemblerTester(MachineRepresentation return_type,
+                         MachineRepresentation p0, MachineRepresentation p1,
+                         MachineRepresentation p2, MachineRepresentation p3,
+                         MachineRepresentation p4)
+      : HandleAndZoneScope(),
+        CallHelper(main_isolate()),
+        MachineAssembler(new (main_zone()) Graph(main_zone()),
+                         ToCallDescriptorBuilder(main_zone(), return_type, p0,
+                                                 p1, p2, p3, p4),
+                         MachineOperatorBuilder::pointer_rep()) {}
+
+  Node* LoadFromPointer(void* address, MachineRepresentation rep,
+                        int32_t offset = 0) {
+    return this->Load(rep, this->PointerConstant(address),
+                      this->Int32Constant(offset));
+  }
+
+  void StoreToPointer(void* address, MachineRepresentation rep, Node* node) {
+    this->Store(rep, this->PointerConstant(address), node);
+  }
+
+  Node* StringConstant(const char* string) {
+    return this->HeapConstant(
+        this->isolate()->factory()->InternalizeUtf8String(string));
+  }
+
+  void CheckNumber(double expected, Object* number) {
+    CHECK(this->isolate()->factory()->NewNumber(expected)->SameValue(number));
+  }
+
+  void CheckString(const char* expected, Object* string) {
+    CHECK(
+        this->isolate()->factory()->InternalizeUtf8String(expected)->SameValue(
+            string));
+  }
+
+  void GenerateCode() { Generate(); }
+
+ protected:
+  virtual void VerifyParameters(int parameter_count,
+                                MachineRepresentation* parameter_types) {
+    CHECK_EQ(this->parameter_count(), parameter_count);
+    const MachineRepresentation* expected_types = this->parameter_types();
+    for (int i = 0; i < parameter_count; i++) {
+      CHECK_EQ(expected_types[i], parameter_types[i]);
+    }
+  }
+
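+  // Lazily runs the TurboFan pipeline over the exported schedule the first
+  // time code is requested, caching the resulting Code handle.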
+  virtual byte* Generate() {
+    if (code_.is_null()) {
+      Schedule* schedule = this->Export();
+      CallDescriptor* call_descriptor = this->call_descriptor();
+      Graph* graph = this->graph();
+      CompilationInfo info(graph->zone()->isolate(), graph->zone());
+      Linkage linkage(&info, call_descriptor);
+      Pipeline pipeline(&info);
+      code_ = pipeline.GenerateCodeForMachineGraph(&linkage, graph, schedule);
+    }
+    return this->code_.ToHandleChecked()->entry();
+  }
+
+ private:
+  MaybeHandle<Code> code_;
+};
+
+
+template <typename ReturnType>
+class RawMachineAssemblerTester
+    : public MachineAssemblerTester<RawMachineAssembler>,
+      public CallHelper2<ReturnType, RawMachineAssemblerTester<ReturnType> > {
+ public:
+  RawMachineAssemblerTester(MachineRepresentation p0 = kMachineLast,
+                            MachineRepresentation p1 = kMachineLast,
+                            MachineRepresentation p2 = kMachineLast,
+                            MachineRepresentation p3 = kMachineLast,
+                            MachineRepresentation p4 = kMachineLast)
+      : MachineAssemblerTester(ReturnValueTraits<ReturnType>::Representation(),
+                               p0, p1, p2, p3, p4) {}
+};
+
+
+template <typename ReturnType>
+class StructuredMachineAssemblerTester
+    : public MachineAssemblerTester<StructuredMachineAssembler>,
+      public CallHelper2<ReturnType,
+                         StructuredMachineAssemblerTester<ReturnType> > {
+ public:
+  StructuredMachineAssemblerTester(MachineRepresentation p0 = kMachineLast,
+                                   MachineRepresentation p1 = kMachineLast,
+                                   MachineRepresentation p2 = kMachineLast,
+                                   MachineRepresentation p3 = kMachineLast,
+                                   MachineRepresentation p4 = kMachineLast)
+      : MachineAssemblerTester(ReturnValueTraits<ReturnType>::Representation(),
+                               p0, p1, p2, p3, p4) {}
+};
+
+
+static const bool USE_RESULT_BUFFER = true;
+static const bool USE_RETURN_REGISTER = false;
+
+// TODO(titzer): use the C-style calling convention, or any register-based
+// calling convention for binop tests.
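+// Base class for binop testers: both operands are loaded from the {p0} and
+// {p1} fields in memory, and the result either comes back in the return
+// register or is stored into {result}, in which case the generated code
+// returns CHECK_VALUE as a sanity marker.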
+template <typename CType, MachineRepresentation rep, bool use_result_buffer>
+class BinopTester {
+ public:
+  static const int32_t CHECK_VALUE = 0x99BEEDCE;
+
+  explicit BinopTester(RawMachineAssemblerTester<int32_t>* tester)
+      : T(tester),
+        param0(T->LoadFromPointer(&p0, rep)),
+        param1(T->LoadFromPointer(&p1, rep)),
+        p0(static_cast<CType>(0)),
+        p1(static_cast<CType>(0)),
+        result(static_cast<CType>(0)) {}
+
+  RawMachineAssemblerTester<int32_t>* T;
+  Node* param0;
+  Node* param1;
+
+  CType call(CType a0, CType a1) {
+    p0 = a0;
+    p1 = a1;
+    if (use_result_buffer) {
+      CHECK_EQ(CHECK_VALUE, T->Call());
+      return result;
+    } else {
+      return T->Call();
+    }
+  }
+
+  void AddReturn(Node* val) {
+    if (use_result_buffer) {
+      T->Store(rep, T->PointerConstant(&result), T->Int32Constant(0), val);
+      T->Return(T->Int32Constant(CHECK_VALUE));
+    } else {
+      T->Return(val);
+    }
+  }
+
+ protected:
+  CType p0;
+  CType p1;
+  CType result;
+};
+
+
+// A helper class for testing code sequences that take two int parameters and
+// return an int value.
+class Int32BinopTester
+    : public BinopTester<int32_t, kMachineWord32, USE_RETURN_REGISTER> {
+ public:
+  explicit Int32BinopTester(RawMachineAssemblerTester<int32_t>* tester)
+      : BinopTester<int32_t, kMachineWord32, USE_RETURN_REGISTER>(tester) {}
+
+  int32_t call(uint32_t a0, uint32_t a1) {
+    p0 = static_cast<int32_t>(a0);
+    p1 = static_cast<int32_t>(a1);
+    return T->Call();
+  }
+};
+
+
+// A helper class for testing code sequences that take two double parameters
+// and return a double value.
+// TODO(titzer): figure out how to return doubles correctly on ia32.
+class Float64BinopTester
+    : public BinopTester<double, kMachineFloat64, USE_RESULT_BUFFER> {
+ public:
+  explicit Float64BinopTester(RawMachineAssemblerTester<int32_t>* tester)
+      : BinopTester<double, kMachineFloat64, USE_RESULT_BUFFER>(tester) {}
+};
+
+
+// A helper class for testing code sequences that take two pointer parameters
+// and return a pointer value.
+// TODO(titzer): pick word size of pointers based on V8_TARGET.
+template <typename Type>
+class PointerBinopTester
+    : public BinopTester<Type*, kMachineWord32, USE_RETURN_REGISTER> {
+ public:
+  explicit PointerBinopTester(RawMachineAssemblerTester<int32_t>* tester)
+      : BinopTester<Type*, kMachineWord32, USE_RETURN_REGISTER>(tester) {}
+};
+
+
+// A helper class for testing code sequences that take two tagged parameters
+// and return a tagged value.
+template <typename Type>
+class TaggedBinopTester
+    : public BinopTester<Type*, kMachineTagged, USE_RETURN_REGISTER> {
+ public:
+  explicit TaggedBinopTester(RawMachineAssemblerTester<int32_t>* tester)
+      : BinopTester<Type*, kMachineTagged, USE_RETURN_REGISTER>(tester) {}
+};
+
+// A helper class for testing compares. Wraps a machine opcode and provides
+// both evaluation routines and the corresponding machine operators.
+class CompareWrapper {
+ public:
+  explicit CompareWrapper(IrOpcode::Value op) : opcode(op) {}
+
+  Node* MakeNode(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) {
+    return m->NewNode(op(m->machine()), a, b);
+  }
+
+  Operator* op(MachineOperatorBuilder* machine) {
+    switch (opcode) {
+      case IrOpcode::kWord32Equal:
+        return machine->Word32Equal();
+      case IrOpcode::kInt32LessThan:
+        return machine->Int32LessThan();
+      case IrOpcode::kInt32LessThanOrEqual:
+        return machine->Int32LessThanOrEqual();
+      case IrOpcode::kUint32LessThan:
+        return machine->Uint32LessThan();
+      case IrOpcode::kUint32LessThanOrEqual:
+        return machine->Uint32LessThanOrEqual();
+      case IrOpcode::kFloat64Equal:
+        return machine->Float64Equal();
+      case IrOpcode::kFloat64LessThan:
+        return machine->Float64LessThan();
+      case IrOpcode::kFloat64LessThanOrEqual:
+        return machine->Float64LessThanOrEqual();
+      default:
+        UNREACHABLE();
+    }
+    return NULL;
+  }
+
+  bool Int32Compare(int32_t a, int32_t b) {
+    switch (opcode) {
+      case IrOpcode::kWord32Equal:
+        return a == b;
+      case IrOpcode::kInt32LessThan:
+        return a < b;
+      case IrOpcode::kInt32LessThanOrEqual:
+        return a <= b;
+      case IrOpcode::kUint32LessThan:
+        return static_cast<uint32_t>(a) < static_cast<uint32_t>(b);
+      case IrOpcode::kUint32LessThanOrEqual:
+        return static_cast<uint32_t>(a) <= static_cast<uint32_t>(b);
+      default:
+        UNREACHABLE();
+    }
+    return false;
+  }
+
+  bool Float64Compare(double a, double b) {
+    switch (opcode) {
+      case IrOpcode::kFloat64Equal:
+        return a == b;
+      case IrOpcode::kFloat64LessThan:
+        return a < b;
+      case IrOpcode::kFloat64LessThanOrEqual:
+        return a <= b;
+      default:
+        UNREACHABLE();
+    }
+    return false;
+  }
+
+  IrOpcode::Value opcode;
+};
+
+
+// A small closure class that generates code for a function of two inputs
+// producing a single output, so it can be reused in many different contexts.
+// The {expected()} method should compute the expected output for a given
+// pair of inputs.
+template <typename T>
+class BinopGen {
+ public:
+  virtual void gen(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) = 0;
+  virtual T expected(T a, T b) = 0;
+  virtual ~BinopGen() {}
+};
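+
+// Illustrative sketch (Int32Add() on MachineOperatorBuilder is assumed here,
+// not defined in this file): a BinopGen for 32-bit addition could look like
+//
+//   class AddGen : public BinopGen<int32_t> {
+//     virtual void gen(RawMachineAssemblerTester<int32_t>* m, Node* a,
+//                      Node* b) {
+//       m->Return(m->NewNode(m->machine()->Int32Add(), a, b));
+//     }
+//     virtual int32_t expected(int32_t a, int32_t b) { return a + b; }
+//   };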
+
+// A helper class that generates various combinations of input shapes and
+// runs the generated code to ensure it produces the correct results.
+class Int32BinopInputShapeTester {
+ public:
+  explicit Int32BinopInputShapeTester(BinopGen<int32_t>* g) : gen(g) {}
+
+  void TestAllInputShapes();
+
+ private:
+  BinopGen<int32_t>* gen;
+  int32_t input_a;
+  int32_t input_b;
+
+  void Run(RawMachineAssemblerTester<int32_t>* m);
+  void RunLeft(RawMachineAssemblerTester<int32_t>* m);
+  void RunRight(RawMachineAssemblerTester<int32_t>* m);
+};
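+
+// Typical use (illustrative sketch): wrap a BinopGen in the shape tester and
+// let it sweep parameter/load/constant operand combinations:
+//
+//   AddGen gen;  // hypothetical BinopGen<int32_t> subclass, sketched above
+//   Int32BinopInputShapeTester tester(&gen);
+//   tester.TestAllInputShapes();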
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_CODEGEN_TESTER_H_
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(85537, 85539));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(-300, -299));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(-300, -300));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0xfffffffe, 0xffffffff));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0, -2995));
+
+  CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(1, 0));
+  CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(457, 456));
+  CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(85539, 85537));
+  CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(-130, -170));
+  CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(0xffffffff, 0xfffffffe));
+  CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(-2997, 0));
+
+  CompareWrapper wFloat64Equal(IrOpcode::kFloat64Equal);
+  CompareWrapper wFloat64LessThan(IrOpcode::kFloat64LessThan);
+  CompareWrapper wFloat64LessThanOrEqual(IrOpcode::kFloat64LessThanOrEqual);
+
+  // Check NaN handling.
+  double nan = v8::base::OS::nan_value();
+  double inf = V8_INFINITY;
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, 0.0));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, 1.0));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, -inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, nan));
+
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(0.0, nan));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(1.0, nan));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(inf, nan));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(-inf, nan));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, nan));
+
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(nan, 0.0));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(nan, 1.0));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(nan, inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(nan, -inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(nan, nan));
+
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(0.0, nan));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(1.0, nan));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, nan));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(-inf, nan));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(nan, nan));
+
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(nan, 0.0));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(nan, 1.0));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(nan, inf));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(nan, -inf));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(nan, nan));
+
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(0.0, nan));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(1.0, nan));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(inf, nan));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(-inf, nan));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(nan, nan));
+
+  // Check inf handling.
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(inf, 0.0));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(inf, 1.0));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(inf, inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(inf, -inf));
+
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(0.0, inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(1.0, inf));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(inf, inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(-inf, inf));
+
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, 0.0));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, 1.0));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, -inf));
+
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(0.0, inf));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(1.0, inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, inf));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(-inf, inf));
+
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(inf, 0.0));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(inf, 1.0));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(inf, inf));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(inf, -inf));
+
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(0.0, inf));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(1.0, inf));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(inf, inf));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-inf, inf));
+
+  // Check -inf handling.
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(-inf, 0.0));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(-inf, 1.0));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(-inf, inf));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(-inf, -inf));
+
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(0.0, -inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(1.0, -inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(inf, -inf));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(-inf, -inf));
+
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(-inf, 0.0));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(-inf, 1.0));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(-inf, inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(-inf, -inf));
+
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(0.0, -inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(1.0, -inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, -inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(-inf, -inf));
+
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-inf, 0.0));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-inf, 1.0));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-inf, inf));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-inf, -inf));
+
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(0.0, -inf));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(1.0, -inf));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(inf, -inf));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-inf, -inf));
+
+  // Check basic values.
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(0, 0));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(257.1, 257.1));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(65539.1, 65539.1));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(-1.1, -1.1));
+
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(0, 1));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(257.2, 256.2));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(65539.2, 65537.2));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(-1.2, -2.2));
+
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(0, 0));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(357.3, 357.3));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(75539.3, 75539.3));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(-1.3, -1.3));
+
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(0, 1));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(456.4, 457.4));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(85537.4, 85539.4));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(-2.4, -1.4));
+
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(1, 0));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(457.5, 456.5));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(85539.5, 85537.5));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(-1.5, -2.5));
+
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(0, 0));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(357.6, 357.6));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(75539.6, 75539.6));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-1.6, -1.6));
+
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(0, 1));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(456.7, 457.7));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(85537.7, 85539.7));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-2.7, -1.7));
+
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(1, 0));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(457.8, 456.8));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(85539.8, 85537.8));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(-1.8, -2.8));
+}
+
+
+void Int32BinopInputShapeTester::TestAllInputShapes() {
+  std::vector<int32_t> inputs = ValueHelper::int32_vector();
+  int num_int_inputs = static_cast<int>(inputs.size());
+  if (num_int_inputs > 16) num_int_inputs = 16;  // limit to 16 inputs
+
+  for (int i = -2; i < num_int_inputs; i++) {    // for all left shapes
+    for (int j = -2; j < num_int_inputs; j++) {  // for all right shapes
+      if (i >= 0 && j >= 0) break;               // No constant/constant combos
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+      Node* p0 = m.Parameter(0);
+      Node* p1 = m.Parameter(1);
+      Node* n0;
+      Node* n1;
+
+      // left = Parameter | Load | Constant
+      if (i == -2) {
+        n0 = p0;
+      } else if (i == -1) {
+        n0 = m.LoadFromPointer(&input_a, kMachineWord32);
+      } else {
+        n0 = m.Int32Constant(inputs[i]);
+      }
+
+      // right = Parameter | Load | Constant
+      if (j == -2) {
+        n1 = p1;
+      } else if (j == -1) {
+        n1 = m.LoadFromPointer(&input_b, kMachineWord32);
+      } else {
+        n1 = m.Int32Constant(inputs[j]);
+      }
+
+      gen->gen(&m, n0, n1);
+
+      if (false) printf("Int32BinopInputShapeTester i=%d, j=%d\n", i, j);
+      if (i >= 0) {
+        input_a = inputs[i];
+        RunRight(&m);
+      } else if (j >= 0) {
+        input_b = inputs[j];
+        RunLeft(&m);
+      } else {
+        Run(&m);
+      }
+    }
+  }
+}
+
+
+void Int32BinopInputShapeTester::Run(RawMachineAssemblerTester<int32_t>* m) {
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      input_a = *pl;
+      input_b = *pr;
+      int32_t expect = gen->expected(input_a, input_b);
+      if (false) printf("  cmp(a=%d, b=%d) ?== %d\n", input_a, input_b, expect);
+      CHECK_EQ(expect, m->Call(input_a, input_b));
+    }
+  }
+}
+
+
+void Int32BinopInputShapeTester::RunLeft(
+    RawMachineAssemblerTester<int32_t>* m) {
+  FOR_UINT32_INPUTS(i) {
+    input_a = *i;
+    int32_t expect = gen->expected(input_a, input_b);
+    if (false) printf("  cmp(a=%d, b=%d) ?== %d\n", input_a, input_b, expect);
+    CHECK_EQ(expect, m->Call(input_a, input_b));
+  }
+}
+
+
+void Int32BinopInputShapeTester::RunRight(
+    RawMachineAssemblerTester<int32_t>* m) {
+  FOR_UINT32_INPUTS(i) {
+    input_b = *i;
+    int32_t expect = gen->expected(input_a, input_b);
+    if (false) printf("  cmp(a=%d, b=%d) ?== %d\n", input_a, input_b, expect);
+    CHECK_EQ(expect, m->Call(input_a, input_b));
+  }
+}
+
+
+TEST(ParametersEqual) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+  Node* p1 = m.Parameter(1);
+  CHECK_NE(NULL, p1);
+  Node* p0 = m.Parameter(0);
+  CHECK_NE(NULL, p0);
+  CHECK_EQ(p0, m.Parameter(0));
+  CHECK_EQ(p1, m.Parameter(1));
+}
+
+
+#if V8_TURBOFAN_TARGET
+
+void RunSmiConstant(int32_t v) {
+// TODO(dcarney): on x64 Smis are generated with the SmiConstantRegister
+#if !V8_TARGET_ARCH_X64
+  if (Smi::IsValid(v)) {
+    RawMachineAssemblerTester<Object*> m;
+    m.Return(m.NumberConstant(v));
+    CHECK_EQ(Smi::FromInt(v), m.Call());
+  }
+#endif
+}
+
+
+void RunNumberConstant(double v) {
+  RawMachineAssemblerTester<Object*> m;
+#if V8_TARGET_ARCH_X64
+  // TODO(dcarney): on x64 Smis are generated with the SmiConstantRegister
+  Handle<Object> number = m.isolate()->factory()->NewNumber(v);
+  if (number->IsSmi()) return;
+#endif
+  m.Return(m.NumberConstant(v));
+  Object* result = m.Call();
+  m.CheckNumber(v, result);
+}
+
+
+TEST(RunEmpty) {
+  RawMachineAssemblerTester<int32_t> m;
+  m.Return(m.Int32Constant(0));
+  CHECK_EQ(0, m.Call());
+}
+
+
+TEST(RunInt32Constants) {
+  FOR_INT32_INPUTS(i) {
+    RawMachineAssemblerTester<int32_t> m;
+    m.Return(m.Int32Constant(*i));
+    CHECK_EQ(*i, m.Call());
+  }
+}
+
+
+TEST(RunSmiConstants) {
+  for (int32_t i = 1; i < Smi::kMaxValue && i != 0; i = i << 1) {
+    RunSmiConstant(i);
+    RunSmiConstant(3 * i);
+    RunSmiConstant(5 * i);
+    RunSmiConstant(-i);
+    RunSmiConstant(i | 1);
+    RunSmiConstant(i | 3);
+  }
+  RunSmiConstant(Smi::kMaxValue);
+  RunSmiConstant(Smi::kMaxValue - 1);
+  RunSmiConstant(Smi::kMinValue);
+  RunSmiConstant(Smi::kMinValue + 1);
+
+  FOR_INT32_INPUTS(i) { RunSmiConstant(*i); }
+}
+
+
+TEST(RunNumberConstants) {
+  {
+    FOR_FLOAT64_INPUTS(i) { RunNumberConstant(*i); }
+  }
+  {
+    FOR_INT32_INPUTS(i) { RunNumberConstant(*i); }
+  }
+
+  for (int32_t i = 1; i < Smi::kMaxValue && i != 0; i = i << 1) {
+    RunNumberConstant(i);
+    RunNumberConstant(-i);
+    RunNumberConstant(i | 1);
+    RunNumberConstant(i | 3);
+  }
+  RunNumberConstant(Smi::kMaxValue);
+  RunNumberConstant(Smi::kMaxValue - 1);
+  RunNumberConstant(Smi::kMinValue);
+  RunNumberConstant(Smi::kMinValue + 1);
+}
+
+
+TEST(RunEmptyString) {
+  RawMachineAssemblerTester<Object*> m;
+  m.Return(m.StringConstant("empty"));
+  m.CheckString("empty", m.Call());
+}
+
+
+TEST(RunHeapConstant) {
+  RawMachineAssemblerTester<Object*> m;
+  m.Return(m.StringConstant("empty"));
+  m.CheckString("empty", m.Call());
+}
+
+
+TEST(RunHeapNumberConstant) {
+  RawMachineAssemblerTester<Object*> m;
+  Handle<Object> number = m.isolate()->factory()->NewHeapNumber(100.5);
+  m.Return(m.HeapConstant(number));
+  Object* result = m.Call();
+  CHECK_EQ(result, *number);
+}
+
+
+TEST(RunParam1) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  m.Return(m.Parameter(0));
+
+  FOR_INT32_INPUTS(i) {
+    int32_t result = m.Call(*i);
+    CHECK_EQ(*i, result);
+  }
+}
+
+
+TEST(RunParam2_1) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+  Node* p0 = m.Parameter(0);
+  Node* p1 = m.Parameter(1);
+  m.Return(p0);
+  USE(p1);
+
+  FOR_INT32_INPUTS(i) {
+    int32_t result = m.Call(*i, -9999);
+    CHECK_EQ(*i, result);
+  }
+}
+
+
+TEST(RunParam2_2) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+  Node* p0 = m.Parameter(0);
+  Node* p1 = m.Parameter(1);
+  m.Return(p1);
+  USE(p0);
+
+  FOR_INT32_INPUTS(i) {
+    int32_t result = m.Call(-7777, *i);
+    CHECK_EQ(*i, result);
+  }
+}
+
+
+TEST(RunParam3) {
+  for (int i = 0; i < 3; i++) {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    Node* nodes[] = {m.Parameter(0), m.Parameter(1), m.Parameter(2)};
+    m.Return(nodes[i]);
+
+    int p[] = {-99, -77, -88};
+    FOR_INT32_INPUTS(j) {
+      p[i] = *j;
+      int32_t result = m.Call(p[0], p[1], p[2]);
+      CHECK_EQ(*j, result);
+    }
+  }
+}
+
+
+TEST(RunBinopTester) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(bt.param0);
+
+    FOR_INT32_INPUTS(i) { CHECK_EQ(*i, bt.call(*i, 777)); }
+  }
+
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(bt.param1);
+
+    FOR_INT32_INPUTS(i) { CHECK_EQ(*i, bt.call(666, *i)); }
+  }
+
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Float64BinopTester bt(&m);
+    bt.AddReturn(bt.param0);
+
+    FOR_FLOAT64_INPUTS(i) { CHECK_EQ(*i, bt.call(*i, 9.0)); }
+  }
+
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Float64BinopTester bt(&m);
+    bt.AddReturn(bt.param1);
+
+    FOR_FLOAT64_INPUTS(i) { CHECK_EQ(*i, bt.call(-11.25, *i)); }
+  }
+}
+
+#endif  // V8_TURBOFAN_TARGET
diff --git a/test/cctest/compiler/codegen-tester.h b/test/cctest/compiler/codegen-tester.h
new file mode 100644 (file)
index 0000000..2cb9a4e
--- /dev/null
@@ -0,0 +1,323 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_CODEGEN_TESTER_H_
+#define V8_CCTEST_COMPILER_CODEGEN_TESTER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/structured-machine-assembler.h"
+#include "src/simulator.h"
+#include "test/cctest/compiler/call-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <typename MachineAssembler>
+class MachineAssemblerTester : public HandleAndZoneScope,
+                               public CallHelper,
+                               public MachineAssembler {
+ public:
+  MachineAssemblerTester(MachineRepresentation return_type,
+                         MachineRepresentation p0, MachineRepresentation p1,
+                         MachineRepresentation p2, MachineRepresentation p3,
+                         MachineRepresentation p4)
+      : HandleAndZoneScope(),
+        CallHelper(main_isolate()),
+        MachineAssembler(new (main_zone()) Graph(main_zone()),
+                         ToCallDescriptorBuilder(main_zone(), return_type, p0,
+                                                 p1, p2, p3, p4),
+                         MachineOperatorBuilder::pointer_rep()) {}
+
+  Node* LoadFromPointer(void* address, MachineRepresentation rep,
+                        int32_t offset = 0) {
+    return this->Load(rep, this->PointerConstant(address),
+                      this->Int32Constant(offset));
+  }
+
+  void StoreToPointer(void* address, MachineRepresentation rep, Node* node) {
+    this->Store(rep, this->PointerConstant(address), node);
+  }
+
+  Node* StringConstant(const char* string) {
+    return this->HeapConstant(
+        this->isolate()->factory()->InternalizeUtf8String(string));
+  }
+
+  void CheckNumber(double expected, Object* number) {
+    CHECK(this->isolate()->factory()->NewNumber(expected)->SameValue(number));
+  }
+
+  void CheckString(const char* expected, Object* string) {
+    CHECK(
+        this->isolate()->factory()->InternalizeUtf8String(expected)->SameValue(
+            string));
+  }
+
+  void GenerateCode() { Generate(); }
+
+ protected:
+  virtual void VerifyParameters(int parameter_count,
+                                MachineRepresentation* parameter_types) {
+    CHECK_EQ(this->parameter_count(), parameter_count);
+    const MachineRepresentation* expected_types = this->parameter_types();
+    for (int i = 0; i < parameter_count; i++) {
+      CHECK_EQ(expected_types[i], parameter_types[i]);
+    }
+  }
+
+  virtual byte* Generate() {
+    if (code_.is_null()) {
+      Schedule* schedule = this->Export();
+      CallDescriptor* call_descriptor = this->call_descriptor();
+      Graph* graph = this->graph();
+      CompilationInfo info(graph->zone()->isolate(), graph->zone());
+      Linkage linkage(&info, call_descriptor);
+      Pipeline pipeline(&info);
+      code_ = pipeline.GenerateCodeForMachineGraph(&linkage, graph, schedule);
+    }
+    return this->code_.ToHandleChecked()->entry();
+  }
+
+ private:
+  MaybeHandle<Code> code_;
+};
+
+
+template <typename ReturnType>
+class RawMachineAssemblerTester
+    : public MachineAssemblerTester<RawMachineAssembler>,
+      public CallHelper2<ReturnType, RawMachineAssemblerTester<ReturnType> > {
+ public:
+  RawMachineAssemblerTester(MachineRepresentation p0 = kMachineLast,
+                            MachineRepresentation p1 = kMachineLast,
+                            MachineRepresentation p2 = kMachineLast,
+                            MachineRepresentation p3 = kMachineLast,
+                            MachineRepresentation p4 = kMachineLast)
+      : MachineAssemblerTester(ReturnValueTraits<ReturnType>::Representation(),
+                               p0, p1, p2, p3, p4) {}
+};
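+
+// A minimal usage sketch, mirroring the tests in codegen-tester.cc:
+// parameters are declared via the constructor's MachineRepresentation list,
+// and the generated code is invoked through CallHelper2::Call with matching
+// C types.
+//
+//   RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+//   m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
+//   CHECK_EQ(30, m.Call(10, 20));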
+
+
+template <typename ReturnType>
+class StructuredMachineAssemblerTester
+    : public MachineAssemblerTester<StructuredMachineAssembler>,
+      public CallHelper2<ReturnType,
+                         StructuredMachineAssemblerTester<ReturnType> > {
+ public:
+  StructuredMachineAssemblerTester(MachineRepresentation p0 = kMachineLast,
+                                   MachineRepresentation p1 = kMachineLast,
+                                   MachineRepresentation p2 = kMachineLast,
+                                   MachineRepresentation p3 = kMachineLast,
+                                   MachineRepresentation p4 = kMachineLast)
+      : MachineAssemblerTester(ReturnValueTraits<ReturnType>::Representation(),
+                               p0, p1, p2, p3, p4) {}
+};
+
+
+static const bool USE_RESULT_BUFFER = true;
+static const bool USE_RETURN_REGISTER = false;
+
+// TODO(titzer): use the C-style calling convention, or any register-based
+// calling convention for binop tests.
+template <typename CType, MachineRepresentation rep, bool use_result_buffer>
+class BinopTester {
+ public:
+  static const int32_t CHECK_VALUE = 0x99BEEDCE;
+
+  explicit BinopTester(RawMachineAssemblerTester<int32_t>* tester)
+      : T(tester),
+        param0(T->LoadFromPointer(&p0, rep)),
+        param1(T->LoadFromPointer(&p1, rep)),
+        p0(static_cast<CType>(0)),
+        p1(static_cast<CType>(0)),
+        result(static_cast<CType>(0)) {}
+
+  RawMachineAssemblerTester<int32_t>* T;
+  Node* param0;
+  Node* param1;
+
+  CType call(CType a0, CType a1) {
+    p0 = a0;
+    p1 = a1;
+    if (use_result_buffer) {
+      CHECK_EQ(CHECK_VALUE, T->Call());
+      return result;
+    } else {
+      return T->Call();
+    }
+  }
+
+  void AddReturn(Node* val) {
+    if (use_result_buffer) {
+      T->Store(rep, T->PointerConstant(&result), T->Int32Constant(0), val);
+      T->Return(T->Int32Constant(CHECK_VALUE));
+    } else {
+      T->Return(val);
+    }
+  }
+
+ protected:
+  CType p0;
+  CType p1;
+  CType result;
+};
+
+
+// A helper class for testing code sequences that take two int parameters and
+// return an int value.
+class Int32BinopTester
+    : public BinopTester<int32_t, kMachineWord32, USE_RETURN_REGISTER> {
+ public:
+  explicit Int32BinopTester(RawMachineAssemblerTester<int32_t>* tester)
+      : BinopTester<int32_t, kMachineWord32, USE_RETURN_REGISTER>(tester) {}
+
+  int32_t call(uint32_t a0, uint32_t a1) {
+    p0 = static_cast<int32_t>(a0);
+    p1 = static_cast<int32_t>(a1);
+    return T->Call();
+  }
+};
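+
+// Illustrative flow (mirroring TEST(RunBinopTester) in codegen-tester.cc):
+// both operands are read through loads from the tester's buffers, so call()
+// only stores the inputs and invokes the generated code.
+//
+//   RawMachineAssemblerTester<int32_t> m;
+//   Int32BinopTester bt(&m);
+//   bt.AddReturn(m.Int32Add(bt.param0, bt.param1));
+//   CHECK_EQ(3, bt.call(1, 2));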
+
+
+// A helper class for testing code sequences that take two double parameters and
+// return a double value.
+// TODO(titzer): figure out how to return doubles correctly on ia32.
+class Float64BinopTester
+    : public BinopTester<double, kMachineFloat64, USE_RESULT_BUFFER> {
+ public:
+  explicit Float64BinopTester(RawMachineAssemblerTester<int32_t>* tester)
+      : BinopTester<double, kMachineFloat64, USE_RESULT_BUFFER>(tester) {}
+};
+
+
+// A helper class for testing code sequences that take two pointer parameters
+// and return a pointer value.
+// TODO(titzer): pick word size of pointers based on V8_TARGET.
+template <typename Type>
+class PointerBinopTester
+    : public BinopTester<Type*, kMachineWord32, USE_RETURN_REGISTER> {
+ public:
+  explicit PointerBinopTester(RawMachineAssemblerTester<int32_t>* tester)
+      : BinopTester<Type*, kMachineWord32, USE_RETURN_REGISTER>(tester) {}
+};
+
+
+// A helper class for testing code sequences that take two tagged parameters and
+// return a tagged value.
+template <typename Type>
+class TaggedBinopTester
+    : public BinopTester<Type*, kMachineTagged, USE_RETURN_REGISTER> {
+ public:
+  explicit TaggedBinopTester(RawMachineAssemblerTester<int32_t>* tester)
+      : BinopTester<Type*, kMachineTagged, USE_RETURN_REGISTER>(tester) {}
+};
+
+// A helper class for testing compares. Wraps a machine opcode and provides
+// both reference evaluation routines and the corresponding machine operators.
+class CompareWrapper {
+ public:
+  explicit CompareWrapper(IrOpcode::Value op) : opcode(op) {}
+
+  Node* MakeNode(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) {
+    return m->NewNode(op(m->machine()), a, b);
+  }
+
+  Operator* op(MachineOperatorBuilder* machine) {
+    switch (opcode) {
+      case IrOpcode::kWord32Equal:
+        return machine->Word32Equal();
+      case IrOpcode::kInt32LessThan:
+        return machine->Int32LessThan();
+      case IrOpcode::kInt32LessThanOrEqual:
+        return machine->Int32LessThanOrEqual();
+      case IrOpcode::kUint32LessThan:
+        return machine->Uint32LessThan();
+      case IrOpcode::kUint32LessThanOrEqual:
+        return machine->Uint32LessThanOrEqual();
+      case IrOpcode::kFloat64Equal:
+        return machine->Float64Equal();
+      case IrOpcode::kFloat64LessThan:
+        return machine->Float64LessThan();
+      case IrOpcode::kFloat64LessThanOrEqual:
+        return machine->Float64LessThanOrEqual();
+      default:
+        UNREACHABLE();
+    }
+    return NULL;
+  }
+
+  bool Int32Compare(int32_t a, int32_t b) {
+    switch (opcode) {
+      case IrOpcode::kWord32Equal:
+        return a == b;
+      case IrOpcode::kInt32LessThan:
+        return a < b;
+      case IrOpcode::kInt32LessThanOrEqual:
+        return a <= b;
+      case IrOpcode::kUint32LessThan:
+        return static_cast<uint32_t>(a) < static_cast<uint32_t>(b);
+      case IrOpcode::kUint32LessThanOrEqual:
+        return static_cast<uint32_t>(a) <= static_cast<uint32_t>(b);
+      default:
+        UNREACHABLE();
+    }
+    return false;
+  }
+
+  bool Float64Compare(double a, double b) {
+    switch (opcode) {
+      case IrOpcode::kFloat64Equal:
+        return a == b;
+      case IrOpcode::kFloat64LessThan:
+        return a < b;
+      case IrOpcode::kFloat64LessThanOrEqual:
+        return a <= b;
+      default:
+        UNREACHABLE();
+    }
+    return false;
+  }
+
+  IrOpcode::Value opcode;
+};
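+
+// Illustrative only: CompareWrapper lets a test build the comparison node
+// and compute its expected value from the same object:
+//
+//   RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+//   CompareWrapper w(IrOpcode::kInt32LessThan);
+//   m.Return(w.MakeNode(&m, m.Parameter(0), m.Parameter(1)));
+//   CHECK_EQ(w.Int32Compare(3, 4), m.Call(3, 4));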
+
+
+// A small closure class to generate code for a function of two inputs that
+// produces a single output so that it can be used in many different contexts.
+// The {expected()} method should compute the expected output for a given
+// pair of inputs.
+template <typename T>
+class BinopGen {
+ public:
+  virtual void gen(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) = 0;
+  virtual T expected(T a, T b) = 0;
+  virtual ~BinopGen() {}
+};
+
+// A helper class that generates the various combinations of input shapes
+// (parameter, load, or constant) for a binary operation and runs the
+// generated code to ensure it produces the correct results.
+class Int32BinopInputShapeTester {
+ public:
+  explicit Int32BinopInputShapeTester(BinopGen<int32_t>* g) : gen(g) {}
+
+  void TestAllInputShapes();
+
+ private:
+  BinopGen<int32_t>* gen;
+  int32_t input_a;
+  int32_t input_b;
+
+  void Run(RawMachineAssemblerTester<int32_t>* m);
+  void RunLeft(RawMachineAssemblerTester<int32_t>* m);
+  void RunRight(RawMachineAssemblerTester<int32_t>* m);
+};
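+
+// A hypothetical end-to-end sketch: subclass BinopGen for the operation
+// under test, then let the shape tester enumerate parameter/load/constant
+// operand combinations (Int32AddGen below is illustrative, not part of this
+// patch):
+//
+//   class Int32AddGen : public BinopGen<int32_t> {
+//    public:
+//     virtual void gen(RawMachineAssemblerTester<int32_t>* m, Node* a,
+//                      Node* b) {
+//       m->Return(m->Int32Add(a, b));
+//     }
+//     virtual int32_t expected(int32_t a, int32_t b) { return a + b; }
+//   };
+//
+//   Int32AddGen gen;
+//   Int32BinopInputShapeTester tester(&gen);
+//   tester.TestAllInputShapes();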
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_CODEGEN_TESTER_H_
diff --git a/test/cctest/compiler/function-tester.h b/test/cctest/compiler/function-tester.h
new file mode 100644 (file)
index 0000000..2ed2fe9
--- /dev/null
@@ -0,0 +1,194 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_FUNCTION_TESTER_H_
+#define V8_CCTEST_COMPILER_FUNCTION_TESTER_H_
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler.h"
+#include "src/compiler/pipeline.h"
+#include "src/execution.h"
+#include "src/full-codegen.h"
+#include "src/handles.h"
+#include "src/objects-inl.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/scopes.h"
+
+#define USE_CRANKSHAFT 0
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class FunctionTester : public InitializedHandleScope {
+ public:
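+  // Compiles {source} into a JSFunction; the comma expression in the
+  // initializer below enables --allow-natives-syntax first so that test
+  // sources may use % runtime calls.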
+  explicit FunctionTester(const char* source)
+      : isolate(main_isolate()),
+        function((FLAG_allow_natives_syntax = true, NewFunction(source))) {
+    Compile(function);
+  }
+
+  Isolate* isolate;
+  Handle<JSFunction> function;
+
+  Handle<JSFunction> Compile(Handle<JSFunction> function) {
+#if V8_TURBOFAN_TARGET
+    CompilationInfoWithZone info(function);
+
+    CHECK(Parser::Parse(&info));
+    StrictMode strict_mode = info.function()->strict_mode();
+    info.SetStrictMode(strict_mode);
+    info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
+    CHECK(Rewriter::Rewrite(&info));
+    CHECK(Scope::Analyze(&info));
+    CHECK_NE(NULL, info.scope());
+
+    EnsureDeoptimizationSupport(&info);
+
+    Pipeline pipeline(&info);
+    Handle<Code> code = pipeline.GenerateCode();
+
+    CHECK(!code.is_null());
+    function->ReplaceCode(*code);
+#elif USE_CRANKSHAFT
+    Handle<Code> unoptimized = Handle<Code>(function->code());
+    Handle<Code> code = Compiler::GetOptimizedCode(function, unoptimized,
+                                                   Compiler::NOT_CONCURRENT);
+    CHECK(!code.is_null());
+#if ENABLE_DISASSEMBLER
+    if (FLAG_print_opt_code) {
+      CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+      code->Disassemble("test code", tracing_scope.file());
+    }
+#endif
+    function->ReplaceCode(*code);
+#endif
+    return function;
+  }
+
+  static void EnsureDeoptimizationSupport(CompilationInfo* info) {
+    bool should_recompile = !info->shared_info()->has_deoptimization_support();
+    if (should_recompile) {
+      CompilationInfoWithZone unoptimized(info->shared_info());
+      // Note that we use the same AST that we will use for generating the
+      // optimized code.
+      unoptimized.SetFunction(info->function());
+      unoptimized.PrepareForCompilation(info->scope());
+      unoptimized.SetContext(info->context());
+      if (should_recompile) unoptimized.EnableDeoptimizationSupport();
+      bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
+      CHECK(succeeded);
+      Handle<SharedFunctionInfo> shared = info->shared_info();
+      shared->EnableDeoptimizationSupport(*unoptimized.code());
+    }
+  }
+
+  MaybeHandle<Object> Call(Handle<Object> a, Handle<Object> b) {
+    Handle<Object> args[] = {a, b};
+    return Execution::Call(isolate, function, undefined(), 2, args, false);
+  }
+
+  void CheckThrows(Handle<Object> a, Handle<Object> b) {
+    TryCatch try_catch;
+    MaybeHandle<Object> no_result = Call(a, b);
+    CHECK(isolate->has_pending_exception());
+    CHECK(try_catch.HasCaught());
+    CHECK(no_result.is_null());
+    // TODO(mstarzinger): Temporary workaround for issue chromium:362388.
+    isolate->OptionalRescheduleException(true);
+  }
+
+  v8::Handle<v8::Message> CheckThrowsReturnMessage(Handle<Object> a,
+                                                   Handle<Object> b) {
+    TryCatch try_catch;
+    MaybeHandle<Object> no_result = Call(a, b);
+    CHECK(isolate->has_pending_exception());
+    CHECK(try_catch.HasCaught());
+    CHECK(no_result.is_null());
+    // TODO(mstarzinger): Calling OptionalRescheduleException is a dirty hack;
+    // it is the only way to keep Message() from asserting after an external
+    // exception has been caught by the try_catch.
+    isolate->OptionalRescheduleException(true);
+    return try_catch.Message();
+  }
+
+  void CheckCall(Handle<Object> expected, Handle<Object> a, Handle<Object> b) {
+    Handle<Object> result = Call(a, b).ToHandleChecked();
+    CHECK(expected->SameValue(*result));
+  }
+
+  void CheckCall(Handle<Object> expected, Handle<Object> a) {
+    CheckCall(expected, a, undefined());
+  }
+
+  void CheckCall(Handle<Object> expected) {
+    CheckCall(expected, undefined(), undefined());
+  }
+
+  void CheckCall(double expected, double a, double b) {
+    CheckCall(Val(expected), Val(a), Val(b));
+  }
+
+  void CheckTrue(Handle<Object> a, Handle<Object> b) {
+    CheckCall(true_value(), a, b);
+  }
+
+  void CheckTrue(Handle<Object> a) { CheckCall(true_value(), a, undefined()); }
+
+  void CheckTrue(double a, double b) {
+    CheckCall(true_value(), Val(a), Val(b));
+  }
+
+  void CheckFalse(Handle<Object> a, Handle<Object> b) {
+    CheckCall(false_value(), a, b);
+  }
+
+  void CheckFalse(Handle<Object> a) {
+    CheckCall(false_value(), a, undefined());
+  }
+
+  void CheckFalse(double a, double b) {
+    CheckCall(false_value(), Val(a), Val(b));
+  }
+
+  Handle<JSFunction> NewFunction(const char* source) {
+    return v8::Utils::OpenHandle(
+        *v8::Handle<v8::Function>::Cast(CompileRun(source)));
+  }
+
+  Handle<JSObject> NewObject(const char* source) {
+    return v8::Utils::OpenHandle(
+        *v8::Handle<v8::Object>::Cast(CompileRun(source)));
+  }
+
+  Handle<String> Val(const char* string) {
+    return isolate->factory()->InternalizeUtf8String(string);
+  }
+
+  Handle<Object> Val(double value) {
+    return isolate->factory()->NewNumber(value);
+  }
+
+  Handle<Object> infinity() { return isolate->factory()->infinity_value(); }
+
+  Handle<Object> minus_infinity() { return Val(-V8_INFINITY); }
+
+  Handle<Object> nan() { return isolate->factory()->nan_value(); }
+
+  Handle<Object> undefined() { return isolate->factory()->undefined_value(); }
+
+  Handle<Object> null() { return isolate->factory()->null_value(); }
+
+  Handle<Object> true_value() { return isolate->factory()->true_value(); }
+
+  Handle<Object> false_value() { return isolate->factory()->false_value(); }
+};
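+
+// A minimal usage sketch (the source string is arbitrary):
+//
+//   FunctionTester T("(function(a, b) { return a + b; })");
+//   T.CheckCall(T.Val(3), T.Val(1), T.Val(2));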
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_FUNCTION_TESTER_H_
diff --git a/test/cctest/compiler/graph-builder-tester.cc b/test/cctest/compiler/graph-builder-tester.cc
new file mode 100644 (file)
index 0000000..2d8f9d5
--- /dev/null
@@ -0,0 +1,64 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/compiler/graph-builder-tester.h"
+#include "src/compiler/pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+MachineCallHelper::MachineCallHelper(Zone* zone,
+                                     MachineCallDescriptorBuilder* builder)
+    : CallHelper(zone->isolate()),
+      call_descriptor_builder_(builder),
+      parameters_(NULL),
+      graph_(NULL) {}
+
+
+void MachineCallHelper::InitParameters(GraphBuilder* builder,
+                                       CommonOperatorBuilder* common) {
+  ASSERT_EQ(NULL, parameters_);
+  graph_ = builder->graph();
+  if (parameter_count() == 0) return;
+  parameters_ = builder->graph()->zone()->NewArray<Node*>(parameter_count());
+  for (int i = 0; i < parameter_count(); ++i) {
+    parameters_[i] = builder->NewNode(common->Parameter(i));
+  }
+}
+
+
+byte* MachineCallHelper::Generate() {
+  ASSERT(parameter_count() == 0 || parameters_ != NULL);
+  if (code_.is_null()) {
+    Zone* zone = graph_->zone();
+    CompilationInfo info(zone->isolate(), zone);
+    Linkage linkage(&info, call_descriptor_builder_->BuildCallDescriptor(zone));
+    Pipeline pipeline(&info);
+    code_ = pipeline.GenerateCodeForMachineGraph(&linkage, graph_);
+  }
+  return code_.ToHandleChecked()->entry();
+}
+
+
+void MachineCallHelper::VerifyParameters(
+    int parameter_count, MachineRepresentation* parameter_types) {
+  CHECK_EQ(this->parameter_count(), parameter_count);
+  const MachineRepresentation* expected_types =
+      call_descriptor_builder_->parameter_types();
+  for (int i = 0; i < parameter_count; i++) {
+    CHECK_EQ(expected_types[i], parameter_types[i]);
+  }
+}
+
+
+Node* MachineCallHelper::Parameter(int offset) {
+  ASSERT_NE(NULL, parameters_);
+  ASSERT(0 <= offset && offset < parameter_count());
+  return parameters_[offset];
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/test/cctest/compiler/graph-builder-tester.h b/test/cctest/compiler/graph-builder-tester.h
new file mode 100644 (file)
index 0000000..096828a
--- /dev/null
@@ -0,0 +1,111 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_
+#define V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/machine-node-factory.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/simplified-node-factory.h"
+#include "src/compiler/simplified-operator.h"
+#include "test/cctest/compiler/call-tester.h"
+#include "test/cctest/compiler/simplified-graph-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A class that just passes node creation on to the Graph.
+class DirectGraphBuilder : public GraphBuilder {
+ public:
+  explicit DirectGraphBuilder(Graph* graph) : GraphBuilder(graph) {}
+  virtual ~DirectGraphBuilder() {}
+
+ protected:
+  virtual Node* MakeNode(Operator* op, int value_input_count,
+                         Node** value_inputs) {
+    return graph()->NewNode(op, value_input_count, value_inputs);
+  }
+};
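+
+// Illustrative only: DirectGraphBuilder adds no environment or effect
+// tracking, so nodes are created exactly as requested (a sketch, assuming
+// the CommonOperatorBuilder constant operators):
+//
+//   Graph graph(zone);
+//   CommonOperatorBuilder common(zone);
+//   DirectGraphBuilder b(&graph);
+//   Node* k = b.NewNode(common.Int32Constant(42));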
+
+
+class MachineCallHelper : public CallHelper {
+ public:
+  MachineCallHelper(Zone* zone, MachineCallDescriptorBuilder* builder);
+
+  Node* Parameter(int offset);
+
+ protected:
+  virtual byte* Generate();
+  virtual void VerifyParameters(int parameter_count,
+                                MachineRepresentation* parameters);
+  void InitParameters(GraphBuilder* builder, CommonOperatorBuilder* common);
+
+ private:
+  int parameter_count() const {
+    return call_descriptor_builder_->parameter_count();
+  }
+  MachineCallDescriptorBuilder* call_descriptor_builder_;
+  Node** parameters_;
+  // TODO(dcarney): shouldn't need graph stored.
+  Graph* graph_;
+  MaybeHandle<Code> code_;
+};
+
+
+class GraphAndBuilders {
+ public:
+  explicit GraphAndBuilders(Zone* zone)
+      : main_graph_(new (zone) Graph(zone)),
+        main_common_(zone),
+        main_machine_(zone),
+        main_simplified_(zone) {}
+
+ protected:
+  // Prefixed with main_ to avoid naming conflicts.
+  Graph* const main_graph_;
+  CommonOperatorBuilder main_common_;
+  MachineOperatorBuilder main_machine_;
+  SimplifiedOperatorBuilder main_simplified_;
+};
+
+
+template <typename ReturnType>
+class GraphBuilderTester
+    : public HandleAndZoneScope,
+      private GraphAndBuilders,
+      public MachineCallHelper,
+      public SimplifiedGraphBuilder,
+      public CallHelper2<ReturnType, GraphBuilderTester<ReturnType> > {
+ public:
+  explicit GraphBuilderTester(MachineRepresentation p0,
+                              MachineRepresentation p1,
+                              MachineRepresentation p2,
+                              MachineRepresentation p3,
+                              MachineRepresentation p4)
+      : GraphAndBuilders(main_zone()),
+        MachineCallHelper(
+            main_zone(),
+            ToCallDescriptorBuilder(
+                main_zone(), ReturnValueTraits<ReturnType>::Representation(),
+                p0, p1, p2, p3, p4)),
+        SimplifiedGraphBuilder(main_graph_, &main_common_, &main_machine_,
+                               &main_simplified_) {
+    Begin();
+    InitParameters(this, &main_common_);
+  }
+  virtual ~GraphBuilderTester() {}
+
+  Factory* factory() const { return isolate()->factory(); }
+};
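+
+// A minimal usage sketch; Begin() has already been called by the
+// constructor, so a test only adds nodes, returns a value, and calls the
+// generated code. The trailing kMachineLast arguments mark unused parameter
+// slots (an assumption based on the testers in codegen-tester.h).
+//
+//   GraphBuilderTester<int32_t> t(kMachineWord32, kMachineWord32,
+//                                 kMachineLast, kMachineLast, kMachineLast);
+//   t.Return(t.Int32Add(t.Parameter(0), t.Parameter(1)));
+//   t.End();
+//   CHECK_EQ(7, t.Call(3, 4));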
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_
diff --git a/test/cctest/compiler/graph-tester.h b/test/cctest/compiler/graph-tester.h
new file mode 100644 (file)
index 0000000..41dfa07
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_GRAPH_TESTER_H_
+#define V8_CCTEST_COMPILER_GRAPH_TESTER_H_
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class GraphTester : public HandleAndZoneScope, public Graph {
+ public:
+  GraphTester() : Graph(main_zone()) {}
+};
+
+
+class GraphWithStartNodeTester : public GraphTester {
+ public:
+  GraphWithStartNodeTester()
+      : builder_(main_zone()), start_node_(NewNode(builder_.Start())) {
+    SetStart(start_node_);
+  }
+
+  Node* start_node() { return start_node_; }
+
+ private:
+  CommonOperatorBuilder builder_;
+  Node* start_node_;
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_GRAPH_TESTER_H_
diff --git a/test/cctest/compiler/instruction-selector-tester.h b/test/cctest/compiler/instruction-selector-tester.h
new file mode 100644 (file)
index 0000000..2a84b57
--- /dev/null
@@ -0,0 +1,119 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_INSTRUCTION_SELECTOR_TEST_H_
+#define V8_CCTEST_COMPILER_INSTRUCTION_SELECTOR_TEST_H_
+
+#include <deque>
+#include <set>
+
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/ostreams.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef std::set<int> VirtualRegisterSet;
+
+enum InstructionSelectorTesterMode { kTargetMode, kInternalMode };
+
+class InstructionSelectorTester : public HandleAndZoneScope,
+                                  public RawMachineAssembler {
+ public:
+  enum Mode { kTargetMode, kInternalMode };
+
+  static const int kParameterCount = 3;
+  static MachineRepresentation* BuildParameterArray(Zone* zone) {
+    MachineRepresentation* array =
+        zone->NewArray<MachineRepresentation>(kParameterCount);
+    for (int i = 0; i < kParameterCount; ++i) {
+      array[i] = kMachineWord32;
+    }
+    return array;
+  }
+
+  explicit InstructionSelectorTester(Mode mode = kTargetMode)
+      : RawMachineAssembler(
+            new (main_zone()) Graph(main_zone()), new (main_zone())
+            MachineCallDescriptorBuilder(kMachineWord32, kParameterCount,
+                                         BuildParameterArray(main_zone())),
+            MachineOperatorBuilder::pointer_rep()),
+        mode_(mode) {}
+
+  void SelectInstructions() {
+    OFStream out(stdout);
+    Schedule* schedule = Export();
+    CHECK_NE(0, graph()->NodeCount());
+    CompilationInfo info(main_isolate(), main_zone());
+    Linkage linkage(&info, call_descriptor());
+    InstructionSequence sequence(&linkage, graph(), schedule);
+    SourcePositionTable source_positions(graph());
+    InstructionSelector selector(&sequence, &source_positions);
+    selector.SelectInstructions();
+    out << "--- Code sequence after instruction selection --- " << endl
+        << sequence;
+    for (InstructionSequence::const_iterator i = sequence.begin();
+         i != sequence.end(); ++i) {
+      Instruction* instr = *i;
+      if (instr->opcode() < 0) continue;
+      if (mode_ == kTargetMode) {
+        switch (ArchOpcodeField::decode(instr->opcode())) {
+#define CASE(Name) \
+  case k##Name:    \
+    break;
+          TARGET_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+          default:
+            continue;
+        }
+      }
+      code.push_back(instr);
+    }
+    for (int vreg = 0; vreg < sequence.VirtualRegisterCount(); ++vreg) {
+      if (sequence.IsDouble(vreg)) {
+        CHECK(!sequence.IsReference(vreg));
+        doubles.insert(vreg);
+      }
+      if (sequence.IsReference(vreg)) {
+        CHECK(!sequence.IsDouble(vreg));
+        references.insert(vreg);
+      }
+    }
+    immediates.assign(sequence.immediates().begin(),
+                      sequence.immediates().end());
+  }
+
+  int32_t ToInt32(const InstructionOperand* operand) const {
+    size_t i = operand->index();
+    CHECK(i < immediates.size());
+    CHECK_EQ(InstructionOperand::IMMEDIATE, operand->kind());
+    return immediates[i].ToInt32();
+  }
+
+  std::deque<Instruction*> code;
+  VirtualRegisterSet doubles;
+  VirtualRegisterSet references;
+  std::deque<Constant> immediates;
+
+ private:
+  Mode mode_;
+};
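+
+// Illustrative only: a test builds a machine graph, runs selection, and then
+// inspects the {code}, {doubles}, {references}, and {immediates} members
+// populated above.
+//
+//   InstructionSelectorTester m;
+//   m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
+//   m.SelectInstructions();
+//   CHECK_NE(0, static_cast<int>(m.code.size()));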
+
+
+static inline void CheckSameVreg(InstructionOperand* exp,
+                                 InstructionOperand* val) {
+  CHECK_EQ(InstructionOperand::UNALLOCATED, exp->kind());
+  CHECK_EQ(InstructionOperand::UNALLOCATED, val->kind());
+  CHECK_EQ(UnallocatedOperand::cast(exp)->virtual_register(),
+           UnallocatedOperand::cast(val)->virtual_register());
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_INSTRUCTION_SELECTOR_TEST_H_
diff --git a/test/cctest/compiler/simplified-graph-builder.cc b/test/cctest/compiler/simplified-graph-builder.cc
new file mode 100644 (file)
index 0000000..038c61a
--- /dev/null
@@ -0,0 +1,78 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/compiler/simplified-graph-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+SimplifiedGraphBuilder::SimplifiedGraphBuilder(
+    Graph* graph, CommonOperatorBuilder* common,
+    MachineOperatorBuilder* machine, SimplifiedOperatorBuilder* simplified)
+    : StructuredGraphBuilder(graph, common),
+      machine_(machine),
+      simplified_(simplified) {}
+
+
+void SimplifiedGraphBuilder::Begin() {
+  ASSERT(graph()->start() == NULL);
+  Node* start = graph()->NewNode(common()->Start());
+  graph()->SetStart(start);
+  set_environment(new (zone()) Environment(this, start));
+}
+
+
+void SimplifiedGraphBuilder::Return(Node* value) {
+  Node* control = NewNode(common()->Return(), value);
+  UpdateControlDependencyToLeaveFunction(control);
+}
+
+
+void SimplifiedGraphBuilder::End() {
+  environment()->UpdateControlDependency(exit_control());
+  graph()->SetEnd(NewNode(common()->End()));
+}
+
+
+SimplifiedGraphBuilder::Environment::Environment(
+    SimplifiedGraphBuilder* builder, Node* control_dependency)
+    : StructuredGraphBuilder::Environment(builder, control_dependency) {}
+
+
+Node* SimplifiedGraphBuilder::Environment::Top() {
+  ASSERT(!values()->empty());
+  return values()->back();
+}
+
+
+void SimplifiedGraphBuilder::Environment::Push(Node* node) {
+  values()->push_back(node);
+}
+
+
+Node* SimplifiedGraphBuilder::Environment::Pop() {
+  ASSERT(!values()->empty());
+  Node* back = values()->back();
+  values()->pop_back();
+  return back;
+}
+
+
+void SimplifiedGraphBuilder::Environment::Poke(size_t depth, Node* node) {
+  ASSERT(depth < values()->size());
+  size_t index = values()->size() - depth - 1;
+  values()->at(index) = node;
+}
+
+
+Node* SimplifiedGraphBuilder::Environment::Peek(size_t depth) {
+  ASSERT(depth < values()->size());
+  size_t index = values()->size() - depth - 1;
+  return values()->at(index);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/test/cctest/compiler/simplified-graph-builder.h b/test/cctest/compiler/simplified-graph-builder.h
new file mode 100644 (file)
index 0000000..22b7bbf
--- /dev/null
@@ -0,0 +1,72 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_SIMPLIFIED_GRAPH_BUILDER_H_
+#define V8_CCTEST_COMPILER_SIMPLIFIED_GRAPH_BUILDER_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/machine-node-factory.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/simplified-node-factory.h"
+#include "src/compiler/simplified-operator.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/call-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SimplifiedGraphBuilder
+    : public StructuredGraphBuilder,
+      public MachineNodeFactory<SimplifiedGraphBuilder>,
+      public SimplifiedNodeFactory<SimplifiedGraphBuilder> {
+ public:
+  SimplifiedGraphBuilder(Graph* graph, CommonOperatorBuilder* common,
+                         MachineOperatorBuilder* machine,
+                         SimplifiedOperatorBuilder* simplified);
+  virtual ~SimplifiedGraphBuilder() {}
+
+  class Environment : public StructuredGraphBuilder::Environment {
+   public:
+    Environment(SimplifiedGraphBuilder* builder, Node* control_dependency);
+
+    // TODO(dcarney): encode somehow and merge into StructuredGraphBuilder.
+    // SSA renaming operations.
+    Node* Top();
+    void Push(Node* node);
+    Node* Pop();
+    void Poke(size_t depth, Node* node);
+    Node* Peek(size_t depth);
+  };
+
+  Isolate* isolate() const { return zone()->isolate(); }
+  Zone* zone() const { return StructuredGraphBuilder::zone(); }
+  CommonOperatorBuilder* common() const {
+    return StructuredGraphBuilder::common();
+  }
+  MachineOperatorBuilder* machine() const { return machine_; }
+  SimplifiedOperatorBuilder* simplified() const { return simplified_; }
+  Environment* environment() {
+    return reinterpret_cast<Environment*>(environment_internal());
+  }
+
+  // Initialize graph and builder.
+  void Begin();
+
+  void Return(Node* value);
+
+  // Close the graph.
+  void End();
+
+ private:
+  MachineOperatorBuilder* machine_;
+  SimplifiedOperatorBuilder* simplified_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_SIMPLIFIED_GRAPH_BUILDER_H_
diff --git a/test/cctest/compiler/test-branch-combine.cc b/test/cctest/compiler/test-branch-combine.cc
new file mode 100644 (file)
index 0000000..eb678ea
--- /dev/null
@@ -0,0 +1,462 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+#if V8_TURBOFAN_TARGET
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+typedef RawMachineAssembler::Label MLabel;
+
+static IrOpcode::Value int32cmp_opcodes[] = {
+    IrOpcode::kWord32Equal, IrOpcode::kInt32LessThan,
+    IrOpcode::kInt32LessThanOrEqual, IrOpcode::kUint32LessThan,
+    IrOpcode::kUint32LessThanOrEqual};
+
+
+TEST(BranchCombineWord32EqualZero_1) {
+  // Test combining a branch with x == 0
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  int32_t eq_constant = -1033;
+  int32_t ne_constant = 825118;
+  Node* p0 = m.Parameter(0);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Word32Equal(p0, m.Int32Constant(0)), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_INT32_INPUTS(i) {
+    int32_t a = *i;
+    int32_t expect = a == 0 ? eq_constant : ne_constant;
+    CHECK_EQ(expect, m.Call(a));
+  }
+}
+
+
+TEST(BranchCombineWord32EqualZero_chain) {
+  // Test combining a branch with a chain of x == 0 == 0 == 0 ...
+  int32_t eq_constant = -1133;
+  int32_t ne_constant = 815118;
+
+  for (int k = 0; k < 6; k++) {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+    Node* p0 = m.Parameter(0);
+    MLabel blocka, blockb;
+    Node* cond = p0;
+    for (int j = 0; j < k; j++) {
+      cond = m.Word32Equal(cond, m.Int32Constant(0));
+    }
+    m.Branch(cond, &blocka, &blockb);
+    m.Bind(&blocka);
+    m.Return(m.Int32Constant(eq_constant));
+    m.Bind(&blockb);
+    m.Return(m.Int32Constant(ne_constant));
+
+    FOR_INT32_INPUTS(i) {
+      int32_t a = *i;
+      int32_t expect = (k & 1) == 1 ? (a == 0 ? eq_constant : ne_constant)
+                                    : (a == 0 ? ne_constant : eq_constant);
+      CHECK_EQ(expect, m.Call(a));
+    }
+  }
+}
+
+
+TEST(BranchCombineInt32LessThanZero_1) {
+  // Test combining a branch with x < 0
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  int32_t eq_constant = -1433;
+  int32_t ne_constant = 845118;
+  Node* p0 = m.Parameter(0);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Int32LessThan(p0, m.Int32Constant(0)), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_INT32_INPUTS(i) {
+    int32_t a = *i;
+    int32_t expect = a < 0 ? eq_constant : ne_constant;
+    CHECK_EQ(expect, m.Call(a));
+  }
+}
+
+
+TEST(BranchCombineUint32LessThan100_1) {
+  // Test combining a branch with x < 100
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  int32_t eq_constant = 1471;
+  int32_t ne_constant = 88845718;
+  Node* p0 = m.Parameter(0);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Uint32LessThan(p0, m.Int32Constant(100)), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_UINT32_INPUTS(i) {
+    uint32_t a = *i;
+    int32_t expect = a < 100 ? eq_constant : ne_constant;
+    CHECK_EQ(expect, m.Call(a));
+  }
+}
+
+
+TEST(BranchCombineUint32LessThanOrEqual100_1) {
+  // Test combining a branch with x <= 100
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  int32_t eq_constant = 1479;
+  int32_t ne_constant = 77845719;
+  Node* p0 = m.Parameter(0);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Uint32LessThanOrEqual(p0, m.Int32Constant(100)), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_UINT32_INPUTS(i) {
+    uint32_t a = *i;
+    int32_t expect = a <= 100 ? eq_constant : ne_constant;
+    CHECK_EQ(expect, m.Call(a));
+  }
+}
+
+
+TEST(BranchCombineZeroLessThanInt32_1) {
+  // Test combining a branch with 0 < x
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  int32_t eq_constant = -2033;
+  int32_t ne_constant = 225118;
+  Node* p0 = m.Parameter(0);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Int32LessThan(m.Int32Constant(0), p0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_INT32_INPUTS(i) {
+    int32_t a = *i;
+    int32_t expect = 0 < a ? eq_constant : ne_constant;
+    CHECK_EQ(expect, m.Call(a));
+  }
+}
+
+
+TEST(BranchCombineInt32GreaterThanZero_1) {
+  // Test combining a branch with x > 0
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  int32_t eq_constant = -1073;
+  int32_t ne_constant = 825178;
+  Node* p0 = m.Parameter(0);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Int32GreaterThan(p0, m.Int32Constant(0)), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_INT32_INPUTS(i) {
+    int32_t a = *i;
+    int32_t expect = a > 0 ? eq_constant : ne_constant;
+    CHECK_EQ(expect, m.Call(a));
+  }
+}
+
+
+TEST(BranchCombineWord32EqualP) {
+  // Test combining a branch with a Word32Equal.
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+  int32_t eq_constant = -1035;
+  int32_t ne_constant = 825018;
+  Node* p0 = m.Parameter(0);
+  Node* p1 = m.Parameter(1);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Word32Equal(p0, p1), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_INT32_INPUTS(i) {
+    FOR_INT32_INPUTS(j) {
+      int32_t a = *i;
+      int32_t b = *j;
+      int32_t expect = a == b ? eq_constant : ne_constant;
+      CHECK_EQ(expect, m.Call(a, b));
+    }
+  }
+}
+
+
+TEST(BranchCombineWord32EqualI) {
+  int32_t eq_constant = -1135;
+  int32_t ne_constant = 925718;
+
+  for (int left = 0; left < 2; left++) {
+    FOR_INT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      int32_t a = *i;
+
+      Node* p0 = m.Int32Constant(a);
+      Node* p1 = m.Parameter(0);
+
+      MLabel blocka, blockb;
+      if (left == 1) m.Branch(m.Word32Equal(p0, p1), &blocka, &blockb);
+      if (left == 0) m.Branch(m.Word32Equal(p1, p0), &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(eq_constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(ne_constant));
+
+      FOR_INT32_INPUTS(j) {
+        int32_t b = *j;
+        int32_t expect = a == b ? eq_constant : ne_constant;
+        CHECK_EQ(expect, m.Call(b));
+      }
+    }
+  }
+}
+
+
+TEST(BranchCombineInt32CmpP) {
+  int32_t eq_constant = -1235;
+  int32_t ne_constant = 725018;
+
+  for (int op = 0; op < 2; op++) {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+    Node* p0 = m.Parameter(0);
+    Node* p1 = m.Parameter(1);
+
+    MLabel blocka, blockb;
+    if (op == 0) m.Branch(m.Int32LessThan(p0, p1), &blocka, &blockb);
+    if (op == 1) m.Branch(m.Int32LessThanOrEqual(p0, p1), &blocka, &blockb);
+    m.Bind(&blocka);
+    m.Return(m.Int32Constant(eq_constant));
+    m.Bind(&blockb);
+    m.Return(m.Int32Constant(ne_constant));
+
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int32_t a = *i;
+        int32_t b = *j;
+        int32_t expect = 0;
+        if (op == 0) expect = a < b ? eq_constant : ne_constant;
+        if (op == 1) expect = a <= b ? eq_constant : ne_constant;
+        CHECK_EQ(expect, m.Call(a, b));
+      }
+    }
+  }
+}
+
+
+TEST(BranchCombineInt32CmpI) {
+  int32_t eq_constant = -1175;
+  int32_t ne_constant = 927711;
+
+  for (int op = 0; op < 2; op++) {
+    FOR_INT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      int32_t a = *i;
+      Node* p0 = m.Int32Constant(a);
+      Node* p1 = m.Parameter(0);
+
+      MLabel blocka, blockb;
+      if (op == 0) m.Branch(m.Int32LessThan(p0, p1), &blocka, &blockb);
+      if (op == 1) m.Branch(m.Int32LessThanOrEqual(p0, p1), &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(eq_constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(ne_constant));
+
+      FOR_INT32_INPUTS(j) {
+        int32_t b = *j;
+        int32_t expect = 0;
+        if (op == 0) expect = a < b ? eq_constant : ne_constant;
+        if (op == 1) expect = a <= b ? eq_constant : ne_constant;
+        CHECK_EQ(expect, m.Call(b));
+      }
+    }
+  }
+}
+
+
+// Now come the sophisticated tests for many input shape combinations.
+
+// Materializes a boolean (1 or 0) from a comparison.
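+// When |invert| is set, the comparison result is logically negated by
+// comparing it against zero with Word32Equal.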
+class CmpMaterializeBoolGen : public BinopGen<int32_t> {
+ public:
+  CompareWrapper w;
+  bool invert;
+
+  CmpMaterializeBoolGen(IrOpcode::Value opcode, bool i)
+      : w(opcode), invert(i) {}
+
+  virtual void gen(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) {
+    Node* cond = w.MakeNode(m, a, b);
+    if (invert) cond = m->Word32Equal(cond, m->Int32Constant(0));
+    m->Return(cond);
+  }
+  virtual int32_t expected(int32_t a, int32_t b) {
+    if (invert) return !w.Int32Compare(a, b) ? 1 : 0;
+    return w.Int32Compare(a, b) ? 1 : 0;
+  }
+};
+
+
+// Generates a branch and returns one of two values from a comparison.
+class CmpBranchGen : public BinopGen<int32_t> {
+ public:
+  CompareWrapper w;
+  bool invert;
+  bool true_first;
+  int32_t eq_constant;
+  int32_t ne_constant;
+
+  CmpBranchGen(IrOpcode::Value opcode, bool i, bool t, int32_t eq, int32_t ne)
+      : w(opcode), invert(i), true_first(t), eq_constant(eq), ne_constant(ne) {}
+
+  virtual void gen(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) {
+    MLabel blocka, blockb;
+    Node* cond = w.MakeNode(m, a, b);
+    if (invert) cond = m->Word32Equal(cond, m->Int32Constant(0));
+    m->Branch(cond, &blocka, &blockb);
+    if (true_first) {
+      m->Bind(&blocka);
+      m->Return(m->Int32Constant(eq_constant));
+      m->Bind(&blockb);
+      m->Return(m->Int32Constant(ne_constant));
+    } else {
+      m->Bind(&blockb);
+      m->Return(m->Int32Constant(ne_constant));
+      m->Bind(&blocka);
+      m->Return(m->Int32Constant(eq_constant));
+    }
+  }
+  virtual int32_t expected(int32_t a, int32_t b) {
+    if (invert) return !w.Int32Compare(a, b) ? eq_constant : ne_constant;
+    return w.Int32Compare(a, b) ? eq_constant : ne_constant;
+  }
+};
+
+
+TEST(BranchCombineInt32CmpAllInputShapes_materialized) {
+  for (size_t i = 0; i < ARRAY_SIZE(int32cmp_opcodes); i++) {
+    CmpMaterializeBoolGen gen(int32cmp_opcodes[i], false);
+    Int32BinopInputShapeTester tester(&gen);
+    tester.TestAllInputShapes();
+  }
+}
+
+
+TEST(BranchCombineInt32CmpAllInputShapes_inverted_materialized) {
+  for (size_t i = 0; i < ARRAY_SIZE(int32cmp_opcodes); i++) {
+    CmpMaterializeBoolGen gen(int32cmp_opcodes[i], true);
+    Int32BinopInputShapeTester tester(&gen);
+    tester.TestAllInputShapes();
+  }
+}
+
+
+TEST(BranchCombineInt32CmpAllInputShapes_branch_true) {
+  for (size_t i = 0; i < ARRAY_SIZE(int32cmp_opcodes); i++) {
+    CmpBranchGen gen(int32cmp_opcodes[i], false, false, 995 + i, -1011 - i);
+    Int32BinopInputShapeTester tester(&gen);
+    tester.TestAllInputShapes();
+  }
+}
+
+
+TEST(BranchCombineInt32CmpAllInputShapes_branch_false) {
+  for (size_t i = 0; i < ARRAY_SIZE(int32cmp_opcodes); i++) {
+    CmpBranchGen gen(int32cmp_opcodes[i], false, true, 795 + i, -2011 - i);
+    Int32BinopInputShapeTester tester(&gen);
+    tester.TestAllInputShapes();
+  }
+}
+
+
+TEST(BranchCombineInt32CmpAllInputShapes_inverse_branch_true) {
+  for (size_t i = 0; i < ARRAY_SIZE(int32cmp_opcodes); i++) {
+    CmpBranchGen gen(int32cmp_opcodes[i], true, false, 695 + i, -3011 - i);
+    Int32BinopInputShapeTester tester(&gen);
+    tester.TestAllInputShapes();
+  }
+}
+
+
+TEST(BranchCombineInt32CmpAllInputShapes_inverse_branch_false) {
+  for (size_t i = 0; i < ARRAY_SIZE(int32cmp_opcodes); i++) {
+    CmpBranchGen gen(int32cmp_opcodes[i], true, true, 595 + i, -4011 - i);
+    Int32BinopInputShapeTester tester(&gen);
+    tester.TestAllInputShapes();
+  }
+}
+
+
+TEST(BranchCombineFloat64Compares) {
+  double inf = V8_INFINITY;
+  double nan = v8::base::OS::nan_value();
+  double inputs[] = {0.0, 1.0, -1.0, -inf, inf, nan};
+
+  int32_t eq_constant = -1733;
+  int32_t ne_constant = 915118;
+
+  double input_a = 0.0;
+  double input_b = 0.0;
+
+  CompareWrapper cmps[] = {CompareWrapper(IrOpcode::kFloat64Equal),
+                           CompareWrapper(IrOpcode::kFloat64LessThan),
+                           CompareWrapper(IrOpcode::kFloat64LessThanOrEqual)};
+
+  for (size_t c = 0; c < ARRAY_SIZE(cmps); c++) {
+    CompareWrapper cmp = cmps[c];
+    for (int invert = 0; invert < 2; invert++) {
+      RawMachineAssemblerTester<int32_t> m;
+      Node* a = m.LoadFromPointer(&input_a, kMachineFloat64);
+      Node* b = m.LoadFromPointer(&input_b, kMachineFloat64);
+
+      MLabel blocka, blockb;
+      Node* cond = cmp.MakeNode(&m, a, b);
+      if (invert) cond = m.Word32Equal(cond, m.Int32Constant(0));
+      m.Branch(cond, &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(eq_constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(ne_constant));
+
+      for (size_t i = 0; i < ARRAY_SIZE(inputs); i++) {
+        for (size_t j = 0; j < ARRAY_SIZE(inputs); j += 2) {
+          input_a = inputs[i];
+          input_b = inputs[j];
+          int32_t expected =
+              invert ? (cmp.Float64Compare(input_a, input_b) ? ne_constant
+                                                             : eq_constant)
+                     : (cmp.Float64Compare(input_a, input_b) ? eq_constant
+                                                             : ne_constant);
+          CHECK_EQ(expected, m.Call());
+        }
+      }
+    }
+  }
+}
+#endif  // V8_TURBOFAN_TARGET
diff --git a/test/cctest/compiler/test-codegen-deopt.cc b/test/cctest/compiler/test-codegen-deopt.cc
new file mode 100644 (file)
index 0000000..243ece9
--- /dev/null
@@ -0,0 +1,331 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/code-generator.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/register-allocator.h"
+#include "src/compiler/schedule.h"
+
+#include "src/full-codegen.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+typedef RawMachineAssembler::Label MLabel;
+
+static Handle<JSFunction> NewFunction(const char* source) {
+  return v8::Utils::OpenHandle(
+      *v8::Handle<v8::Function>::Cast(CompileRun(source)));
+}
+
+
+class DeoptCodegenTester {
+ public:
+  explicit DeoptCodegenTester(HandleAndZoneScope* scope, const char* src)
+      : scope_(scope),
+        function(NewFunction(src)),
+        info(function, scope->main_zone()),
+        bailout_id(-1) {
+    CHECK(Parser::Parse(&info));
+    StrictMode strict_mode = info.function()->strict_mode();
+    info.SetStrictMode(strict_mode);
+    info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
+    CHECK(Rewriter::Rewrite(&info));
+    CHECK(Scope::Analyze(&info));
+    CHECK_NE(NULL, info.scope());
+
+    FunctionTester::EnsureDeoptimizationSupport(&info);
+
+    ASSERT(info.shared_info()->has_deoptimization_support());
+
+    graph = new (scope_->main_zone()) Graph(scope_->main_zone());
+  }
+
+  virtual ~DeoptCodegenTester() { delete code; }
+
+  void GenerateCodeFromSchedule(Schedule* schedule) {
+    OFStream os(stdout);
+    os << *schedule;
+
+    // Initialize the codegen and generate code.
+    Linkage* linkage = new (scope_->main_zone()) Linkage(&info);
+    code = new v8::internal::compiler::InstructionSequence(linkage, graph,
+                                                           schedule);
+    SourcePositionTable source_positions(graph);
+    InstructionSelector selector(code, &source_positions);
+    selector.SelectInstructions();
+
+    os << "----- Instruction sequence before register allocation -----\n"
+       << *code;
+
+    RegisterAllocator allocator(code);
+    CHECK(allocator.Allocate());
+
+    os << "----- Instruction sequence after register allocation -----\n"
+       << *code;
+
+    compiler::CodeGenerator generator(code);
+    result_code = generator.GenerateCode();
+
+#ifdef DEBUG
+    result_code->Print();
+#endif
+  }
+
+  Zone* zone() { return scope_->main_zone(); }
+
+  HandleAndZoneScope* scope_;
+  Handle<JSFunction> function;
+  CompilationInfo info;
+  BailoutId bailout_id;
+  Handle<Code> result_code;
+  v8::internal::compiler::InstructionSequence* code;
+  Graph* graph;
+};
+
+
+class TrivialDeoptCodegenTester : public DeoptCodegenTester {
+ public:
+  explicit TrivialDeoptCodegenTester(HandleAndZoneScope* scope)
+      : DeoptCodegenTester(scope,
+                           "function foo() { deopt(); return 42; }; foo") {}
+
+  void GenerateCode() {
+    GenerateCodeFromSchedule(BuildGraphAndSchedule(graph));
+  }
+
+  Schedule* BuildGraphAndSchedule(Graph* graph) {
+    Isolate* isolate = info.isolate();
+    CommonOperatorBuilder common(zone());
+
+    // Manually construct a schedule for the function below:
+    // function foo() {
+    //   deopt();
+    // }
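+    //
+    // The call to deopt() gets two control continuations: a normal
+    // continuation that returns and a lazy-deoptimization continuation that
+    // carries the frame state for the bailout.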
+
+    MachineRepresentation parameter_reps[] = {kMachineTagged};
+    MachineCallDescriptorBuilder descriptor_builder(kMachineTagged, 1,
+                                                    parameter_reps);
+
+    RawMachineAssembler m(graph, &descriptor_builder);
+
+    Handle<Object> undef_object =
+        Handle<Object>(isolate->heap()->undefined_value(), isolate);
+    PrintableUnique<Object> undef_constant =
+        PrintableUnique<Object>::CreateUninitialized(zone(), undef_object);
+    Node* undef_node = m.NewNode(common.HeapConstant(undef_constant));
+
+    Handle<JSFunction> deopt_function =
+        NewFunction("function deopt() { %DeoptimizeFunction(foo); }; deopt");
+    PrintableUnique<Object> deopt_fun_constant =
+        PrintableUnique<Object>::CreateUninitialized(zone(), deopt_function);
+    Node* deopt_fun_node = m.NewNode(common.HeapConstant(deopt_fun_constant));
+
+    MLabel deopt, cont;
+    Node* call = m.CallJS0(deopt_fun_node, undef_node, &cont, &deopt);
+
+    m.Bind(&cont);
+    m.NewNode(common.Continuation(), call);
+    m.Return(undef_node);
+
+    m.Bind(&deopt);
+    m.NewNode(common.LazyDeoptimization(), call);
+
+    bailout_id = GetCallBailoutId();
+    FrameStateDescriptor state_descriptor(bailout_id);
+    Node* state_node = m.NewNode(common.FrameState(state_descriptor));
+    m.Deoptimize(state_node);
+
+    // Schedule the graph:
+    Schedule* schedule = m.Export();
+
+    cont_block = cont.block();
+    deopt_block = deopt.block();
+
+    return schedule;
+  }
+
+  BailoutId GetCallBailoutId() {
+    ZoneList<Statement*>* body = info.function()->body();
+    for (int i = 0; i < body->length(); i++) {
+      if (body->at(i)->IsExpressionStatement() &&
+          body->at(i)->AsExpressionStatement()->expression()->IsCall()) {
+        return body->at(i)->AsExpressionStatement()->expression()->id();
+      }
+    }
+    CHECK(false);
+    return BailoutId(-1);
+  }
+
+  BasicBlock* cont_block;
+  BasicBlock* deopt_block;
+};
+
+
+TEST(TurboTrivialDeoptCodegen) {
+  HandleAndZoneScope scope;
+  InitializedHandleScope handles;
+
+  FLAG_allow_natives_syntax = true;
+  FLAG_turbo_deoptimization = true;
+
+  TrivialDeoptCodegenTester t(&scope);
+  t.GenerateCode();
+
+  DeoptimizationInputData* data =
+      DeoptimizationInputData::cast(t.result_code->deoptimization_data());
+
+  Label* cont_label = t.code->GetLabel(t.cont_block);
+  Label* deopt_label = t.code->GetLabel(t.deopt_block);
+
+  // Check the patch table. It should patch the continuation address to the
+  // deoptimization block address.
+  CHECK_EQ(1, data->ReturnAddressPatchCount());
+  CHECK_EQ(cont_label->pos(), data->ReturnAddressPc(0)->value());
+  CHECK_EQ(deopt_label->pos(), data->PatchedAddressPc(0)->value());
+
+  // Check that we deoptimize to the right AST id.
+  CHECK_EQ(1, data->DeoptCount());
+  CHECK_EQ(t.bailout_id.ToInt(), data->AstId(0).ToInt());
+}
+
+
+TEST(TurboTrivialDeoptCodegenAndRun) {
+  HandleAndZoneScope scope;
+  InitializedHandleScope handles;
+
+  FLAG_allow_natives_syntax = true;
+  FLAG_turbo_deoptimization = true;
+
+  TrivialDeoptCodegenTester t(&scope);
+  t.GenerateCode();
+
+  t.function->ReplaceCode(*t.result_code);
+  t.info.context()->native_context()->AddOptimizedCode(*t.result_code);
+
+  Isolate* isolate = scope.main_isolate();
+  Handle<Object> result;
+  bool has_pending_exception =
+      !Execution::Call(isolate, t.function,
+                       isolate->factory()->undefined_value(), 0, NULL,
+                       false).ToHandle(&result);
+  CHECK(!has_pending_exception);
+  CHECK(result->SameValue(Smi::FromInt(42)));
+}
+
+
+class TrivialRuntimeDeoptCodegenTester : public DeoptCodegenTester {
+ public:
+  explicit TrivialRuntimeDeoptCodegenTester(HandleAndZoneScope* scope)
+      : DeoptCodegenTester(
+            scope,
+            "function foo() { %DeoptimizeFunction(foo); return 42; }; foo") {}
+
+  void GenerateCode() {
+    GenerateCodeFromSchedule(BuildGraphAndSchedule(graph));
+  }
+
+  Schedule* BuildGraphAndSchedule(Graph* graph) {
+    Isolate* isolate = info.isolate();
+    CommonOperatorBuilder common(zone());
+
+    // Manually construct a schedule for the function below:
+    // function foo() {
+    //   %DeoptimizeFunction(foo);
+    // }
+
+    MachineRepresentation parameter_reps[] = {kMachineTagged};
+    MachineCallDescriptorBuilder descriptor_builder(kMachineTagged, 2,
+                                                    parameter_reps);
+
+    RawMachineAssembler m(graph, &descriptor_builder);
+
+    Handle<Object> undef_object =
+        Handle<Object>(isolate->heap()->undefined_value(), isolate);
+    PrintableUnique<Object> undef_constant =
+        PrintableUnique<Object>::CreateUninitialized(zone(), undef_object);
+    Node* undef_node = m.NewNode(common.HeapConstant(undef_constant));
+
+    PrintableUnique<Object> this_fun_constant =
+        PrintableUnique<Object>::CreateUninitialized(zone(), function);
+    Node* this_fun_node = m.NewNode(common.HeapConstant(this_fun_constant));
+
+    MLabel deopt, cont;
+    Node* call = m.CallRuntime1(Runtime::kDeoptimizeFunction, this_fun_node,
+                                &cont, &deopt);
+
+    m.Bind(&cont);
+    m.NewNode(common.Continuation(), call);
+    m.Return(undef_node);
+
+    m.Bind(&deopt);
+    m.NewNode(common.LazyDeoptimization(), call);
+
+    bailout_id = GetCallBailoutId();
+    FrameStateDescriptor state_descriptor(bailout_id);
+    Node* state_node = m.NewNode(common.FrameState(state_descriptor));
+    m.Deoptimize(state_node);
+
+    // Schedule the graph:
+    Schedule* schedule = m.Export();
+
+    cont_block = cont.block();
+    deopt_block = deopt.block();
+
+    return schedule;
+  }
+
+  BailoutId GetCallBailoutId() {
+    ZoneList<Statement*>* body = info.function()->body();
+    for (int i = 0; i < body->length(); i++) {
+      if (body->at(i)->IsExpressionStatement() &&
+          body->at(i)->AsExpressionStatement()->expression()->IsCallRuntime()) {
+        return body->at(i)->AsExpressionStatement()->expression()->id();
+      }
+    }
+    CHECK(false);
+    return BailoutId(-1);
+  }
+
+  BasicBlock* cont_block;
+  BasicBlock* deopt_block;
+};
+
+
+TEST(TurboTrivialRuntimeDeoptCodegenAndRun) {
+  HandleAndZoneScope scope;
+  InitializedHandleScope handles;
+
+  FLAG_allow_natives_syntax = true;
+  FLAG_turbo_deoptimization = true;
+
+  TrivialRuntimeDeoptCodegenTester t(&scope);
+  t.GenerateCode();
+
+  t.function->ReplaceCode(*t.result_code);
+  t.info.context()->native_context()->AddOptimizedCode(*t.result_code);
+
+  Isolate* isolate = scope.main_isolate();
+  Handle<Object> result;
+  bool has_pending_exception =
+      !Execution::Call(isolate, t.function,
+                       isolate->factory()->undefined_value(), 0, NULL,
+                       false).ToHandle(&result);
+  CHECK(!has_pending_exception);
+  CHECK(result->SameValue(Smi::FromInt(42)));
+}
diff --git a/test/cctest/compiler/test-gap-resolver.cc b/test/cctest/compiler/test-gap-resolver.cc
new file mode 100644 (file)
index 0000000..60a4762
--- /dev/null
@@ -0,0 +1,172 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/gap-resolver.h"
+
+#include "src/base/utils/random-number-generator.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+// The state of our move interpreter is the mapping of operands to values. Note
+// that the actual values don't really matter; all we care about is equality.
+class InterpreterState {
+ public:
+  typedef std::vector<MoveOperands> Moves;
+
+  void ExecuteInParallel(Moves moves) {
+    InterpreterState copy(*this);
+    for (Moves::iterator it = moves.begin(); it != moves.end(); ++it) {
+      if (!it->IsRedundant()) write(it->destination(), copy.read(it->source()));
+    }
+  }
+
+  bool operator==(const InterpreterState& other) const {
+    return values_ == other.values_;
+  }
+
+  bool operator!=(const InterpreterState& other) const {
+    return values_ != other.values_;
+  }
+
+ private:
+  // Internally, the state is a normalized permutation of (kind,index) pairs.
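+  // Identity mappings are never stored (write() erases them), so two states
+  // describing the same permutation always compare equal.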
+  typedef std::pair<InstructionOperand::Kind, int> Key;
+  typedef Key Value;
+  typedef std::map<Key, Value> OperandMap;
+
+  Value read(const InstructionOperand* op) const {
+    OperandMap::const_iterator it = values_.find(KeyFor(op));
+    return (it == values_.end()) ? ValueFor(op) : it->second;
+  }
+
+  void write(const InstructionOperand* op, Value v) {
+    if (v == ValueFor(op)) {
+      values_.erase(KeyFor(op));
+    } else {
+      values_[KeyFor(op)] = v;
+    }
+  }
+
+  static Key KeyFor(const InstructionOperand* op) {
+    return Key(op->kind(), op->index());
+  }
+
+  static Value ValueFor(const InstructionOperand* op) {
+    return Value(op->kind(), op->index());
+  }
+
+  friend OStream& operator<<(OStream& os, const InterpreterState& is) {
+    for (OperandMap::const_iterator it = is.values_.begin();
+         it != is.values_.end(); ++it) {
+      if (it != is.values_.begin()) os << " ";
+      InstructionOperand source(it->first.first, it->first.second);
+      InstructionOperand destination(it->second.first, it->second.second);
+      os << MoveOperands(&source, &destination);
+    }
+    return os;
+  }
+
+  OperandMap values_;
+};
+
+
+// An abstract interpreter for moves, swaps and parallel moves.
+class MoveInterpreter : public GapResolver::Assembler {
+ public:
+  virtual void AssembleMove(InstructionOperand* source,
+                            InstructionOperand* destination) V8_OVERRIDE {
+    InterpreterState::Moves moves;
+    moves.push_back(MoveOperands(source, destination));
+    state_.ExecuteInParallel(moves);
+  }
+
+  virtual void AssembleSwap(InstructionOperand* source,
+                            InstructionOperand* destination) V8_OVERRIDE {
+    InterpreterState::Moves moves;
+    moves.push_back(MoveOperands(source, destination));
+    moves.push_back(MoveOperands(destination, source));
+    state_.ExecuteInParallel(moves);
+  }
+
+  void AssembleParallelMove(const ParallelMove* pm) {
+    InterpreterState::Moves moves(pm->move_operands()->begin(),
+                                  pm->move_operands()->end());
+    state_.ExecuteInParallel(moves);
+  }
+
+  InterpreterState state() const { return state_; }
+
+ private:
+  InterpreterState state_;
+};
+
+
+class ParallelMoveCreator : public HandleAndZoneScope {
+ public:
+  ParallelMoveCreator() : rng_(CcTest::random_number_generator()) {}
+
+  ParallelMove* Create(int size) {
+    ParallelMove* parallel_move = new (main_zone()) ParallelMove(main_zone());
+    std::set<InstructionOperand*, InstructionOperandComparator> seen;
+    for (int i = 0; i < size; ++i) {
+      MoveOperands mo(CreateRandomOperand(), CreateRandomOperand());
+      if (!mo.IsRedundant() && seen.find(mo.destination()) == seen.end()) {
+        parallel_move->AddMove(mo.source(), mo.destination(), main_zone());
+        seen.insert(mo.destination());
+      }
+    }
+    return parallel_move;
+  }
+
+ private:
+  struct InstructionOperandComparator {
+    bool operator()(const InstructionOperand* x, const InstructionOperand* y) {
+      return (x->kind() < y->kind()) ||
+             (x->kind() == y->kind() && x->index() < y->index());
+    }
+  };
+
+  InstructionOperand* CreateRandomOperand() {
+    int index = rng_->NextInt(6);
+    switch (rng_->NextInt(5)) {
+      case 0:
+        return ConstantOperand::Create(index, main_zone());
+      case 1:
+        return StackSlotOperand::Create(index, main_zone());
+      case 2:
+        return DoubleStackSlotOperand::Create(index, main_zone());
+      case 3:
+        return RegisterOperand::Create(index, main_zone());
+      case 4:
+        return DoubleRegisterOperand::Create(index, main_zone());
+    }
+    UNREACHABLE();
+    return NULL;
+  }
+
+ private:
+  v8::base::RandomNumberGenerator* rng_;
+};
+
+
+TEST(FuzzResolver) {
+  ParallelMoveCreator pmc;
+  for (int size = 0; size < 20; ++size) {
+    for (int repeat = 0; repeat < 50; ++repeat) {
+      ParallelMove* pm = pmc.Create(size);
+
+      // Note: The gap resolver modifies the ParallelMove, so interpret first.
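+      // mi1 executes the moves in parallel; mi2 replays the resolver's
+      // sequentialized moves and swaps. Equal final states mean the
+      // resolution preserved the parallel semantics.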
+      MoveInterpreter mi1;
+      mi1.AssembleParallelMove(pm);
+
+      MoveInterpreter mi2;
+      GapResolver resolver(&mi2);
+      resolver.Resolve(pm);
+
+      CHECK(mi1.state() == mi2.state());
+    }
+  }
+}
diff --git a/test/cctest/compiler/test-graph-reducer.cc b/test/cctest/compiler/test-graph-reducer.cc
new file mode 100644 (file)
index 0000000..dfbb6f2
--- /dev/null
@@ -0,0 +1,659 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "graph-tester.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-reducer.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+const uint8_t OPCODE_A0 = 10;
+const uint8_t OPCODE_A1 = 11;
+const uint8_t OPCODE_A2 = 12;
+const uint8_t OPCODE_B0 = 20;
+const uint8_t OPCODE_B1 = 21;
+const uint8_t OPCODE_B2 = 22;
+const uint8_t OPCODE_C0 = 30;
+const uint8_t OPCODE_C1 = 31;
+const uint8_t OPCODE_C2 = 32;
+
+static SimpleOperator OPA0(OPCODE_A0, Operator::kNoWrite, 0, 0, "opa0");
+static SimpleOperator OPA1(OPCODE_A1, Operator::kNoWrite, 1, 0, "opa1");
+static SimpleOperator OPA2(OPCODE_A2, Operator::kNoWrite, 2, 0, "opa2");
+static SimpleOperator OPB0(OPCODE_B0, Operator::kNoWrite, 0, 0, "opb0");
+static SimpleOperator OPB1(OPCODE_B1, Operator::kNoWrite, 1, 0, "opb1");
+static SimpleOperator OPB2(OPCODE_B2, Operator::kNoWrite, 2, 0, "opb2");
+static SimpleOperator OPC0(OPCODE_C0, Operator::kNoWrite, 0, 0, "opc0");
+static SimpleOperator OPC1(OPCODE_C1, Operator::kNoWrite, 1, 0, "opc1");
+static SimpleOperator OPC2(OPCODE_C2, Operator::kNoWrite, 2, 0, "opc2");
+
+
+// Replaces all "A" operators with "B" operators without creating new nodes.
+class InPlaceABReducer : public Reducer {
+ public:
+  virtual Reduction Reduce(Node* node) {
+    switch (node->op()->opcode()) {
+      case OPCODE_A0:
+        CHECK_EQ(0, node->InputCount());
+        node->set_op(&OPB0);
+        return Replace(node);
+      case OPCODE_A1:
+        CHECK_EQ(1, node->InputCount());
+        node->set_op(&OPB1);
+        return Replace(node);
+      case OPCODE_A2:
+        CHECK_EQ(2, node->InputCount());
+        node->set_op(&OPB2);
+        return Replace(node);
+    }
+    return NoChange();
+  }
+};
+
+
+// Replaces all "A" operators with "B" operators by allocating new nodes.
+class NewABReducer : public Reducer {
+ public:
+  explicit NewABReducer(Graph* graph) : graph_(graph) {}
+  virtual Reduction Reduce(Node* node) {
+    switch (node->op()->opcode()) {
+      case OPCODE_A0:
+        CHECK_EQ(0, node->InputCount());
+        return Replace(graph_->NewNode(&OPB0));
+      case OPCODE_A1:
+        CHECK_EQ(1, node->InputCount());
+        return Replace(graph_->NewNode(&OPB1, node->InputAt(0)));
+      case OPCODE_A2:
+        CHECK_EQ(2, node->InputCount());
+        return Replace(
+            graph_->NewNode(&OPB2, node->InputAt(0), node->InputAt(1)));
+    }
+    return NoChange();
+  }
+  Graph* graph_;
+};
+
+
+// Replaces all "B" operators with "C" operators without creating new nodes.
+class InPlaceBCReducer : public Reducer {
+ public:
+  virtual Reduction Reduce(Node* node) {
+    switch (node->op()->opcode()) {
+      case OPCODE_B0:
+        CHECK_EQ(0, node->InputCount());
+        node->set_op(&OPC0);
+        return Replace(node);
+      case OPCODE_B1:
+        CHECK_EQ(1, node->InputCount());
+        node->set_op(&OPC1);
+        return Replace(node);
+      case OPCODE_B2:
+        CHECK_EQ(2, node->InputCount());
+        node->set_op(&OPC2);
+        return Replace(node);
+    }
+    return NoChange();
+  }
+};
+
+
+// Wraps all "OPA0" nodes in "OPB1" operators by allocating new nodes.
+class A0Wrapper V8_FINAL : public Reducer {
+ public:
+  explicit A0Wrapper(Graph* graph) : graph_(graph) {}
+  virtual Reduction Reduce(Node* node) V8_OVERRIDE {
+    switch (node->op()->opcode()) {
+      case OPCODE_A0:
+        CHECK_EQ(0, node->InputCount());
+        return Replace(graph_->NewNode(&OPB1, node));
+    }
+    return NoChange();
+  }
+  Graph* graph_;
+};
+
+
+// Wraps all "OPB0" nodes in two "OPC1" operators by allocating new nodes.
+class B0Wrapper V8_FINAL : public Reducer {
+ public:
+  explicit B0Wrapper(Graph* graph) : graph_(graph) {}
+  virtual Reduction Reduce(Node* node) V8_OVERRIDE {
+    switch (node->op()->opcode()) {
+      case OPCODE_B0:
+        CHECK_EQ(0, node->InputCount());
+        return Replace(graph_->NewNode(&OPC1, graph_->NewNode(&OPC1, node)));
+    }
+    return NoChange();
+  }
+  Graph* graph_;
+};
+
+
+// Replaces all "OPA1" nodes with the first input.
+class A1Forwarder : public Reducer {
+  virtual Reduction Reduce(Node* node) {
+    switch (node->op()->opcode()) {
+      case OPCODE_A1:
+        CHECK_EQ(1, node->InputCount());
+        return Replace(node->InputAt(0));
+    }
+    return NoChange();
+  }
+};
+
+
+// Replaces all "OPB1" nodes with the first input.
+class B1Forwarder : public Reducer {
+  virtual Reduction Reduce(Node* node) {
+    switch (node->op()->opcode()) {
+      case OPCODE_B1:
+        CHECK_EQ(1, node->InputCount());
+        return Replace(node->InputAt(0));
+    }
+    return NoChange();
+  }
+};
+
+
+// Swaps the inputs to "OPA2" and "OPB2" nodes based on ids.
+class AB2Sorter : public Reducer {
+  virtual Reduction Reduce(Node* node) {
+    switch (node->op()->opcode()) {
+      case OPCODE_A2:
+      case OPCODE_B2:
+        CHECK_EQ(2, node->InputCount());
+        Node* x = node->InputAt(0);
+        Node* y = node->InputAt(1);
+        if (x->id() > y->id()) {
+          node->ReplaceInput(0, y);
+          node->ReplaceInput(1, x);
+          return Replace(node);
+        }
+    }
+    return NoChange();
+  }
+};
+
+
+// Simply records the nodes visited.
+class ReducerRecorder : public Reducer {
+ public:
+  explicit ReducerRecorder(Zone* zone)
+      : set(NodeSet::key_compare(), NodeSet::allocator_type(zone)) {}
+  virtual Reduction Reduce(Node* node) {
+    set.insert(node);
+    return NoChange();
+  }
+  void CheckContains(Node* node) { CHECK_EQ(1, set.count(node)); }
+  NodeSet set;
+};
+
+
+TEST(ReduceGraphFromEnd1) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* end = graph.NewNode(&OPA1, n1);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  ReducerRecorder recorder(graph.zone());
+  reducer.AddReducer(&recorder);
+  reducer.ReduceGraph();
+  recorder.CheckContains(n1);
+  recorder.CheckContains(end);
+}
+
+
+TEST(ReduceGraphFromEnd2) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* n2 = graph.NewNode(&OPA1, n1);
+  Node* n3 = graph.NewNode(&OPA1, n1);
+  Node* end = graph.NewNode(&OPA2, n2, n3);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  ReducerRecorder recorder(graph.zone());
+  reducer.AddReducer(&recorder);
+  reducer.ReduceGraph();
+  recorder.CheckContains(n1);
+  recorder.CheckContains(n2);
+  recorder.CheckContains(n3);
+  recorder.CheckContains(end);
+}
+
+
+TEST(ReduceInPlace1) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* end = graph.NewNode(&OPA1, n1);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  InPlaceABReducer r;
+  reducer.AddReducer(&r);
+
+  // Tests A* => B* with in-place updates.
+  for (int i = 0; i < 3; i++) {
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    CHECK_EQ(before, graph.NodeCount());
+    CHECK_EQ(&OPB0, n1->op());
+    CHECK_EQ(&OPB1, end->op());
+    CHECK_EQ(n1, end->InputAt(0));
+  }
+}
+
+
+TEST(ReduceInPlace2) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* n2 = graph.NewNode(&OPA1, n1);
+  Node* n3 = graph.NewNode(&OPA1, n1);
+  Node* end = graph.NewNode(&OPA2, n2, n3);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  InPlaceABReducer r;
+  reducer.AddReducer(&r);
+
+  // Tests A* => B* with in-place updates.
+  for (int i = 0; i < 3; i++) {
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    CHECK_EQ(before, graph.NodeCount());
+    CHECK_EQ(&OPB0, n1->op());
+    CHECK_EQ(&OPB1, n2->op());
+    CHECK_EQ(n1, n2->InputAt(0));
+    CHECK_EQ(&OPB1, n3->op());
+    CHECK_EQ(n1, n3->InputAt(0));
+    CHECK_EQ(&OPB2, end->op());
+    CHECK_EQ(n2, end->InputAt(0));
+    CHECK_EQ(n3, end->InputAt(1));
+  }
+}
+
+
+TEST(ReduceNew1) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* n2 = graph.NewNode(&OPA1, n1);
+  Node* n3 = graph.NewNode(&OPA1, n1);
+  Node* end = graph.NewNode(&OPA2, n2, n3);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  NewABReducer r(&graph);
+  reducer.AddReducer(&r);
+
+  // Tests A* => B* while creating new nodes.
+  for (int i = 0; i < 3; i++) {
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    if (i == 0) {
+      CHECK_NE(before, graph.NodeCount());
+    } else {
+      CHECK_EQ(before, graph.NodeCount());
+    }
+    Node* nend = graph.end();
+    CHECK_NE(end, nend);  // end() should be updated too.
+
+    Node* nn2 = nend->InputAt(0);
+    Node* nn3 = nend->InputAt(1);
+    Node* nn1 = nn2->InputAt(0);
+
+    CHECK_EQ(nn1, nn3->InputAt(0));
+
+    CHECK_EQ(&OPB0, nn1->op());
+    CHECK_EQ(&OPB1, nn2->op());
+    CHECK_EQ(&OPB1, nn3->op());
+    CHECK_EQ(&OPB2, nend->op());
+  }
+}
+
+
+TEST(Wrapping1) {
+  GraphTester graph;
+
+  Node* end = graph.NewNode(&OPA0);
+  graph.SetEnd(end);
+  CHECK_EQ(1, graph.NodeCount());
+
+  GraphReducer reducer(&graph);
+  A0Wrapper r(&graph);
+  reducer.AddReducer(&r);
+
+  reducer.ReduceGraph();
+  CHECK_EQ(2, graph.NodeCount());
+
+  Node* nend = graph.end();
+  CHECK_NE(end, nend);
+  CHECK_EQ(&OPB1, nend->op());
+  CHECK_EQ(1, nend->InputCount());
+  CHECK_EQ(end, nend->InputAt(0));
+}
+
+
+TEST(Wrapping2) {
+  GraphTester graph;
+
+  Node* end = graph.NewNode(&OPB0);
+  graph.SetEnd(end);
+  CHECK_EQ(1, graph.NodeCount());
+
+  GraphReducer reducer(&graph);
+  B0Wrapper r(&graph);
+  reducer.AddReducer(&r);
+
+  reducer.ReduceGraph();
+  CHECK_EQ(3, graph.NodeCount());
+
+  Node* nend = graph.end();
+  CHECK_NE(end, nend);
+  CHECK_EQ(&OPC1, nend->op());
+  CHECK_EQ(1, nend->InputCount());
+
+  Node* n1 = nend->InputAt(0);
+  CHECK_NE(end, n1);
+  CHECK_EQ(&OPC1, n1->op());
+  CHECK_EQ(1, n1->InputCount());
+  CHECK_EQ(end, n1->InputAt(0));
+}
+
+
+TEST(Forwarding1) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* end = graph.NewNode(&OPA1, n1);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  A1Forwarder r;
+  reducer.AddReducer(&r);
+
+  // Tests A1(x) => x
+  for (int i = 0; i < 3; i++) {
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    CHECK_EQ(before, graph.NodeCount());
+    CHECK_EQ(&OPA0, n1->op());
+    CHECK_EQ(n1, graph.end());
+  }
+}
+
+
+TEST(Forwarding2) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* n2 = graph.NewNode(&OPA1, n1);
+  Node* n3 = graph.NewNode(&OPA1, n1);
+  Node* end = graph.NewNode(&OPA2, n2, n3);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  A1Forwarder r;
+  reducer.AddReducer(&r);
+
+  // Tests reducing A2(A1(x), A1(y)) => A2(x, y).
+  for (int i = 0; i < 3; i++) {
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    CHECK_EQ(before, graph.NodeCount());
+    CHECK_EQ(&OPA0, n1->op());
+    CHECK_EQ(n1, end->InputAt(0));
+    CHECK_EQ(n1, end->InputAt(1));
+    CHECK_EQ(&OPA2, end->op());
+    CHECK_EQ(0, n2->UseCount());
+    CHECK_EQ(0, n3->UseCount());
+  }
+}
+
+
+TEST(Forwarding3) {
+  // Tests reducing a chain of A1(A1(A1(A1(x)))) => x.
+  for (int i = 0; i < 8; i++) {
+    GraphTester graph;
+
+    Node* n1 = graph.NewNode(&OPA0);
+    Node* end = n1;
+    for (int j = 0; j < i; j++) {
+      end = graph.NewNode(&OPA1, end);
+    }
+    graph.SetEnd(end);
+
+    GraphReducer reducer(&graph);
+    A1Forwarder r;
+    reducer.AddReducer(&r);
+
+    for (int j = 0; j < 3; j++) {
+      int before = graph.NodeCount();
+      reducer.ReduceGraph();
+      CHECK_EQ(before, graph.NodeCount());
+      CHECK_EQ(&OPA0, n1->op());
+      CHECK_EQ(n1, graph.end());
+    }
+  }
+}
+
+
+TEST(ReduceForward1) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* n2 = graph.NewNode(&OPA1, n1);
+  Node* n3 = graph.NewNode(&OPA1, n1);
+  Node* end = graph.NewNode(&OPA2, n2, n3);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  InPlaceABReducer r;
+  B1Forwarder f;
+  reducer.AddReducer(&r);
+  reducer.AddReducer(&f);
+
+  // Tests first reducing A => B, then B1(x) => x.
+  for (int i = 0; i < 3; i++) {
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    CHECK_EQ(before, graph.NodeCount());
+    CHECK_EQ(&OPB0, n1->op());
+    CHECK_EQ(&OPB1, n2->op());
+    CHECK_EQ(n1, end->InputAt(0));
+    CHECK_EQ(&OPB1, n3->op());
+    CHECK_EQ(n1, end->InputAt(1));
+    CHECK_EQ(&OPB2, end->op());
+    CHECK_EQ(0, n2->UseCount());
+    CHECK_EQ(0, n3->UseCount());
+  }
+}
+
+
+TEST(Sorter1) {
+  HandleAndZoneScope scope;
+  AB2Sorter r;
+  for (int i = 0; i < 6; i++) {
+    GraphTester graph;
+
+    Node* n1 = graph.NewNode(&OPA0);
+    Node* n2 = graph.NewNode(&OPA1, n1);
+    Node* n3 = graph.NewNode(&OPA1, n1);
+    Node* end;
+
+    if (i == 0) end = graph.NewNode(&OPA2, n2, n3);
+    if (i == 1) end = graph.NewNode(&OPA2, n3, n2);
+    if (i == 2) end = graph.NewNode(&OPA2, n2, n1);
+    if (i == 3) end = graph.NewNode(&OPA2, n1, n2);
+    if (i == 4) end = graph.NewNode(&OPA2, n3, n1);
+    if (i == 5) end = graph.NewNode(&OPA2, n1, n3);
+
+    graph.SetEnd(end);
+
+    GraphReducer reducer(&graph);
+    reducer.AddReducer(&r);
+
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    CHECK_EQ(before, graph.NodeCount());
+    CHECK_EQ(&OPA0, n1->op());
+    CHECK_EQ(&OPA1, n2->op());
+    CHECK_EQ(&OPA1, n3->op());
+    CHECK_EQ(&OPA2, end->op());
+    CHECK_EQ(end, graph.end());
+    CHECK(end->InputAt(0)->id() <= end->InputAt(1)->id());
+  }
+}
+
+
+// Generate a node graph with the given permutations.
+void GenDAG(Graph* graph, int* p3, int* p2, int* p1) {
+  Node* level4 = graph->NewNode(&OPA0);
+  Node* level3[] = {graph->NewNode(&OPA1, level4),
+                    graph->NewNode(&OPA1, level4)};
+
+  Node* level2[] = {graph->NewNode(&OPA1, level3[p3[0]]),
+                    graph->NewNode(&OPA1, level3[p3[1]]),
+                    graph->NewNode(&OPA1, level3[p3[0]]),
+                    graph->NewNode(&OPA1, level3[p3[1]])};
+
+  Node* level1[] = {graph->NewNode(&OPA2, level2[p2[0]], level2[p2[1]]),
+                    graph->NewNode(&OPA2, level2[p2[2]], level2[p2[3]])};
+
+  Node* end = graph->NewNode(&OPA2, level1[p1[0]], level1[p1[1]]);
+  graph->SetEnd(end);
+}
+
+
+TEST(SortForwardReduce) {
+  GraphTester graph;
+
+  // Tests combined reductions on a series of DAGs.
+  for (int j = 0; j < 2; j++) {
+    int p3[] = {j, 1 - j};
+    for (int m = 0; m < 2; m++) {
+      int p1[] = {m, 1 - m};
+      for (int k = 0; k < 24; k++) {  // All permutations of 0, 1, 2, 3
+        int p2[] = {-1, -1, -1, -1};
+        int n = k;
+        for (int d = 4; d >= 1; d--) {  // Construct permutation.
+          int p = n % d;
+          for (int z = 0; z < 4; z++) {
+            if (p2[z] == -1) {
+              if (p == 0) p2[z] = d - 1;
+              p--;
+            }
+          }
+          n = n / d;
+        }
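+        // The loop above decodes k in the factorial number system: each digit
+        // picks which remaining free slot receives the next-largest value,
+        // e.g. k = 5 yields p2 = {1, 3, 2, 0}.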
+
+        GenDAG(&graph, p3, p2, p1);
+
+        GraphReducer reducer(&graph);
+        AB2Sorter r1;
+        A1Forwarder r2;
+        InPlaceABReducer r3;
+        reducer.AddReducer(&r1);
+        reducer.AddReducer(&r2);
+        reducer.AddReducer(&r3);
+
+        reducer.ReduceGraph();
+
+        Node* end = graph.end();
+        CHECK_EQ(&OPB2, end->op());
+        Node* n1 = end->InputAt(0);
+        Node* n2 = end->InputAt(1);
+        CHECK_NE(n1, n2);
+        CHECK(n1->id() < n2->id());
+        CHECK_EQ(&OPB2, n1->op());
+        CHECK_EQ(&OPB2, n2->op());
+        Node* n4 = n1->InputAt(0);
+        CHECK_EQ(&OPB0, n4->op());
+        CHECK_EQ(n4, n1->InputAt(1));
+        CHECK_EQ(n4, n2->InputAt(0));
+        CHECK_EQ(n4, n2->InputAt(1));
+      }
+    }
+  }
+}
+
+
+TEST(Order) {
+  // Test that the order of reducers doesn't matter, as they should be
+  // rerun for changed nodes.
+  for (int i = 0; i < 2; i++) {
+    GraphTester graph;
+
+    Node* n1 = graph.NewNode(&OPA0);
+    Node* end = graph.NewNode(&OPA1, n1);
+    graph.SetEnd(end);
+
+    GraphReducer reducer(&graph);
+    InPlaceABReducer abr;
+    InPlaceBCReducer bcr;
+    if (i == 0) {
+      reducer.AddReducer(&abr);
+      reducer.AddReducer(&bcr);
+    } else {
+      reducer.AddReducer(&bcr);
+      reducer.AddReducer(&abr);
+    }
+
+    // Tests A* => C* with in-place updates.
+    for (int j = 0; j < 3; j++) {
+      int before = graph.NodeCount();
+      reducer.ReduceGraph();
+      CHECK_EQ(before, graph.NodeCount());
+      CHECK_EQ(&OPC0, n1->op());
+      CHECK_EQ(&OPC1, end->op());
+      CHECK_EQ(n1, end->InputAt(0));
+    }
+  }
+}
+
+
+// Tests that a reducer is only applied once.
+class OneTimeReducer : public Reducer {
+ public:
+  OneTimeReducer(Reducer* reducer, Zone* zone)
+      : reducer_(reducer),
+        nodes_(NodeSet::key_compare(), NodeSet::allocator_type(zone)) {}
+  virtual Reduction Reduce(Node* node) {
+    CHECK_EQ(0, nodes_.count(node));
+    nodes_.insert(node);
+    return reducer_->Reduce(node);
+  }
+  Reducer* reducer_;
+  NodeSet nodes_;
+};
+
+
+TEST(OneTimeReduce1) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* end = graph.NewNode(&OPA1, n1);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  InPlaceABReducer r;
+  OneTimeReducer once(&r, graph.zone());
+  reducer.AddReducer(&once);
+
+  // Tests A* => B* with in-place updates. Should only be applied once.
+  int before = graph.NodeCount();
+  reducer.ReduceGraph();
+  CHECK_EQ(before, graph.NodeCount());
+  CHECK_EQ(&OPB0, n1->op());
+  CHECK_EQ(&OPB1, end->op());
+  CHECK_EQ(n1, end->InputAt(0));
+}
diff --git a/test/cctest/compiler/test-instruction-selector-arm.cc b/test/cctest/compiler/test-instruction-selector-arm.cc
new file mode 100644 (file)
index 0000000..ef7bbb7
--- /dev/null
@@ -0,0 +1,977 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <list>
+
+#include "test/cctest/compiler/instruction-selector-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+namespace {
+
+typedef RawMachineAssembler::Label MLabel;
+
+struct DPI {
+  Operator* op;                    // machine operator under test
+  ArchOpcode arch_opcode;          // expected opcode for (P0 op P1)
+  ArchOpcode reverse_arch_opcode;  // expected opcode with operands reversed
+  ArchOpcode test_arch_opcode;     // expected flag-setting (test) opcode
+};
+
+
+// ARM data processing instructions.
+class DPIs V8_FINAL : public std::list<DPI>, private HandleAndZoneScope {
+ public:
+  DPIs() {
+    MachineOperatorBuilder machine(main_zone());
+    DPI and_ = {machine.Word32And(), kArmAnd, kArmAnd, kArmTst};
+    push_back(and_);
+    DPI or_ = {machine.Word32Or(), kArmOrr, kArmOrr, kArmOrr};
+    push_back(or_);
+    DPI xor_ = {machine.Word32Xor(), kArmEor, kArmEor, kArmTeq};
+    push_back(xor_);
+    DPI add = {machine.Int32Add(), kArmAdd, kArmAdd, kArmCmn};
+    push_back(add);
+    DPI sub = {machine.Int32Sub(), kArmSub, kArmRsb, kArmCmp};
+    push_back(sub);
+  }
+};
+
+
+// ARM immediates.
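+// An ARM mode-1 immediate encodes an 8-bit value rotated right by an even
+// amount, e.g. imm8 = 0xFF with rot4 = 4 encodes 0xF000000F.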
+class Immediates V8_FINAL : public std::list<int32_t> {
+ public:
+  Immediates() {
+    for (uint32_t imm8 = 0; imm8 < 256; ++imm8) {
+      for (uint32_t rot4 = 0; rot4 < 32; rot4 += 2) {
+        // Rotate right; the mask avoids an undefined shift by 32 when
+        // rot4 == 0.
+        int32_t imm = (imm8 >> rot4) | (imm8 << ((32 - rot4) & 31));
+        CHECK(Assembler::ImmediateFitsAddrMode1Instruction(imm));
+        push_back(imm);
+      }
+    }
+  }
+};
+
+
+struct Shift {
+  Operator* op;
+  int32_t i_low;          // lowest possible immediate
+  int32_t i_high;         // highest possible immediate
+  AddressingMode i_mode;  // Operand2_R_<shift>_I
+  AddressingMode r_mode;  // Operand2_R_<shift>_R
+};
+
+
+// ARM shifts.
+class Shifts V8_FINAL : public std::list<Shift>, private HandleAndZoneScope {
+ public:
+  Shifts() {
+    MachineOperatorBuilder machine(main_zone());
+    Shift sar = {machine.Word32Sar(), 1, 32, kMode_Operand2_R_ASR_I,
+                 kMode_Operand2_R_ASR_R};
+    Shift shl = {machine.Word32Shl(), 0, 31, kMode_Operand2_R_LSL_I,
+                 kMode_Operand2_R_LSL_R};
+    Shift shr = {machine.Word32Shr(), 1, 32, kMode_Operand2_R_LSR_I,
+                 kMode_Operand2_R_LSR_R};
+    push_back(sar);
+    push_back(shl);
+    push_back(shr);
+  }
+};
+
+}  // namespace
+
+
+TEST(InstructionSelectorDPIP) {
+  DPIs dpis;
+  for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
+    DPI dpi = *i;
+    InstructionSelectorTester m;
+    m.Return(m.NewNode(dpi.op, m.Parameter(0), m.Parameter(1)));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(dpi.arch_opcode, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+  }
+}
+
+
+TEST(InstructionSelectorDPIAndShiftP) {
+  DPIs dpis;
+  Shifts shifts;
+  for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
+    DPI dpi = *i;
+    for (Shifts::const_iterator j = shifts.begin(); j != shifts.end(); ++j) {
+      Shift shift = *j;
+      {
+        InstructionSelectorTester m;
+        m.Return(
+            m.NewNode(dpi.op, m.Parameter(0),
+                      m.NewNode(shift.op, m.Parameter(1), m.Parameter(2))));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(dpi.arch_opcode, m.code[0]->arch_opcode());
+        CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+      }
+      {
+        InstructionSelectorTester m;
+        m.Return(m.NewNode(dpi.op,
+                           m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)),
+                           m.Parameter(2)));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(dpi.reverse_arch_opcode, m.code[0]->arch_opcode());
+        CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+      }
+    }
+  }
+}
+
+
+TEST(InstructionSelectorDPIAndShiftImm) {
+  DPIs dpis;
+  Shifts shifts;
+  for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
+    DPI dpi = *i;
+    for (Shifts::const_iterator j = shifts.begin(); j != shifts.end(); ++j) {
+      Shift shift = *j;
+      for (int32_t imm = shift.i_low; imm <= shift.i_high; ++imm) {
+        {
+          InstructionSelectorTester m;
+          m.Return(m.NewNode(
+              dpi.op, m.Parameter(0),
+              m.NewNode(shift.op, m.Parameter(1), m.Int32Constant(imm))));
+          m.SelectInstructions();
+          CHECK_EQ(1, m.code.size());
+          CHECK_EQ(dpi.arch_opcode, m.code[0]->arch_opcode());
+          CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+        }
+        {
+          InstructionSelectorTester m;
+          m.Return(m.NewNode(
+              dpi.op, m.NewNode(shift.op, m.Parameter(0), m.Int32Constant(imm)),
+              m.Parameter(1)));
+          m.SelectInstructions();
+          CHECK_EQ(1, m.code.size());
+          CHECK_EQ(dpi.reverse_arch_opcode, m.code[0]->arch_opcode());
+          CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+        }
+      }
+    }
+  }
+}
+
+
+TEST(InstructionSelectorWord32AndAndWord32XorWithMinus1P) {
+  {
+    InstructionSelectorTester m;
+    m.Return(m.Word32And(m.Parameter(0),
+                         m.Word32Xor(m.Int32Constant(-1), m.Parameter(1))));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+  }
+  {
+    InstructionSelectorTester m;
+    m.Return(m.Word32And(m.Parameter(0),
+                         m.Word32Xor(m.Parameter(1), m.Int32Constant(-1))));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+  }
+  {
+    InstructionSelectorTester m;
+    m.Return(m.Word32And(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0)),
+                         m.Parameter(1)));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+  }
+  {
+    InstructionSelectorTester m;
+    m.Return(m.Word32And(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)),
+                         m.Parameter(1)));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+  }
+}
+
+
+TEST(InstructionSelectorWord32XorWithMinus1P) {
+  {
+    InstructionSelectorTester m;
+    m.Return(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0)));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmMvn, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+  }
+  {
+    InstructionSelectorTester m;
+    m.Return(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmMvn, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+  }
+}
+
+
+TEST(InstructionSelectorInt32MulP) {
+  InstructionSelectorTester m;
+  m.Return(m.Int32Mul(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(1, m.code.size());
+  CHECK_EQ(kArmMul, m.code[0]->arch_opcode());
+}
+
+
+TEST(InstructionSelectorInt32MulImm) {
+  // x * (2^k + 1) -> (x << k) + x
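+  // e.g. x * 9 = x * (2^3 + 1) selects a single ADD of x with x, LSL #3.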
+  for (int k = 1; k < 31; ++k) {
+    InstructionSelectorTester m;
+    m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) + 1)));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R_LSL_I, m.code[0]->addressing_mode());
+  }
+  // (2^k + 1) * x -> (x << k) + x
+  for (int k = 1; k < 31; ++k) {
+    InstructionSelectorTester m;
+    m.Return(m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(0)));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R_LSL_I, m.code[0]->addressing_mode());
+  }
+  // x * (2^k - 1) -> (x << k) - x
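+  // RSB computes operand2 - operand1, so (x << k) - x maps to kArmRsb.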
+  for (int k = 3; k < 31; ++k) {
+    InstructionSelectorTester m;
+    m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) - 1)));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmRsb, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R_LSL_I, m.code[0]->addressing_mode());
+  }
+  // (2^k - 1) * x -> (x << k) - x
+  for (int k = 3; k < 31; ++k) {
+    InstructionSelectorTester m;
+    m.Return(m.Int32Mul(m.Int32Constant((1 << k) - 1), m.Parameter(0)));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmRsb, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R_LSL_I, m.code[0]->addressing_mode());
+  }
+}
+
+
+// The following tests depend on the exact CPU features available, which we
+// can only fully control in a simulator build.
+#ifdef USE_SIMULATOR
+
+TEST(InstructionSelectorDPIImm_ARMv7AndVFP3Disabled) {
+  i::FLAG_enable_armv7 = false;
+  i::FLAG_enable_vfp3 = false;
+  DPIs dpis;
+  Immediates immediates;
+  for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
+    DPI dpi = *i;
+    for (Immediates::const_iterator j = immediates.begin();
+         j != immediates.end(); ++j) {
+      int32_t imm = *j;
+      {
+        InstructionSelectorTester m;
+        m.Return(m.NewNode(dpi.op, m.Parameter(0), m.Int32Constant(imm)));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(dpi.arch_opcode, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+      }
+      {
+        InstructionSelectorTester m;
+        m.Return(m.NewNode(dpi.op, m.Int32Constant(imm), m.Parameter(0)));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(dpi.reverse_arch_opcode, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+      }
+    }
+  }
+}
+
+
+TEST(InstructionSelectorWord32AndImm_ARMv7Enabled) {
+  i::FLAG_enable_armv7 = true;
+  for (uint32_t width = 1; width <= 32; ++width) {
+    InstructionSelectorTester m;
+    m.Return(m.Word32And(m.Parameter(0),
+                         m.Int32Constant(0xffffffffu >> (32 - width))));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmUbfx, m.code[0]->arch_opcode());
+    CHECK_EQ(3, m.code[0]->InputCount());
+    CHECK_EQ(0, m.ToInt32(m.code[0]->InputAt(1)));
+    CHECK_EQ(width, m.ToInt32(m.code[0]->InputAt(2)));
+  }
+  for (uint32_t lsb = 0; lsb <= 31; ++lsb) {
+    for (uint32_t width = 1; width < 32 - lsb; ++width) {
+      uint32_t msk = ~((0xffffffffu >> (32 - width)) << lsb);
+      InstructionSelectorTester m;
+      m.Return(m.Word32And(m.Parameter(0), m.Int32Constant(msk)));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(kArmBfc, m.code[0]->arch_opcode());
+      CHECK_EQ(1, m.code[0]->OutputCount());
+      CHECK(UnallocatedOperand::cast(m.code[0]->Output())
+                ->HasSameAsInputPolicy());
+      CHECK_EQ(3, m.code[0]->InputCount());
+      CHECK_EQ(lsb, m.ToInt32(m.code[0]->InputAt(1)));
+      CHECK_EQ(width, m.ToInt32(m.code[0]->InputAt(2)));
+    }
+  }
+}
+
+
+TEST(InstructionSelectorWord32AndAndWord32ShrImm_ARMv7Enabled) {
+  i::FLAG_enable_armv7 = true;
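+  // (x >> lsb) & ((1 << width) - 1) extracts bits [lsb, lsb + width), which
+  // is exactly ubfx r0, r1, #lsb, #width.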
+  for (uint32_t lsb = 0; lsb <= 31; ++lsb) {
+    for (uint32_t width = 1; width <= 32 - lsb; ++width) {
+      {
+        InstructionSelectorTester m;
+        m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb)),
+                             m.Int32Constant(0xffffffffu >> (32 - width))));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(kArmUbfx, m.code[0]->arch_opcode());
+        CHECK_EQ(3, m.code[0]->InputCount());
+        CHECK_EQ(lsb, m.ToInt32(m.code[0]->InputAt(1)));
+        CHECK_EQ(width, m.ToInt32(m.code[0]->InputAt(2)));
+      }
+      {
+        InstructionSelectorTester m;
+        m.Return(
+            m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
+                        m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb))));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(kArmUbfx, m.code[0]->arch_opcode());
+        CHECK_EQ(3, m.code[0]->InputCount());
+        CHECK_EQ(lsb, m.ToInt32(m.code[0]->InputAt(1)));
+        CHECK_EQ(width, m.ToInt32(m.code[0]->InputAt(2)));
+      }
+    }
+  }
+}
+
+
+TEST(InstructionSelectorWord32ShrAndWord32AndImm_ARMv7Enabled) {
+  i::FLAG_enable_armv7 = true;
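+  // (x & msk) >> lsb is also a bit field extract as long as msk covers bits
+  // [lsb, lsb + width): the random junk below bit lsb is shifted out anyway.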
+  for (uint32_t lsb = 0; lsb <= 31; ++lsb) {
+    for (uint32_t width = 1; width <= 32 - lsb; ++width) {
+      uint32_t max = 1 << lsb;
+      if (max > kMaxInt) max -= 1;
+      uint32_t jnk = CcTest::random_number_generator()->NextInt(max);
+      uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+      {
+        InstructionSelectorTester m;
+        m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
+                             m.Int32Constant(lsb)));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(kArmUbfx, m.code[0]->arch_opcode());
+        CHECK_EQ(3, m.code[0]->InputCount());
+        CHECK_EQ(lsb, m.ToInt32(m.code[0]->InputAt(1)));
+        CHECK_EQ(width, m.ToInt32(m.code[0]->InputAt(2)));
+      }
+      {
+        InstructionSelectorTester m;
+        m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
+                             m.Int32Constant(lsb)));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(kArmUbfx, m.code[0]->arch_opcode());
+        CHECK_EQ(3, m.code[0]->InputCount());
+        CHECK_EQ(lsb, m.ToInt32(m.code[0]->InputAt(1)));
+        CHECK_EQ(width, m.ToInt32(m.code[0]->InputAt(2)));
+      }
+    }
+  }
+}
+
+
+TEST(InstructionSelectorInt32SubAndInt32MulP_MlsEnabled) {
+  i::FLAG_enable_mls = true;
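+  // mls r0, r1, r2, r3 computes r3 - r1 * r2, so a - b * c fits in a single
+  // instruction when the multiply-subtract extension is available.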
+  InstructionSelectorTester m;
+  m.Return(
+      m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+  m.SelectInstructions();
+  CHECK_EQ(1, m.code.size());
+  CHECK_EQ(kArmMls, m.code[0]->arch_opcode());
+}
+
+
+TEST(InstructionSelectorInt32SubAndInt32MulP_MlsDisabled) {
+  i::FLAG_enable_mls = false;
+  InstructionSelectorTester m;
+  m.Return(
+      m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+  m.SelectInstructions();
+  CHECK_EQ(2, m.code.size());
+  CHECK_EQ(kArmMul, m.code[0]->arch_opcode());
+  CHECK_EQ(1, m.code[0]->OutputCount());
+  CHECK_EQ(kArmSub, m.code[1]->arch_opcode());
+  CHECK_EQ(2, m.code[1]->InputCount());
+  CheckSameVreg(m.code[0]->Output(), m.code[1]->InputAt(1));
+}
+
+
+TEST(InstructionSelectorInt32DivP_ARMv7AndSudivEnabled) {
+  i::FLAG_enable_armv7 = true;
+  i::FLAG_enable_sudiv = true;
+  InstructionSelectorTester m;
+  m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(1, m.code.size());
+  CHECK_EQ(kArmSdiv, m.code[0]->arch_opcode());
+}
+
+
+TEST(InstructionSelectorInt32DivP_SudivDisabled) {
+  i::FLAG_enable_sudiv = false;
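+  // Without the sdiv instruction, integer division falls back to VFP:
+  // convert both inputs to double, divide with vdiv.f64, and truncate the
+  // quotient back to an integer, as checked below.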
+  InstructionSelectorTester m;
+  m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(4, m.code.size());
+  CHECK_EQ(kArmVcvtF64S32, m.code[0]->arch_opcode());
+  CHECK_EQ(1, m.code[0]->OutputCount());
+  CHECK_EQ(kArmVcvtF64S32, m.code[1]->arch_opcode());
+  CHECK_EQ(1, m.code[1]->OutputCount());
+  CHECK_EQ(kArmVdivF64, m.code[2]->arch_opcode());
+  CHECK_EQ(2, m.code[2]->InputCount());
+  CHECK_EQ(1, m.code[2]->OutputCount());
+  CheckSameVreg(m.code[0]->Output(), m.code[2]->InputAt(0));
+  CheckSameVreg(m.code[1]->Output(), m.code[2]->InputAt(1));
+  CHECK_EQ(kArmVcvtS32F64, m.code[3]->arch_opcode());
+  CHECK_EQ(1, m.code[3]->InputCount());
+  CheckSameVreg(m.code[2]->Output(), m.code[3]->InputAt(0));
+}
+
+
+TEST(InstructionSelectorInt32UDivP_ARMv7AndSudivEnabled) {
+  i::FLAG_enable_armv7 = true;
+  i::FLAG_enable_sudiv = true;
+  InstructionSelectorTester m;
+  m.Return(m.Int32UDiv(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(1, m.code.size());
+  CHECK_EQ(kArmUdiv, m.code[0]->arch_opcode());
+}
+
+
+TEST(InstructionSelectorInt32UDivP_SudivDisabled) {
+  i::FLAG_enable_sudiv = false;
+  InstructionSelectorTester m;
+  m.Return(m.Int32UDiv(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(4, m.code.size());
+  CHECK_EQ(kArmVcvtF64U32, m.code[0]->arch_opcode());
+  CHECK_EQ(1, m.code[0]->OutputCount());
+  CHECK_EQ(kArmVcvtF64U32, m.code[1]->arch_opcode());
+  CHECK_EQ(1, m.code[1]->OutputCount());
+  CHECK_EQ(kArmVdivF64, m.code[2]->arch_opcode());
+  CHECK_EQ(2, m.code[2]->InputCount());
+  CHECK_EQ(1, m.code[2]->OutputCount());
+  CheckSameVreg(m.code[0]->Output(), m.code[2]->InputAt(0));
+  CheckSameVreg(m.code[1]->Output(), m.code[2]->InputAt(1));
+  CHECK_EQ(kArmVcvtU32F64, m.code[3]->arch_opcode());
+  CHECK_EQ(1, m.code[3]->InputCount());
+  CheckSameVreg(m.code[2]->Output(), m.code[3]->InputAt(0));
+}
+
+
+TEST(InstructionSelectorInt32ModP_ARMv7AndMlsAndSudivEnabled) {
+  i::FLAG_enable_armv7 = true;
+  i::FLAG_enable_mls = true;
+  i::FLAG_enable_sudiv = true;
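+  // Modulus is lowered as x - (x / y) * y; with sdiv and mls available this
+  // fuses into just two instructions.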
+  InstructionSelectorTester m;
+  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(2, m.code.size());
+  CHECK_EQ(kArmSdiv, m.code[0]->arch_opcode());
+  CHECK_EQ(1, m.code[0]->OutputCount());
+  CHECK_EQ(2, m.code[0]->InputCount());
+  CHECK_EQ(kArmMls, m.code[1]->arch_opcode());
+  CHECK_EQ(1, m.code[1]->OutputCount());
+  CHECK_EQ(3, m.code[1]->InputCount());
+  CheckSameVreg(m.code[0]->Output(), m.code[1]->InputAt(0));
+  CheckSameVreg(m.code[0]->InputAt(1), m.code[1]->InputAt(1));
+  CheckSameVreg(m.code[0]->InputAt(0), m.code[1]->InputAt(2));
+}
+
+
+TEST(InstructionSelectorInt32ModP_ARMv7AndSudivEnabled) {
+  i::FLAG_enable_armv7 = true;
+  i::FLAG_enable_mls = false;
+  i::FLAG_enable_sudiv = true;
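+  // Without mls, the multiply-subtract half of x - (x / y) * y is split into
+  // separate mul and sub instructions, for three instructions in total.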
+  InstructionSelectorTester m;
+  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(3, m.code.size());
+  CHECK_EQ(kArmSdiv, m.code[0]->arch_opcode());
+  CHECK_EQ(1, m.code[0]->OutputCount());
+  CHECK_EQ(2, m.code[0]->InputCount());
+  CHECK_EQ(kArmMul, m.code[1]->arch_opcode());
+  CHECK_EQ(1, m.code[1]->OutputCount());
+  CHECK_EQ(2, m.code[1]->InputCount());
+  CheckSameVreg(m.code[0]->Output(), m.code[1]->InputAt(0));
+  CheckSameVreg(m.code[0]->InputAt(1), m.code[1]->InputAt(1));
+  CHECK_EQ(kArmSub, m.code[2]->arch_opcode());
+  CHECK_EQ(1, m.code[2]->OutputCount());
+  CHECK_EQ(2, m.code[2]->InputCount());
+  CheckSameVreg(m.code[0]->InputAt(0), m.code[2]->InputAt(0));
+  CheckSameVreg(m.code[1]->Output(), m.code[2]->InputAt(1));
+}
+
+
+TEST(InstructionSelectorInt32ModP_ARMv7AndMlsAndSudivDisabled) {
+  i::FLAG_enable_armv7 = false;
+  i::FLAG_enable_mls = false;
+  i::FLAG_enable_sudiv = false;
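+  // With neither sdiv nor mls available, the quotient goes through the VFP
+  // fallback (two converts, vdiv.f64, convert back) before the mul/sub
+  // remainder step: six instructions in total.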
+  InstructionSelectorTester m;
+  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(6, m.code.size());
+  CHECK_EQ(kArmVcvtF64S32, m.code[0]->arch_opcode());
+  CHECK_EQ(1, m.code[0]->OutputCount());
+  CHECK_EQ(kArmVcvtF64S32, m.code[1]->arch_opcode());
+  CHECK_EQ(1, m.code[1]->OutputCount());
+  CHECK_EQ(kArmVdivF64, m.code[2]->arch_opcode());
+  CHECK_EQ(2, m.code[2]->InputCount());
+  CHECK_EQ(1, m.code[2]->OutputCount());
+  CheckSameVreg(m.code[0]->Output(), m.code[2]->InputAt(0));
+  CheckSameVreg(m.code[1]->Output(), m.code[2]->InputAt(1));
+  CHECK_EQ(kArmVcvtS32F64, m.code[3]->arch_opcode());
+  CHECK_EQ(1, m.code[3]->InputCount());
+  CheckSameVreg(m.code[2]->Output(), m.code[3]->InputAt(0));
+  CHECK_EQ(kArmMul, m.code[4]->arch_opcode());
+  CHECK_EQ(1, m.code[4]->OutputCount());
+  CHECK_EQ(2, m.code[4]->InputCount());
+  CheckSameVreg(m.code[3]->Output(), m.code[4]->InputAt(0));
+  CheckSameVreg(m.code[1]->InputAt(0), m.code[4]->InputAt(1));
+  CHECK_EQ(kArmSub, m.code[5]->arch_opcode());
+  CHECK_EQ(1, m.code[5]->OutputCount());
+  CHECK_EQ(2, m.code[5]->InputCount());
+  CheckSameVreg(m.code[0]->InputAt(0), m.code[5]->InputAt(0));
+  CheckSameVreg(m.code[4]->Output(), m.code[5]->InputAt(1));
+}
+
+
+TEST(InstructionSelectorInt32UModP_ARMv7AndMlsAndSudivEnabled) {
+  i::FLAG_enable_armv7 = true;
+  i::FLAG_enable_mls = true;
+  i::FLAG_enable_sudiv = true;
+  InstructionSelectorTester m;
+  m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(2, m.code.size());
+  CHECK_EQ(kArmUdiv, m.code[0]->arch_opcode());
+  CHECK_EQ(1, m.code[0]->OutputCount());
+  CHECK_EQ(2, m.code[0]->InputCount());
+  CHECK_EQ(kArmMls, m.code[1]->arch_opcode());
+  CHECK_EQ(1, m.code[1]->OutputCount());
+  CHECK_EQ(3, m.code[1]->InputCount());
+  CheckSameVreg(m.code[0]->Output(), m.code[1]->InputAt(0));
+  CheckSameVreg(m.code[0]->InputAt(1), m.code[1]->InputAt(1));
+  CheckSameVreg(m.code[0]->InputAt(0), m.code[1]->InputAt(2));
+}
+
+
+TEST(InstructionSelectorInt32UModP_ARMv7AndSudivEnabled) {
+  i::FLAG_enable_armv7 = true;
+  i::FLAG_enable_mls = false;
+  i::FLAG_enable_sudiv = true;
+  InstructionSelectorTester m;
+  m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(3, m.code.size());
+  CHECK_EQ(kArmUdiv, m.code[0]->arch_opcode());
+  CHECK_EQ(1, m.code[0]->OutputCount());
+  CHECK_EQ(2, m.code[0]->InputCount());
+  CHECK_EQ(kArmMul, m.code[1]->arch_opcode());
+  CHECK_EQ(1, m.code[1]->OutputCount());
+  CHECK_EQ(2, m.code[1]->InputCount());
+  CheckSameVreg(m.code[0]->Output(), m.code[1]->InputAt(0));
+  CheckSameVreg(m.code[0]->InputAt(1), m.code[1]->InputAt(1));
+  CHECK_EQ(kArmSub, m.code[2]->arch_opcode());
+  CHECK_EQ(1, m.code[2]->OutputCount());
+  CHECK_EQ(2, m.code[2]->InputCount());
+  CheckSameVreg(m.code[0]->InputAt(0), m.code[2]->InputAt(0));
+  CheckSameVreg(m.code[1]->Output(), m.code[2]->InputAt(1));
+}
+
+
+TEST(InstructionSelectorInt32UModP_ARMv7AndMlsAndSudivDisabled) {
+  i::FLAG_enable_armv7 = false;
+  i::FLAG_enable_mls = false;
+  i::FLAG_enable_sudiv = false;
+  InstructionSelectorTester m;
+  m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(6, m.code.size());
+  CHECK_EQ(kArmVcvtF64U32, m.code[0]->arch_opcode());
+  CHECK_EQ(1, m.code[0]->OutputCount());
+  CHECK_EQ(kArmVcvtF64U32, m.code[1]->arch_opcode());
+  CHECK_EQ(1, m.code[1]->OutputCount());
+  CHECK_EQ(kArmVdivF64, m.code[2]->arch_opcode());
+  CHECK_EQ(2, m.code[2]->InputCount());
+  CHECK_EQ(1, m.code[2]->OutputCount());
+  CheckSameVreg(m.code[0]->Output(), m.code[2]->InputAt(0));
+  CheckSameVreg(m.code[1]->Output(), m.code[2]->InputAt(1));
+  CHECK_EQ(kArmVcvtU32F64, m.code[3]->arch_opcode());
+  CHECK_EQ(1, m.code[3]->InputCount());
+  CheckSameVreg(m.code[2]->Output(), m.code[3]->InputAt(0));
+  CHECK_EQ(kArmMul, m.code[4]->arch_opcode());
+  CHECK_EQ(1, m.code[4]->OutputCount());
+  CHECK_EQ(2, m.code[4]->InputCount());
+  CheckSameVreg(m.code[3]->Output(), m.code[4]->InputAt(0));
+  CheckSameVreg(m.code[1]->InputAt(0), m.code[4]->InputAt(1));
+  CHECK_EQ(kArmSub, m.code[5]->arch_opcode());
+  CHECK_EQ(1, m.code[5]->OutputCount());
+  CHECK_EQ(2, m.code[5]->InputCount());
+  CheckSameVreg(m.code[0]->InputAt(0), m.code[5]->InputAt(0));
+  CheckSameVreg(m.code[4]->Output(), m.code[5]->InputAt(1));
+}
+
+#endif  // USE_SIMULATOR
+
+
+TEST(InstructionSelectorWord32EqualP) {
+  InstructionSelectorTester m;
+  m.Return(m.Word32Equal(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(1, m.code.size());
+  CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
+  CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+  CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+  CHECK_EQ(kEqual, m.code[0]->flags_condition());
+}
+
+
+TEST(InstructionSelectorWord32EqualImm) {
+  Immediates immediates;
+  for (Immediates::const_iterator i = immediates.begin(); i != immediates.end();
+       ++i) {
+    int32_t imm = *i;
+    {
+      InstructionSelectorTester m;
+      m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(imm)));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
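+      // Equality with zero needs no immediate: tst rN, rN sets the Z flag
+      // exactly when rN is zero. Any other constant uses cmp rN, #imm.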
+      if (imm == 0) {
+        CHECK_EQ(kArmTst, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+        CHECK_EQ(2, m.code[0]->InputCount());
+        CheckSameVreg(m.code[0]->InputAt(0), m.code[0]->InputAt(1));
+      } else {
+        CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+      }
+      CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+    {
+      InstructionSelectorTester m;
+      m.Return(m.Word32Equal(m.Int32Constant(imm), m.Parameter(0)));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      if (imm == 0) {
+        CHECK_EQ(kArmTst, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+        CHECK_EQ(2, m.code[0]->InputCount());
+        CheckSameVreg(m.code[0]->InputAt(0), m.code[0]->InputAt(1));
+      } else {
+        CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+      }
+      CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+  }
+}
+
+
+TEST(InstructionSelectorWord32EqualAndDPIP) {
+  DPIs dpis;
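+  // Comparing a data-processing result against zero folds the compare away:
+  // the selector emits the flag-setting form of the operation itself and
+  // turns the equality check into a flags condition.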
+  for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
+    DPI dpi = *i;
+    {
+      InstructionSelectorTester m;
+      m.Return(m.Word32Equal(m.NewNode(dpi.op, m.Parameter(0), m.Parameter(1)),
+                             m.Int32Constant(0)));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
+      CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+      CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+    {
+      InstructionSelectorTester m;
+      m.Return(
+          m.Word32Equal(m.Int32Constant(0),
+                        m.NewNode(dpi.op, m.Parameter(0), m.Parameter(1))));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
+      CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+      CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+  }
+}
+
+
+TEST(InstructionSelectorWord32EqualAndDPIImm) {
+  DPIs dpis;
+  Immediates immediates;
+  for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
+    DPI dpi = *i;
+    for (Immediates::const_iterator j = immediates.begin();
+         j != immediates.end(); ++j) {
+      int32_t imm = *j;
+      {
+        InstructionSelectorTester m;
+        m.Return(m.Word32Equal(
+            m.NewNode(dpi.op, m.Parameter(0), m.Int32Constant(imm)),
+            m.Int32Constant(0)));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+        CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+        CHECK_EQ(kEqual, m.code[0]->flags_condition());
+      }
+      {
+        InstructionSelectorTester m;
+        m.Return(m.Word32Equal(
+            m.NewNode(dpi.op, m.Int32Constant(imm), m.Parameter(0)),
+            m.Int32Constant(0)));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+        CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+        CHECK_EQ(kEqual, m.code[0]->flags_condition());
+      }
+      {
+        InstructionSelectorTester m;
+        m.Return(m.Word32Equal(
+            m.Int32Constant(0),
+            m.NewNode(dpi.op, m.Parameter(0), m.Int32Constant(imm))));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+        CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+        CHECK_EQ(kEqual, m.code[0]->flags_condition());
+      }
+      {
+        InstructionSelectorTester m;
+        m.Return(m.Word32Equal(
+            m.Int32Constant(0),
+            m.NewNode(dpi.op, m.Int32Constant(imm), m.Parameter(0))));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+        CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+        CHECK_EQ(kEqual, m.code[0]->flags_condition());
+      }
+    }
+  }
+}
+
+
+TEST(InstructionSelectorWord32EqualAndShiftP) {
+  Shifts shifts;
+  for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
+    Shift shift = *i;
+    {
+      InstructionSelectorTester m;
+      m.Return(m.Word32Equal(
+          m.Parameter(0), m.NewNode(shift.op, m.Parameter(1), m.Parameter(2))));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
+      CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+      CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+    {
+      InstructionSelectorTester m;
+      m.Return(m.Word32Equal(
+          m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)), m.Parameter(2)));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
+      CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+      CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+  }
+}
+
+
+TEST(InstructionSelectorBranchWithWord32EqualAndShiftP) {
+  Shifts shifts;
+  for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
+    Shift shift = *i;
+    {
+      InstructionSelectorTester m;
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Parameter(0), m.NewNode(shift.op, m.Parameter(1),
+                                                       m.Parameter(2))),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(1));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
+      CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+      CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+    {
+      InstructionSelectorTester m;
+      MLabel blocka, blockb;
+      m.Branch(
+          m.Word32Equal(m.NewNode(shift.op, m.Parameter(1), m.Parameter(2)),
+                        m.Parameter(0)),
+          &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(1));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
+      CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+      CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+  }
+}
+
+
+TEST(InstructionSelectorBranchWithWord32EqualAndShiftImm) {
+  Shifts shifts;
+  for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
+    Shift shift = *i;
+    for (int32_t imm = shift.i_low; imm <= shift.i_high; ++imm) {
+      {
+        InstructionSelectorTester m;
+        MLabel blocka, blockb;
+        m.Branch(
+            m.Word32Equal(m.Parameter(0), m.NewNode(shift.op, m.Parameter(1),
+                                                    m.Int32Constant(imm))),
+            &blocka, &blockb);
+        m.Bind(&blocka);
+        m.Return(m.Int32Constant(1));
+        m.Bind(&blockb);
+        m.Return(m.Int32Constant(0));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
+        CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+        CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+        CHECK_EQ(kEqual, m.code[0]->flags_condition());
+      }
+      {
+        InstructionSelectorTester m;
+        MLabel blocka, blockb;
+        m.Branch(m.Word32Equal(
+                     m.NewNode(shift.op, m.Parameter(1), m.Int32Constant(imm)),
+                     m.Parameter(0)),
+                 &blocka, &blockb);
+        m.Bind(&blocka);
+        m.Return(m.Int32Constant(1));
+        m.Bind(&blockb);
+        m.Return(m.Int32Constant(0));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
+        CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+        CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+        CHECK_EQ(kEqual, m.code[0]->flags_condition());
+      }
+    }
+  }
+}
+
+
+TEST(InstructionSelectorBranchWithDPIP) {
+  DPIs dpis;
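+  // Branching on a data-processing result tests it against zero: the bare
+  // value branches on kNotEqual, while wrapping it in Word32Equal with zero
+  // flips the condition to kEqual.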
+  for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
+    DPI dpi = *i;
+    {
+      InstructionSelectorTester m;
+      MLabel blocka, blockb;
+      m.Branch(m.NewNode(dpi.op, m.Parameter(0), m.Parameter(1)), &blocka,
+               &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(1));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
+      CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+      CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+      CHECK_EQ(kNotEqual, m.code[0]->flags_condition());
+    }
+    {
+      InstructionSelectorTester m;
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Int32Constant(0),
+                             m.NewNode(dpi.op, m.Parameter(0), m.Parameter(1))),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(1));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
+      CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+      CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+    {
+      InstructionSelectorTester m;
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.NewNode(dpi.op, m.Parameter(0), m.Parameter(1)),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(1));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
+      CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+      CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+  }
+}
diff --git a/test/cctest/compiler/test-instruction-selector.cc b/test/cctest/compiler/test-instruction-selector.cc
new file mode 100644 (file)
index 0000000..a82ceb2
--- /dev/null
@@ -0,0 +1,18 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/compiler/instruction-selector-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(InstructionSelectionReturnZero) {
+  InstructionSelectorTester m(InstructionSelectorTester::kInternalMode);
+  m.Return(m.Int32Constant(0));
+  m.SelectInstructions();
+  CHECK_EQ(2, m.code.size());
+  CHECK_EQ(kArchNop, m.code[0]->opcode());
+  CHECK_EQ(kArchRet, m.code[1]->opcode());
+  CHECK_EQ(1, m.code[1]->InputCount());
+}
diff --git a/test/cctest/compiler/test-instruction.cc b/test/cctest/compiler/test-instruction.cc
new file mode 100644 (file)
index 0000000..c16e150
--- /dev/null
@@ -0,0 +1,349 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/code-generator.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/scheduler.h"
+#include "src/lithium.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+typedef v8::internal::compiler::Instruction TestInstr;
+typedef v8::internal::compiler::InstructionSequence TestInstrSeq;
+
+// A testing helper for the register code abstraction.
+class InstructionTester : public HandleAndZoneScope {
+ public:  // We're all friends here.
+  explicit InstructionTester()
+      : isolate(main_isolate()),
+        graph(zone()),
+        schedule(zone()),
+        info(static_cast<HydrogenCodeStub*>(NULL), main_isolate()),
+        linkage(&info),
+        common(zone()),
+        machine(zone(), kMachineWord32),
+        code(NULL) {}
+
+  Isolate* isolate;
+  Graph graph;
+  Schedule schedule;
+  CompilationInfoWithZone info;
+  Linkage linkage;
+  CommonOperatorBuilder common;
+  MachineOperatorBuilder machine;
+  TestInstrSeq* code;
+
+  Zone* zone() { return main_zone(); }
+
+  void allocCode() {
+    if (schedule.rpo_order()->size() == 0) {
+      // Compute the RPO order.
+      Scheduler scheduler(zone(), &graph, &schedule);
+      scheduler.ComputeSpecialRPO();
+      ASSERT(schedule.rpo_order()->size() > 0);
+    }
+    code = new TestInstrSeq(&linkage, &graph, &schedule);
+  }
+
+  Node* Int32Constant(int32_t val) {
+    Node* node = graph.NewNode(common.Int32Constant(val));
+    schedule.AddNode(schedule.entry(), node);
+    return node;
+  }
+
+  Node* Float64Constant(double val) {
+    Node* node = graph.NewNode(common.Float64Constant(val));
+    schedule.AddNode(schedule.entry(), node);
+    return node;
+  }
+
+  Node* Parameter(int32_t which) {
+    Node* node = graph.NewNode(common.Parameter(which));
+    schedule.AddNode(schedule.entry(), node);
+    return node;
+  }
+
+  Node* NewNode(BasicBlock* block) {
+    Node* node = graph.NewNode(common.Int32Constant(111));
+    schedule.AddNode(block, node);
+    return node;
+  }
+
+  int NewInstr(BasicBlock* block) {
+    InstructionCode opcode = static_cast<InstructionCode>(110);
+    TestInstr* instr = TestInstr::New(zone(), opcode);
+    return code->AddInstruction(instr, block);
+  }
+
+  UnallocatedOperand* NewUnallocated(int vreg) {
+    UnallocatedOperand* unallocated =
+        new (zone()) UnallocatedOperand(UnallocatedOperand::ANY);
+    unallocated->set_virtual_register(vreg);
+    return unallocated;
+  }
+};
+
+
+TEST(InstructionBasic) {
+  InstructionTester R;
+
+  for (int i = 0; i < 10; i++) {
+    R.Int32Constant(i);  // Add some nodes to the graph.
+  }
+
+  BasicBlock* last = R.schedule.entry();
+  for (int i = 0; i < 5; i++) {
+    BasicBlock* block = R.schedule.NewBasicBlock();
+    R.schedule.AddGoto(last, block);
+    last = block;
+  }
+
+  R.allocCode();
+
+  CHECK_EQ(R.graph.NodeCount(), R.code->ValueCount());
+
+  BasicBlockVector* blocks = R.schedule.rpo_order();
+  CHECK_EQ(static_cast<int>(blocks->size()), R.code->BasicBlockCount());
+
+  int index = 0;
+  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end();
+       i++, index++) {
+    BasicBlock* block = *i;
+    CHECK_EQ(block, R.code->BlockAt(index));
+    CHECK_EQ(-1, R.code->GetLoopEnd(block));
+  }
+}
+
+
+TEST(InstructionGetBasicBlock) {
+  InstructionTester R;
+
+  BasicBlock* b0 = R.schedule.entry();
+  BasicBlock* b1 = R.schedule.NewBasicBlock();
+  BasicBlock* b2 = R.schedule.NewBasicBlock();
+  BasicBlock* b3 = R.schedule.exit();
+
+  R.schedule.AddGoto(b0, b1);
+  R.schedule.AddGoto(b1, b2);
+  R.schedule.AddGoto(b2, b3);
+
+  R.allocCode();
+
+  R.code->StartBlock(b0);
+  int i0 = R.NewInstr(b0);
+  int i1 = R.NewInstr(b0);
+  R.code->EndBlock(b0);
+  R.code->StartBlock(b1);
+  int i2 = R.NewInstr(b1);
+  int i3 = R.NewInstr(b1);
+  int i4 = R.NewInstr(b1);
+  int i5 = R.NewInstr(b1);
+  R.code->EndBlock(b1);
+  R.code->StartBlock(b2);
+  int i6 = R.NewInstr(b2);
+  int i7 = R.NewInstr(b2);
+  int i8 = R.NewInstr(b2);
+  R.code->EndBlock(b2);
+  R.code->StartBlock(b3);
+  R.code->EndBlock(b3);
+
+  CHECK_EQ(b0, R.code->GetBasicBlock(i0));
+  CHECK_EQ(b0, R.code->GetBasicBlock(i1));
+
+  CHECK_EQ(b1, R.code->GetBasicBlock(i2));
+  CHECK_EQ(b1, R.code->GetBasicBlock(i3));
+  CHECK_EQ(b1, R.code->GetBasicBlock(i4));
+  CHECK_EQ(b1, R.code->GetBasicBlock(i5));
+
+  CHECK_EQ(b2, R.code->GetBasicBlock(i6));
+  CHECK_EQ(b2, R.code->GetBasicBlock(i7));
+  CHECK_EQ(b2, R.code->GetBasicBlock(i8));
+
+  CHECK_EQ(b0, R.code->GetBasicBlock(b0->first_instruction_index()));
+  CHECK_EQ(b0, R.code->GetBasicBlock(b0->last_instruction_index()));
+
+  CHECK_EQ(b1, R.code->GetBasicBlock(b1->first_instruction_index()));
+  CHECK_EQ(b1, R.code->GetBasicBlock(b1->last_instruction_index()));
+
+  CHECK_EQ(b2, R.code->GetBasicBlock(b2->first_instruction_index()));
+  CHECK_EQ(b2, R.code->GetBasicBlock(b2->last_instruction_index()));
+
+  CHECK_EQ(b3, R.code->GetBasicBlock(b3->first_instruction_index()));
+  CHECK_EQ(b3, R.code->GetBasicBlock(b3->last_instruction_index()));
+}
+
+
+TEST(InstructionIsGapAt) {
+  InstructionTester R;
+
+  BasicBlock* b0 = R.schedule.entry();
+  R.schedule.AddReturn(b0, R.Int32Constant(1));
+
+  R.allocCode();
+  TestInstr* i0 = TestInstr::New(R.zone(), 100);
+  TestInstr* g = TestInstr::New(R.zone(), 103)->MarkAsControl();
+  R.code->StartBlock(b0);
+  R.code->AddInstruction(i0, b0);
+  R.code->AddInstruction(g, b0);
+  R.code->EndBlock(b0);
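+  // Expected layout: the block starts with a label (itself a gap position),
+  // and every instruction is preceded by gap positions reserved for
+  // parallel moves, as the checks below enumerate.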
+
+  CHECK_EQ(true, R.code->InstructionAt(0)->IsBlockStart());
+
+  CHECK_EQ(true, R.code->IsGapAt(0));   // Label
+  CHECK_EQ(true, R.code->IsGapAt(1));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(2));  // i0
+  CHECK_EQ(true, R.code->IsGapAt(3));   // Gap
+  CHECK_EQ(true, R.code->IsGapAt(4));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(5));  // g
+}
+
+
+TEST(InstructionIsGapAt2) {
+  InstructionTester R;
+
+  BasicBlock* b0 = R.schedule.entry();
+  BasicBlock* b1 = R.schedule.exit();
+  R.schedule.AddGoto(b0, b1);
+  R.schedule.AddReturn(b1, R.Int32Constant(1));
+
+  R.allocCode();
+  TestInstr* i0 = TestInstr::New(R.zone(), 100);
+  TestInstr* g = TestInstr::New(R.zone(), 103)->MarkAsControl();
+  R.code->StartBlock(b0);
+  R.code->AddInstruction(i0, b0);
+  R.code->AddInstruction(g, b0);
+  R.code->EndBlock(b0);
+
+  TestInstr* i1 = TestInstr::New(R.zone(), 102);
+  TestInstr* g1 = TestInstr::New(R.zone(), 104)->MarkAsControl();
+  R.code->StartBlock(b1);
+  R.code->AddInstruction(i1, b1);
+  R.code->AddInstruction(g1, b1);
+  R.code->EndBlock(b1);
+
+  CHECK_EQ(true, R.code->InstructionAt(0)->IsBlockStart());
+
+  CHECK_EQ(true, R.code->IsGapAt(0));   // Label
+  CHECK_EQ(true, R.code->IsGapAt(1));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(2));  // i0
+  CHECK_EQ(true, R.code->IsGapAt(3));   // Gap
+  CHECK_EQ(true, R.code->IsGapAt(4));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(5));  // g
+
+  CHECK_EQ(true, R.code->InstructionAt(6)->IsBlockStart());
+
+  CHECK_EQ(true, R.code->IsGapAt(6));    // Label
+  CHECK_EQ(true, R.code->IsGapAt(7));    // Gap
+  CHECK_EQ(false, R.code->IsGapAt(8));   // i1
+  CHECK_EQ(true, R.code->IsGapAt(9));    // Gap
+  CHECK_EQ(true, R.code->IsGapAt(10));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(11));  // g1
+}
+
+
+TEST(InstructionAddGapMove) {
+  InstructionTester R;
+
+  BasicBlock* b0 = R.schedule.entry();
+  R.schedule.AddReturn(b0, R.Int32Constant(1));
+
+  R.allocCode();
+  TestInstr* i0 = TestInstr::New(R.zone(), 100);
+  TestInstr* g = TestInstr::New(R.zone(), 103)->MarkAsControl();
+  R.code->StartBlock(b0);
+  R.code->AddInstruction(i0, b0);
+  R.code->AddInstruction(g, b0);
+  R.code->EndBlock(b0);
+
+  CHECK_EQ(true, R.code->InstructionAt(0)->IsBlockStart());
+
+  CHECK_EQ(true, R.code->IsGapAt(0));   // Label
+  CHECK_EQ(true, R.code->IsGapAt(1));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(2));  // i0
+  CHECK_EQ(true, R.code->IsGapAt(3));   // Gap
+  CHECK_EQ(true, R.code->IsGapAt(4));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(5));  // g
+
+  int indexes[] = {0, 1, 3, 4, -1};
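+  // Each gap position can carry parallel moves; attach one move per gap and
+  // read it back through the gap's START parallel move.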
+  for (int i = 0; indexes[i] >= 0; i++) {
+    int index = indexes[i];
+
+    UnallocatedOperand* op1 = R.NewUnallocated(index + 6);
+    UnallocatedOperand* op2 = R.NewUnallocated(index + 12);
+
+    R.code->AddGapMove(index, op1, op2);
+    GapInstruction* gap = R.code->GapAt(index);
+    ParallelMove* move = gap->GetParallelMove(GapInstruction::START);
+    CHECK_NE(NULL, move);
+    const ZoneList<MoveOperands>* move_operands = move->move_operands();
+    CHECK_EQ(1, move_operands->length());
+    MoveOperands* cur = &move_operands->at(0);
+    CHECK_EQ(op1, cur->source());
+    CHECK_EQ(op2, cur->destination());
+  }
+}
+
+
+TEST(InstructionOperands) {
+  Zone zone(CcTest::InitIsolateOnce());
+
+  {
+    TestInstr* i = TestInstr::New(&zone, 101);
+    CHECK_EQ(0, i->OutputCount());
+    CHECK_EQ(0, i->InputCount());
+    CHECK_EQ(0, i->TempCount());
+  }
+
+  InstructionOperand* outputs[] = {
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER)};
+
+  InstructionOperand* inputs[] = {
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER)};
+
+  InstructionOperand* temps[] = {
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER)};
+
+  for (size_t i = 0; i < ARRAY_SIZE(outputs); i++) {
+    for (size_t j = 0; j < ARRAY_SIZE(inputs); j++) {
+      for (size_t k = 0; k < ARRAY_SIZE(temps); k++) {
+        TestInstr* m =
+            TestInstr::New(&zone, 101, i, outputs, j, inputs, k, temps);
+        CHECK(i == m->OutputCount());
+        CHECK(j == m->InputCount());
+        CHECK(k == m->TempCount());
+
+        for (size_t z = 0; z < i; z++) {
+          CHECK_EQ(outputs[z], m->OutputAt(z));
+        }
+
+        for (size_t z = 0; z < j; z++) {
+          CHECK_EQ(inputs[z], m->InputAt(z));
+        }
+
+        for (size_t z = 0; z < k; z++) {
+          CHECK_EQ(temps[z], m->TempAt(z));
+        }
+      }
+    }
+  }
+}
diff --git a/test/cctest/compiler/test-js-constant-cache.cc b/test/cctest/compiler/test-js-constant-cache.cc
new file mode 100644 (file)
index 0000000..42a606d
--- /dev/null
@@ -0,0 +1,284 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/typer.h"
+#include "src/types.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+class JSCacheTesterHelper {
+ protected:
+  explicit JSCacheTesterHelper(Zone* zone)
+      : main_graph_(zone), main_common_(zone), main_typer_(zone) {}
+  Graph main_graph_;
+  CommonOperatorBuilder main_common_;
+  Typer main_typer_;
+};
+
+
+class JSConstantCacheTester : public HandleAndZoneScope,
+                              public JSCacheTesterHelper,
+                              public JSGraph {
+ public:
+  JSConstantCacheTester()
+      : JSCacheTesterHelper(main_zone()),
+        JSGraph(&main_graph_, &main_common_, &main_typer_) {}
+
+  Type* upper(Node* node) { return NodeProperties::GetBounds(node).upper; }
+
+  Handle<Object> handle(Node* node) {
+    CHECK_EQ(IrOpcode::kHeapConstant, node->opcode());
+    return ValueOf<Handle<Object> >(node->op());
+  }
+
+  Factory* factory() { return main_isolate()->factory(); }
+};
+
+
+TEST(ZeroConstant1) {
+  JSConstantCacheTester T;
+
+  Node* zero = T.ZeroConstant();
+
+  CHECK_EQ(IrOpcode::kNumberConstant, zero->opcode());
+  CHECK_EQ(zero, T.Constant(0));
+  CHECK_NE(zero, T.Constant(-0.0));
+  CHECK_NE(zero, T.Constant(1.0));
+  CHECK_NE(zero, T.Constant(v8::base::OS::nan_value()));
+  CHECK_NE(zero, T.Float64Constant(0));
+  CHECK_NE(zero, T.Int32Constant(0));
+
+  Type* t = T.upper(zero);
+
+  CHECK(t->Is(Type::Number()));
+  CHECK(t->Is(Type::Integral32()));
+  CHECK(t->Is(Type::Signed32()));
+  CHECK(t->Is(Type::Unsigned32()));
+  CHECK(t->Is(Type::SignedSmall()));
+  CHECK(t->Is(Type::UnsignedSmall()));
+}
+
+
+TEST(MinusZeroConstant) {
+  JSConstantCacheTester T;
+
+  Node* minus_zero = T.Constant(-0.0);
+  Node* zero = T.ZeroConstant();
+
+  CHECK_EQ(IrOpcode::kNumberConstant, minus_zero->opcode());
+  CHECK_EQ(minus_zero, T.Constant(-0.0));
+  CHECK_NE(zero, minus_zero);
+
+  Type* t = T.upper(minus_zero);
+
+  CHECK(t->Is(Type::Number()));
+  CHECK(t->Is(Type::MinusZero()));
+  CHECK(!t->Is(Type::Integral32()));
+  CHECK(!t->Is(Type::Signed32()));
+  CHECK(!t->Is(Type::Unsigned32()));
+  CHECK(!t->Is(Type::SignedSmall()));
+  CHECK(!t->Is(Type::UnsignedSmall()));
+
+  double zero_value = ValueOf<double>(zero->op());
+  double minus_zero_value = ValueOf<double>(minus_zero->op());
+
+  CHECK_EQ(0.0, zero_value);
+  CHECK_NE(-0.0, zero_value);
+  CHECK_EQ(-0.0, minus_zero_value);
+  CHECK_NE(0.0, minus_zero_value);
+}
+
+
+TEST(ZeroConstant2) {
+  JSConstantCacheTester T;
+
+  Node* zero = T.Constant(0);
+
+  CHECK_EQ(IrOpcode::kNumberConstant, zero->opcode());
+  CHECK_EQ(zero, T.ZeroConstant());
+  CHECK_NE(zero, T.Constant(-0.0));
+  CHECK_NE(zero, T.Constant(1.0));
+  CHECK_NE(zero, T.Constant(v8::base::OS::nan_value()));
+  CHECK_NE(zero, T.Float64Constant(0));
+  CHECK_NE(zero, T.Int32Constant(0));
+
+  Type* t = T.upper(zero);
+
+  CHECK(t->Is(Type::Number()));
+  CHECK(t->Is(Type::Integral32()));
+  CHECK(t->Is(Type::Signed32()));
+  CHECK(t->Is(Type::Unsigned32()));
+  CHECK(t->Is(Type::SignedSmall()));
+  CHECK(t->Is(Type::UnsignedSmall()));
+}
+
+
+TEST(OneConstant1) {
+  JSConstantCacheTester T;
+
+  Node* one = T.OneConstant();
+
+  CHECK_EQ(IrOpcode::kNumberConstant, one->opcode());
+  CHECK_EQ(one, T.Constant(1));
+  CHECK_EQ(one, T.Constant(1.0));
+  CHECK_NE(one, T.Constant(1.01));
+  CHECK_NE(one, T.Constant(-1.01));
+  CHECK_NE(one, T.Constant(v8::base::OS::nan_value()));
+  CHECK_NE(one, T.Float64Constant(1.0));
+  CHECK_NE(one, T.Int32Constant(1));
+
+  Type* t = T.upper(one);
+
+  CHECK(t->Is(Type::Number()));
+  CHECK(t->Is(Type::Integral32()));
+  CHECK(t->Is(Type::Signed32()));
+  CHECK(t->Is(Type::Unsigned32()));
+  CHECK(t->Is(Type::SignedSmall()));
+  CHECK(t->Is(Type::UnsignedSmall()));
+}
+
+
+TEST(OneConstant2) {
+  JSConstantCacheTester T;
+
+  Node* one = T.Constant(1);
+
+  CHECK_EQ(IrOpcode::kNumberConstant, one->opcode());
+  CHECK_EQ(one, T.OneConstant());
+  CHECK_EQ(one, T.Constant(1.0));
+  CHECK_NE(one, T.Constant(1.01));
+  CHECK_NE(one, T.Constant(-1.01));
+  CHECK_NE(one, T.Constant(v8::base::OS::nan_value()));
+  CHECK_NE(one, T.Float64Constant(1.0));
+  CHECK_NE(one, T.Int32Constant(1));
+
+  Type* t = T.upper(one);
+
+  CHECK(t->Is(Type::Number()));
+  CHECK(t->Is(Type::Integral32()));
+  CHECK(t->Is(Type::Signed32()));
+  CHECK(t->Is(Type::Unsigned32()));
+  CHECK(t->Is(Type::SignedSmall()));
+  CHECK(t->Is(Type::UnsignedSmall()));
+}
+
+
+TEST(Canonicalizations) {
+  JSConstantCacheTester T;
+
+  CHECK_EQ(T.ZeroConstant(), T.ZeroConstant());
+  CHECK_EQ(T.UndefinedConstant(), T.UndefinedConstant());
+  CHECK_EQ(T.TheHoleConstant(), T.TheHoleConstant());
+  CHECK_EQ(T.TrueConstant(), T.TrueConstant());
+  CHECK_EQ(T.FalseConstant(), T.FalseConstant());
+  CHECK_EQ(T.NullConstant(), T.NullConstant());
+  CHECK_EQ(T.ZeroConstant(), T.ZeroConstant());
+  CHECK_EQ(T.OneConstant(), T.OneConstant());
+  CHECK_EQ(T.NaNConstant(), T.NaNConstant());
+}
+
+
+TEST(NoAliasing) {
+  JSConstantCacheTester T;
+
+  Node* nodes[] = {T.UndefinedConstant(), T.TheHoleConstant(), T.TrueConstant(),
+                   T.FalseConstant(),     T.NullConstant(),    T.ZeroConstant(),
+                   T.OneConstant(),       T.NaNConstant(),     T.Constant(21),
+                   T.Constant(22.2)};
+
+  for (size_t i = 0; i < ARRAY_SIZE(nodes); i++) {
+    for (size_t j = 0; j < ARRAY_SIZE(nodes); j++) {
+      if (i != j) CHECK_NE(nodes[i], nodes[j]);
+    }
+  }
+}
+
+
+TEST(CanonicalizingNumbers) {
+  JSConstantCacheTester T;
+
+  FOR_FLOAT64_INPUTS(i) {
+    Node* node = T.Constant(*i);
+    for (int j = 0; j < 5; j++) {
+      CHECK_EQ(node, T.Constant(*i));
+    }
+  }
+}
+
+
+TEST(NumberTypes) {
+  JSConstantCacheTester T;
+
+  FOR_FLOAT64_INPUTS(i) {
+    double value = *i;
+    Node* node = T.Constant(value);
+    CHECK(T.upper(node)->Equals(Type::Of(value, T.main_zone())));
+  }
+}
+
+
+TEST(HeapNumbers) {
+  JSConstantCacheTester T;
+
+  FOR_FLOAT64_INPUTS(i) {
+    double value = *i;
+    Handle<Object> num = T.factory()->NewNumber(value);
+    Handle<HeapNumber> heap = T.factory()->NewHeapNumber(value);
+    Node* node1 = T.Constant(value);
+    Node* node2 = T.Constant(num);
+    Node* node3 = T.Constant(heap);
+    CHECK_EQ(node1, node2);
+    CHECK_EQ(node1, node3);
+  }
+}
+
+
+TEST(OddballHandle) {
+  JSConstantCacheTester T;
+
+  CHECK_EQ(T.UndefinedConstant(), T.Constant(T.factory()->undefined_value()));
+  CHECK_EQ(T.TheHoleConstant(), T.Constant(T.factory()->the_hole_value()));
+  CHECK_EQ(T.TrueConstant(), T.Constant(T.factory()->true_value()));
+  CHECK_EQ(T.FalseConstant(), T.Constant(T.factory()->false_value()));
+  CHECK_EQ(T.NullConstant(), T.Constant(T.factory()->null_value()));
+  CHECK_EQ(T.NaNConstant(), T.Constant(T.factory()->nan_value()));
+}
+
+
+TEST(OddballValues) {
+  JSConstantCacheTester T;
+
+  CHECK_EQ(*T.factory()->undefined_value(), *T.handle(T.UndefinedConstant()));
+  CHECK_EQ(*T.factory()->the_hole_value(), *T.handle(T.TheHoleConstant()));
+  CHECK_EQ(*T.factory()->true_value(), *T.handle(T.TrueConstant()));
+  CHECK_EQ(*T.factory()->false_value(), *T.handle(T.FalseConstant()));
+  CHECK_EQ(*T.factory()->null_value(), *T.handle(T.NullConstant()));
+}
+
+
+TEST(OddballTypes) {
+  JSConstantCacheTester T;
+
+  CHECK(T.upper(T.UndefinedConstant())->Is(Type::Undefined()));
+  // TODO(dcarney): figure this out.
+  // CHECK(T.upper(T.TheHoleConstant())->Is(Type::Internal()));
+  CHECK(T.upper(T.TrueConstant())->Is(Type::Boolean()));
+  CHECK(T.upper(T.FalseConstant())->Is(Type::Boolean()));
+  CHECK(T.upper(T.NullConstant())->Is(Type::Null()));
+  CHECK(T.upper(T.ZeroConstant())->Is(Type::Number()));
+  CHECK(T.upper(T.OneConstant())->Is(Type::Number()));
+  CHECK(T.upper(T.NaNConstant())->Is(Type::NaN()));
+}
+
+
+TEST(ExternalReferences) {
+  // TODO(titzer): test canonicalization of external references.
+}
diff --git a/test/cctest/compiler/test-js-context-specialization.cc b/test/cctest/compiler/test-js-context-specialization.cc
new file mode 100644 (file)
index 0000000..613ad06
--- /dev/null
@@ -0,0 +1,252 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-context-specialization.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/simplified-node-factory.h"
+#include "src/compiler/source-position.h"
+#include "src/compiler/typer.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/function-tester.h"
+#include "test/cctest/compiler/graph-builder-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+class ContextSpecializationTester
+    : public HandleAndZoneScope,
+      public DirectGraphBuilder,
+      public SimplifiedNodeFactory<ContextSpecializationTester> {
+ public:
+  ContextSpecializationTester()
+      : DirectGraphBuilder(new (main_zone()) Graph(main_zone())),
+        common_(main_zone()),
+        javascript_(main_zone()),
+        simplified_(main_zone()),
+        typer_(main_zone()),
+        jsgraph_(graph(), common(), &typer_),
+        info_(main_isolate(), main_zone()) {}
+
+  Factory* factory() { return main_isolate()->factory(); }
+  CommonOperatorBuilder* common() { return &common_; }
+  JSOperatorBuilder* javascript() { return &javascript_; }
+  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+  JSGraph* jsgraph() { return &jsgraph_; }
+  CompilationInfo* info() { return &info_; }
+
+ private:
+  CommonOperatorBuilder common_;
+  JSOperatorBuilder javascript_;
+  SimplifiedOperatorBuilder simplified_;
+  Typer typer_;
+  JSGraph jsgraph_;
+  CompilationInfo info_;
+};
+
+
+TEST(ReduceJSLoadContext) {
+  ContextSpecializationTester t;
+
+  Node* start = t.NewNode(t.common()->Start());
+  t.graph()->SetStart(start);
+
+  // Make a context and initialize it a bit for this test.
+  Handle<Context> native = t.factory()->NewNativeContext();
+  Handle<Context> ctx1 = t.factory()->NewNativeContext();
+  Handle<Context> ctx2 = t.factory()->NewNativeContext();
+  ctx2->set_previous(*ctx1);
+  ctx1->set_previous(*native);
+  Handle<Object> expected = t.factory()->InternalizeUtf8String("gboy!");
+  const int slot = Context::GLOBAL_OBJECT_INDEX;
+  native->set(slot, *expected);
+
+  Node* const_context = t.jsgraph()->Constant(native);
+  Node* param_context = t.NewNode(t.common()->Parameter(0));
+  JSContextSpecializer spec(t.info(), t.jsgraph(), const_context);
+
+  {
+    // Mutable slot, constant context, depth = 0 => do nothing.
+    t.info()->SetContext(native);
+    Node* load = t.NewNode(t.javascript()->LoadContext(0, 0, false),
+                           const_context, start, start);
+    Reduction r = spec.ReduceJSLoadContext(load);
+    CHECK(!r.Changed());
+  }
+
+  {
+    // Mutable slot, non-constant context, depth = 0 => do nothing.
+    t.info()->SetContext(native);
+    Node* load = t.NewNode(t.javascript()->LoadContext(0, 0, false),
+                           param_context, start, start);
+    Reduction r = spec.ReduceJSLoadContext(load);
+    CHECK(!r.Changed());
+  }
+
+  {
+    // Mutable slot, non-constant context, depth > 0 => fold-in parent context.
+    t.info()->SetContext(ctx2);
+    Node* load = t.NewNode(
+        t.javascript()->LoadContext(2, Context::GLOBAL_EVAL_FUN_INDEX, false),
+        param_context, start, start);
+    Reduction r = spec.ReduceJSLoadContext(load);
+    CHECK(r.Changed());
+    CHECK_EQ(IrOpcode::kHeapConstant, r.replacement()->InputAt(0)->opcode());
+    ValueMatcher<Handle<Context> > match(r.replacement()->InputAt(0));
+    CHECK_EQ(*native, *match.Value());
+    ContextAccess access = static_cast<Operator1<ContextAccess>*>(
+                               r.replacement()->op())->parameter();
+    CHECK_EQ(Context::GLOBAL_EVAL_FUN_INDEX, access.index());
+    CHECK_EQ(0, access.depth());
+    CHECK_EQ(false, access.immutable());
+  }
+
+  {
+    // Immutable slot, constant context => specialize.
+    t.info()->SetContext(native);
+    Node* load = t.NewNode(t.javascript()->LoadContext(0, slot, true),
+                           const_context, start, start);
+    Reduction r = spec.ReduceJSLoadContext(load);
+    CHECK(r.Changed());
+    CHECK(r.replacement() != load);
+
+    ValueMatcher<Handle<Object> > match(r.replacement());
+    CHECK(match.HasValue());
+    CHECK_EQ(*expected, *match.Value());
+  }
+
+  {
+    // Immutable slot, non-constant context => specialize.
+    t.info()->SetContext(native);
+    Node* load = t.NewNode(t.javascript()->LoadContext(0, slot, true),
+                           param_context, start, start);
+    Reduction r = spec.ReduceJSLoadContext(load);
+    CHECK(r.Changed());
+    CHECK(r.replacement() != load);
+
+    ValueMatcher<Handle<Object> > match(r.replacement());
+    CHECK(match.HasValue());
+    CHECK_EQ(*expected, *match.Value());
+  }
+
+  // TODO(titzer): test with other kinds of contexts, e.g. a function context.
+  // TODO(sigurds): test that loads below a create-context node are not
+  // optimized.
+}
+
+
+// TODO(titzer): factor out common code with effects checking in typed lowering.
+static void CheckEffectInput(Node* effect, Node* use) {
+  CHECK_EQ(effect, NodeProperties::GetEffectInput(use));
+}
+
+
+TEST(SpecializeToContext) {
+  ContextSpecializationTester t;
+
+  Node* start = t.NewNode(t.common()->Start());
+  t.graph()->SetStart(start);
+
+  // Make a context and initialize it a bit for this test.
+  Handle<Context> native = t.factory()->NewNativeContext();
+  Handle<Object> expected = t.factory()->InternalizeUtf8String("gboy!");
+  const int slot = Context::GLOBAL_OBJECT_INDEX;
+  native->set(slot, *expected);
+  t.info()->SetContext(native);
+
+  Node* const_context = t.jsgraph()->Constant(native);
+  Node* param_context = t.NewNode(t.common()->Parameter(0));
+  JSContextSpecializer spec(t.info(), t.jsgraph(), const_context);
+
+  {
+    // Check that SpecializeToContext() replaces values and forwards effects
+    // correctly, and folds values from constant and non-constant contexts
+    Node* effect_in = t.NewNode(t.common()->Start());
+    Node* load = t.NewNode(t.javascript()->LoadContext(0, slot, true),
+                           const_context, const_context, effect_in, start);
+
+    Node* value_use = t.ChangeTaggedToInt32(load);
+    Node* other_load = t.NewNode(t.javascript()->LoadContext(0, slot, true),
+                                 param_context, param_context, load, start);
+    Node* effect_use = other_load;
+    Node* other_use = t.ChangeTaggedToInt32(other_load);
+
+    // Double-check the graph above is what we expect, or the test is broken.
+    CheckEffectInput(effect_in, load);
+    CheckEffectInput(load, effect_use);
+
+    // Perform the substitution on the entire graph.
+    spec.SpecializeToContext();
+
+    // Effects should have been forwarded (not replaced with a value).
+    CheckEffectInput(effect_in, effect_use);
+
+    // Use of {other_load} should not have been replaced.
+    CHECK_EQ(other_load, other_use->InputAt(0));
+
+    Node* replacement = value_use->InputAt(0);
+    ValueMatcher<Handle<Object> > match(replacement);
+    CHECK(match.HasValue());
+    CHECK_EQ(*expected, *match.Value());
+  }
+  // TODO(titzer): clean up above test and test more complicated effects.
+}
+
+
+TEST(SpecializeJSFunction_ToConstant1) {
+  FunctionTester T(
+      "(function() { var x = 1; function inc(a)"
+      " { return a + x; } return inc; })()");
+
+  T.CheckCall(1.0, 0.0, 0.0);
+  T.CheckCall(2.0, 1.0, 0.0);
+  T.CheckCall(2.1, 1.1, 0.0);
+}
+
+
+TEST(SpecializeJSFunction_ToConstant2) {
+  FunctionTester T(
+      "(function() { var x = 1.5; var y = 2.25; var z = 3.75;"
+      " function f(a) { return a - x + y - z; } return f; })()");
+
+  T.CheckCall(-3.0, 0.0, 0.0);
+  T.CheckCall(-2.0, 1.0, 0.0);
+  T.CheckCall(-1.9, 1.1, 0.0);
+}
+
+
+TEST(SpecializeJSFunction_ToConstant3) {
+  FunctionTester T(
+      "(function() { var x = -11.5; function inc()"
+      " { return (function(a) { return a + x; }); }"
+      " return inc(); })()");
+
+  T.CheckCall(-11.5, 0.0, 0.0);
+  T.CheckCall(-10.5, 1.0, 0.0);
+  T.CheckCall(-10.4, 1.1, 0.0);
+}
+
+
+TEST(SpecializeJSFunction_ToConstant_uninit) {
+  {
+    FunctionTester T(
+        "(function() { if (false) { var x = 1; } function inc(a)"
+        " { return x; } return inc; })()");  // x is undefined!
+
+    CHECK(T.Call(T.Val(0.0), T.Val(0.0)).ToHandleChecked()->IsUndefined());
+    CHECK(T.Call(T.Val(2.0), T.Val(0.0)).ToHandleChecked()->IsUndefined());
+    CHECK(T.Call(T.Val(-2.1), T.Val(0.0)).ToHandleChecked()->IsUndefined());
+  }
+
+  {
+    FunctionTester T(
+        "(function() { if (false) { var x = 1; } function inc(a)"
+        " { return a + x; } return inc; })()");  // x is undefined!
+
+    CHECK(T.Call(T.Val(0.0), T.Val(0.0)).ToHandleChecked()->IsNaN());
+    CHECK(T.Call(T.Val(2.0), T.Val(0.0)).ToHandleChecked()->IsNaN());
+    CHECK(T.Call(T.Val(-2.1), T.Val(0.0)).ToHandleChecked()->IsNaN());
+  }
+}
diff --git a/test/cctest/compiler/test-js-typed-lowering.cc b/test/cctest/compiler/test-js-typed-lowering.cc
new file mode 100644 (file)
index 0000000..1bbc76e
--- /dev/null
@@ -0,0 +1,1345 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-typed-lowering.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/typer.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+class JSTypedLoweringTester : public HandleAndZoneScope {
+ public:
+  JSTypedLoweringTester()
+      : isolate(main_isolate()),
+        binop(NULL),
+        unop(NULL),
+        javascript(main_zone()),
+        machine(main_zone()),
+        simplified(main_zone()),
+        common(main_zone()),
+        graph(main_zone()),
+        typer(main_zone()),
+        source_positions(&graph),
+        context_node(NULL) {
+    typer.DecorateGraph(&graph);
+  }
+
+  Isolate* isolate;
+  Operator* binop;
+  Operator* unop;
+  JSOperatorBuilder javascript;
+  MachineOperatorBuilder machine;
+  SimplifiedOperatorBuilder simplified;
+  CommonOperatorBuilder common;
+  Graph graph;
+  Typer typer;
+  SourcePositionTable source_positions;
+  Node* context_node;
+
+  Node* Parameter(Type* t, int32_t index = 0) {
+    Node* n = graph.NewNode(common.Parameter(index));
+    NodeProperties::SetBounds(n, Bounds(Type::None(), t));
+    return n;
+  }
+
+  Node* reduce(Node* node) {
+    JSGraph jsgraph(&graph, &common, &typer);
+    JSTypedLowering reducer(&jsgraph, &source_positions);
+    Reduction reduction = reducer.Reduce(node);
+    if (reduction.Changed()) return reduction.replacement();
+    return node;
+  }
+
+  Node* start() {
+    Node* s = graph.start();
+    if (s == NULL) {
+      s = graph.NewNode(common.Start());
+      graph.SetStart(s);
+    }
+    return s;
+  }
+
+  Node* context() {
+    if (context_node == NULL) {
+      context_node = graph.NewNode(common.Parameter(-1));
+    }
+    return context_node;
+  }
+
+  Node* control() { return start(); }
+
+  void CheckPureBinop(IrOpcode::Value expected, Node* node) {
+    CHECK_EQ(expected, node->opcode());
+    CHECK_EQ(2, node->InputCount());  // should not have context, effect, etc.
+  }
+
+  void CheckPureBinop(Operator* expected, Node* node) {
+    CHECK_EQ(expected->opcode(), node->op()->opcode());
+    CHECK_EQ(2, node->InputCount());  // should not have context, effect, etc.
+  }
+
+  Node* ReduceUnop(Operator* op, Type* input_type) {
+    return reduce(Unop(op, Parameter(input_type)));
+  }
+
+  Node* ReduceBinop(Operator* op, Type* left_type, Type* right_type) {
+    return reduce(Binop(op, Parameter(left_type, 0), Parameter(right_type, 1)));
+  }
+
+  Node* Binop(Operator* op, Node* left, Node* right) {
+    // JS binops also require context, effect, and control
+    return graph.NewNode(op, left, right, context(), start(), control());
+  }
+
+  Node* Unop(Operator* op, Node* input) {
+    // JS unops also require context, effect, and control
+    return graph.NewNode(op, input, context(), start(), control());
+  }
+
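+  // Creates an artificial effect use of {node} by threading it through a
+  // JSToNumber (which takes an effect input), so tests can observe how
+  // reductions re-wire the effect chain.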
+  Node* UseForEffect(Node* node) {
+    // TODO(titzer): use EffectPhi after fixing EffectCount
+    return graph.NewNode(javascript.ToNumber(), node, context(), node,
+                         control());
+  }
+
+  void CheckEffectInput(Node* effect, Node* use) {
+    CHECK_EQ(effect, NodeProperties::GetEffectInput(use));
+  }
+
+  void CheckInt32Constant(int32_t expected, Node* result) {
+    CHECK_EQ(IrOpcode::kInt32Constant, result->opcode());
+    CHECK_EQ(expected, ValueOf<int32_t>(result->op()));
+  }
+
+  void CheckNumberConstant(double expected, Node* result) {
+    CHECK_EQ(IrOpcode::kNumberConstant, result->opcode());
+    CHECK_EQ(expected, ValueOf<double>(result->op()));
+  }
+
+  void CheckNaN(Node* result) {
+    CHECK_EQ(IrOpcode::kNumberConstant, result->opcode());
+    double value = ValueOf<double>(result->op());
+    CHECK(std::isnan(value));
+  }
+
+  void CheckTrue(Node* result) {
+    CheckHandle(isolate->factory()->true_value(), result);
+  }
+
+  void CheckFalse(Node* result) {
+    CheckHandle(isolate->factory()->false_value(), result);
+  }
+
+  void CheckHandle(Handle<Object> expected, Node* result) {
+    CHECK_EQ(IrOpcode::kHeapConstant, result->opcode());
+    Handle<Object> value = ValueOf<Handle<Object> >(result->op());
+    CHECK_EQ(*expected, *value);
+  }
+};
+
+static Type* kStringTypes[] = {Type::InternalizedString(), Type::OtherString(),
+                               Type::String()};
+
+
+static Type* kInt32Types[] = {
+    Type::UnsignedSmall(),   Type::OtherSignedSmall(), Type::OtherUnsigned31(),
+    Type::OtherUnsigned32(), Type::OtherSigned32(),    Type::SignedSmall(),
+    Type::Signed32(),        Type::Unsigned32(),       Type::Integral32()};
+
+
+static Type* kNumberTypes[] = {
+    Type::UnsignedSmall(),   Type::OtherSignedSmall(), Type::OtherUnsigned31(),
+    Type::OtherUnsigned32(), Type::OtherSigned32(),    Type::SignedSmall(),
+    Type::Signed32(),        Type::Unsigned32(),       Type::Integral32(),
+    Type::MinusZero(),       Type::NaN(),              Type::OtherNumber(),
+    Type::Number()};
+
+
+static Type* kJSTypes[] = {Type::Undefined(), Type::Null(),   Type::Boolean(),
+                           Type::Number(),    Type::String(), Type::Object()};
+
+
+static Type* I32Type(bool is_signed) {
+  return is_signed ? Type::Signed32() : Type::Unsigned32();
+}
+
+
+static IrOpcode::Value NumberToI32(bool is_signed) {
+  return is_signed ? IrOpcode::kNumberToInt32 : IrOpcode::kNumberToUint32;
+}
+
+
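+// Illustrative sketch only (not used by the tests): with these helpers, a
+// check that '>>>' converts its left operand unsigned could be written as
+//
+//   Node* r = R.ReduceBinop(R.javascript.ShiftRightLogical(),
+//                           Type::Number(), Type::Number());
+//   CHECK_EQ(NumberToI32(false), r->InputAt(0)->opcode());
+//
+// assuming a JSTypedLoweringTester R in scope. The tests below instead cover
+// the operator/type combinations exhaustively.
+
+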
+TEST(StringBinops) {
+  JSTypedLoweringTester R;
+
+  for (size_t i = 0; i < ARRAY_SIZE(kStringTypes); ++i) {
+    Node* p0 = R.Parameter(kStringTypes[i], 0);
+
+    for (size_t j = 0; j < ARRAY_SIZE(kStringTypes); ++j) {
+      Node* p1 = R.Parameter(kStringTypes[j], 1);
+
+      Node* add = R.Binop(R.javascript.Add(), p0, p1);
+      Node* r = R.reduce(add);
+
+      R.CheckPureBinop(IrOpcode::kStringAdd, r);
+      CHECK_EQ(p0, r->InputAt(0));
+      CHECK_EQ(p1, r->InputAt(1));
+    }
+  }
+}
+
+
+TEST(AddNumber1) {
+  JSTypedLoweringTester R;
+  for (size_t i = 0; i < ARRAY_SIZE(kNumberTypes); ++i) {
+    Node* p0 = R.Parameter(kNumberTypes[i], 0);
+    Node* p1 = R.Parameter(kNumberTypes[i], 1);
+    Node* add = R.Binop(R.javascript.Add(), p0, p1);
+    Node* r = R.reduce(add);
+
+    R.CheckPureBinop(IrOpcode::kNumberAdd, r);
+    CHECK_EQ(p0, r->InputAt(0));
+    CHECK_EQ(p1, r->InputAt(1));
+  }
+}
+
+
+TEST(NumberBinops) {
+  JSTypedLoweringTester R;
+  Operator* ops[] = {
+      R.javascript.Add(),      R.simplified.NumberAdd(),
+      R.javascript.Subtract(), R.simplified.NumberSubtract(),
+      R.javascript.Multiply(), R.simplified.NumberMultiply(),
+      R.javascript.Divide(),   R.simplified.NumberDivide(),
+      R.javascript.Modulus(),  R.simplified.NumberModulus(),
+  };
+
+  for (size_t i = 0; i < ARRAY_SIZE(kNumberTypes); ++i) {
+    Node* p0 = R.Parameter(kNumberTypes[i], 0);
+
+    for (size_t j = 0; j < ARRAY_SIZE(kNumberTypes); ++j) {
+      Node* p1 = R.Parameter(kNumberTypes[j], 1);
+
+      for (size_t k = 0; k < ARRAY_SIZE(ops); k += 2) {
+        Node* add = R.Binop(ops[k], p0, p1);
+        Node* r = R.reduce(add);
+
+        R.CheckPureBinop(ops[k + 1], r);
+        CHECK_EQ(p0, r->InputAt(0));
+        CHECK_EQ(p1, r->InputAt(1));
+      }
+    }
+  }
+}
+
+
+static void CheckToI32(Node* old_input, Node* new_input, bool is_signed) {
+  Type* old_type = NodeProperties::GetBounds(old_input).upper;
+  Type* expected_type = I32Type(is_signed);
+  if (old_type->Is(expected_type)) {
+    CHECK_EQ(old_input, new_input);
+  } else if (new_input->opcode() == IrOpcode::kNumberConstant) {
+    CHECK(NodeProperties::GetBounds(new_input).upper->Is(expected_type));
+    double v = ValueOf<double>(new_input->op());
+    double e = static_cast<double>(is_signed ? FastD2I(v) : FastD2UI(v));
+    CHECK_EQ(e, v);
+  } else {
+    CHECK_EQ(NumberToI32(is_signed), new_input->opcode());
+  }
+}
+
+
+// A helper class for testing lowering of bitwise shift operators.
+class JSBitwiseShiftTypedLoweringTester : public JSTypedLoweringTester {
+ public:
+  static const int kNumberOps = 6;
+  Operator** ops;
+  bool* signedness;
+
+  JSBitwiseShiftTypedLoweringTester() {
+    Operator* o[] = {javascript.ShiftLeft(),         machine.Word32Shl(),
+                     javascript.ShiftRight(),        machine.Word32Sar(),
+                     javascript.ShiftRightLogical(), machine.Word32Shr()};
+
+    ops = static_cast<Operator**>(malloc(sizeof(o)));
+    memcpy(ops, o, sizeof(o));
+
+    // Expected signedness of left and right conversions above.
+    bool s[] = {true, false, true, false, false, false};
+
+    signedness = static_cast<bool*>(malloc(sizeof(s)));
+    memcpy(signedness, s, sizeof(s));
+  }
+};
+
+
+TEST(Int32BitwiseShifts) {
+  JSBitwiseShiftTypedLoweringTester R;
+
+  Type* types[] = {
+      Type::SignedSmall(), Type::UnsignedSmall(), Type::OtherSigned32(),
+      Type::Unsigned32(),  Type::Signed32(),      Type::MinusZero(),
+      Type::NaN(),         Type::OtherNumber(),   Type::Undefined(),
+      Type::Null(),        Type::Boolean(),       Type::Number(),
+      Type::String(),      Type::Object()};
+
+  for (size_t i = 0; i < ARRAY_SIZE(types); ++i) {
+    Node* p0 = R.Parameter(types[i], 0);
+
+    for (size_t j = 0; j < ARRAY_SIZE(types); ++j) {
+      Node* p1 = R.Parameter(types[j], 1);
+
+      for (int k = 0; k < R.kNumberOps; k += 2) {
+        Node* add = R.Binop(R.ops[k], p0, p1);
+        Node* r = R.reduce(add);
+
+        R.CheckPureBinop(R.ops[k + 1], r);
+        Node* r0 = r->InputAt(0);
+        Node* r1 = r->InputAt(1);
+
+        CheckToI32(p0, r0, R.signedness[k]);
+
+        R.CheckPureBinop(IrOpcode::kWord32And, r1);
+        CheckToI32(p1, r1->InputAt(0), R.signedness[k + 1]);
+        R.CheckInt32Constant(0x1F, r1->InputAt(1));
+      }
+    }
+  }
+}
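+
+// Note on the Word32And(..., 0x1F) checked above: JS masks shift counts to
+// five bits, so e.g. (1 << 33) === 2, and the lowering makes that masking
+// explicit on the right operand.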
+
+
+// A helper class for testing lowering of bitwise operators.
+class JSBitwiseTypedLoweringTester : public JSTypedLoweringTester {
+ public:
+  static const int kNumberOps = 6;
+  Operator** ops;
+  bool* signedness;
+
+  JSBitwiseTypedLoweringTester() {
+    Operator* o[] = {javascript.BitwiseOr(),  machine.Word32Or(),
+                     javascript.BitwiseXor(), machine.Word32Xor(),
+                     javascript.BitwiseAnd(), machine.Word32And()};
+
+    ops = static_cast<Operator**>(malloc(sizeof(o)));
+    memcpy(ops, o, sizeof(o));
+
+    // Expected signedness of left and right conversions above.
+    bool s[] = {true, true, true, true, true, true};
+
+    signedness = static_cast<bool*>(malloc(sizeof(s)));
+    memcpy(signedness, s, sizeof(s));
+  }
+};
+
+
+TEST(Int32BitwiseBinops) {
+  JSBitwiseTypedLoweringTester R;
+
+  Type* types[] = {
+      Type::SignedSmall(), Type::UnsignedSmall(), Type::OtherSigned32(),
+      Type::Unsigned32(),  Type::Signed32(),      Type::MinusZero(),
+      Type::NaN(),         Type::OtherNumber(),   Type::Undefined(),
+      Type::Null(),        Type::Boolean(),       Type::Number(),
+      Type::String(),      Type::Object()};
+
+  for (size_t i = 0; i < ARRAY_SIZE(types); ++i) {
+    Node* p0 = R.Parameter(types[i], 0);
+
+    for (size_t j = 0; j < ARRAY_SIZE(types); ++j) {
+      Node* p1 = R.Parameter(types[j], 1);
+
+      for (int k = 0; k < R.kNumberOps; k += 2) {
+        Node* add = R.Binop(R.ops[k], p0, p1);
+        Node* r = R.reduce(add);
+
+        R.CheckPureBinop(R.ops[k + 1], r);
+
+        CheckToI32(p0, r->InputAt(0), R.signedness[k]);
+        CheckToI32(p1, r->InputAt(1), R.signedness[k + 1]);
+      }
+    }
+  }
+}
+
+
+TEST(JSToNumber1) {
+  JSTypedLoweringTester R;
+  Operator* ton = R.javascript.ToNumber();
+
+  for (size_t i = 0; i < ARRAY_SIZE(kNumberTypes); i++) {  // ToNumber(number)
+    Node* r = R.ReduceUnop(ton, kNumberTypes[i]);
+    CHECK_EQ(IrOpcode::kParameter, r->opcode());
+  }
+
+  {  // ToNumber(undefined)
+    Node* r = R.ReduceUnop(ton, Type::Undefined());
+    R.CheckNaN(r);
+  }
+
+  {  // ToNumber(null)
+    Node* r = R.ReduceUnop(ton, Type::Null());
+    R.CheckNumberConstant(0.0, r);
+  }
+}
+
+
+TEST(JSToNumber_replacement) {
+  JSTypedLoweringTester R;
+
+  Type* types[] = {Type::Null(), Type::Undefined(), Type::Number()};
+
+  for (size_t i = 0; i < ARRAY_SIZE(types); i++) {
+    Node* n = R.Parameter(types[i]);
+    Node* c = R.graph.NewNode(R.javascript.ToNumber(), n, R.context(),
+                              R.start(), R.start());
+    Node* effect_use = R.UseForEffect(c);
+    Node* add = R.graph.NewNode(R.simplified.ReferenceEqual(Type::Any()), n, c);
+
+    R.CheckEffectInput(c, effect_use);
+    Node* r = R.reduce(c);
+
+    if (types[i]->Is(Type::Number())) {
+      CHECK_EQ(n, r);
+    } else {
+      CHECK_EQ(IrOpcode::kNumberConstant, r->opcode());
+    }
+
+    CHECK_EQ(n, add->InputAt(0));
+    CHECK_EQ(r, add->InputAt(1));
+    R.CheckEffectInput(R.start(), effect_use);
+  }
+}
+
+
+TEST(JSToNumberOfConstant) {
+  JSTypedLoweringTester R;
+
+  Operator* ops[] = {R.common.NumberConstant(0), R.common.NumberConstant(-1),
+                     R.common.NumberConstant(0.1), R.common.Int32Constant(1177),
+                     R.common.Float64Constant(0.99)};
+
+  for (size_t i = 0; i < ARRAY_SIZE(ops); i++) {
+    Node* n = R.graph.NewNode(ops[i]);
+    Node* convert = R.Unop(R.javascript.ToNumber(), n);
+    Node* r = R.reduce(convert);
+    // Note that either outcome below is correct. It only depends on whether
+    // the types of constants are eagerly computed or only computed by the
+    // typing pass.
+    if (NodeProperties::GetBounds(n).upper->Is(Type::Number())) {
+      // If number constants are eagerly typed, then reduction should
+      // remove the ToNumber.
+      CHECK_EQ(n, r);
+    } else {
+      // Otherwise, type-based lowering should only look at the type, and
+      // *not* try to constant fold.
+      CHECK_EQ(convert, r);
+    }
+  }
+}
+
+
+TEST(JSToNumberOfNumberOrOtherPrimitive) {
+  JSTypedLoweringTester R;
+  Type* others[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
+                    Type::String()};
+
+  for (size_t i = 0; i < ARRAY_SIZE(others); i++) {
+    Type* t = Type::Union(Type::Number(), others[i], R.main_zone());
+    Node* r = R.ReduceUnop(R.javascript.ToNumber(), t);
+    CHECK_EQ(IrOpcode::kJSToNumber, r->opcode());
+  }
+}
+
+
+TEST(JSToBoolean) {
+  JSTypedLoweringTester R;
+  Operator* op = R.javascript.ToBoolean();
+
+  {  // ToBoolean(undefined)
+    Node* r = R.ReduceUnop(op, Type::Undefined());
+    R.CheckFalse(r);
+  }
+
+  {  // ToBoolean(null)
+    Node* r = R.ReduceUnop(op, Type::Null());
+    R.CheckFalse(r);
+  }
+
+  {  // ToBoolean(boolean)
+    Node* r = R.ReduceUnop(op, Type::Boolean());
+    CHECK_EQ(IrOpcode::kParameter, r->opcode());
+  }
+
+  {  // ToBoolean(number)
+    Node* r = R.ReduceUnop(op, Type::Number());
+    CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
+    Node* i = r->InputAt(0);
+    CHECK_EQ(IrOpcode::kNumberEqual, i->opcode());
+    // ToBoolean(number) => BooleanNot(NumberEqual(x, #0))
+  }
+
+  {  // ToBoolean(string)
+    Node* r = R.ReduceUnop(op, Type::String());
+    // TODO(titzer): test will break with better js-typed-lowering
+    CHECK_EQ(IrOpcode::kJSToBoolean, r->opcode());
+  }
+
+  {  // ToBoolean(object)
+    Node* r = R.ReduceUnop(op, Type::DetectableObject());
+    R.CheckTrue(r);
+  }
+
+  {  // ToBoolean(undetectable)
+    Node* r = R.ReduceUnop(op, Type::Undetectable());
+    R.CheckFalse(r);
+  }
+
+  {  // ToBoolean(object)
+    Node* r = R.ReduceUnop(op, Type::Object());
+    CHECK_EQ(IrOpcode::kJSToBoolean, r->opcode());
+  }
+}
+
+
+TEST(JSToBoolean_replacement) {
+  JSTypedLoweringTester R;
+
+  Type* types[] = {Type::Null(), Type::Undefined(), Type::Boolean(),
+                   Type::DetectableObject(), Type::Undetectable()};
+
+  for (size_t i = 0; i < ARRAY_SIZE(types); i++) {
+    Node* n = R.Parameter(types[i]);
+    Node* c = R.graph.NewNode(R.javascript.ToBoolean(), n, R.context(),
+                              R.start(), R.start());
+    Node* effect_use = R.UseForEffect(c);
+    Node* add = R.graph.NewNode(R.simplified.ReferenceEqual(Type::Any()), n, c);
+
+    R.CheckEffectInput(c, effect_use);
+    Node* r = R.reduce(c);
+
+    if (types[i]->Is(Type::Boolean())) {
+      CHECK_EQ(n, r);
+    } else {
+      CHECK_EQ(IrOpcode::kHeapConstant, r->opcode());
+    }
+
+    CHECK_EQ(n, add->InputAt(0));
+    CHECK_EQ(r, add->InputAt(1));
+    R.CheckEffectInput(R.start(), effect_use);
+  }
+}
+
+
+TEST(JSToString1) {
+  JSTypedLoweringTester R;
+
+  for (size_t i = 0; i < ARRAY_SIZE(kStringTypes); i++) {
+    Node* r = R.ReduceUnop(R.javascript.ToString(), kStringTypes[i]);
+    CHECK_EQ(IrOpcode::kParameter, r->opcode());
+  }
+
+  Operator* op = R.javascript.ToString();
+
+  {  // ToString(undefined) => "undefined"
+    Node* r = R.ReduceUnop(op, Type::Undefined());
+    R.CheckHandle(R.isolate->factory()->undefined_string(), r);
+  }
+
+  {  // ToString(null) => "null"
+    Node* r = R.ReduceUnop(op, Type::Null());
+    R.CheckHandle(R.isolate->factory()->null_string(), r);
+  }
+
+  {  // ToString(boolean)
+    Node* r = R.ReduceUnop(op, Type::Boolean());
+    // TODO(titzer): could be a branch
+    CHECK_EQ(IrOpcode::kJSToString, r->opcode());
+  }
+
+  {  // ToString(number)
+    Node* r = R.ReduceUnop(op, Type::Number());
+    // TODO(titzer): could remove effects
+    CHECK_EQ(IrOpcode::kJSToString, r->opcode());
+  }
+
+  {  // ToString(string)
+    Node* r = R.ReduceUnop(op, Type::String());
+    CHECK_EQ(IrOpcode::kParameter, r->opcode());  // No-op
+  }
+
+  {  // ToString(object)
+    Node* r = R.ReduceUnop(op, Type::Object());
+    CHECK_EQ(IrOpcode::kJSToString, r->opcode());  // No reduction.
+  }
+}
+
+
+TEST(JSToString_replacement) {
+  JSTypedLoweringTester R;
+
+  Type* types[] = {Type::Null(), Type::Undefined(), Type::String()};
+
+  for (size_t i = 0; i < ARRAY_SIZE(types); i++) {
+    Node* n = R.Parameter(types[i]);
+    Node* c = R.graph.NewNode(R.javascript.ToString(), n, R.context(),
+                              R.start(), R.start());
+    Node* effect_use = R.UseForEffect(c);
+    Node* add = R.graph.NewNode(R.simplified.ReferenceEqual(Type::Any()), n, c);
+
+    R.CheckEffectInput(c, effect_use);
+    Node* r = R.reduce(c);
+
+    if (types[i]->Is(Type::String())) {
+      CHECK_EQ(n, r);
+    } else {
+      CHECK_EQ(IrOpcode::kHeapConstant, r->opcode());
+    }
+
+    CHECK_EQ(n, add->InputAt(0));
+    CHECK_EQ(r, add->InputAt(1));
+    R.CheckEffectInput(R.start(), effect_use);
+  }
+}
+
+
+TEST(StringComparison) {
+  JSTypedLoweringTester R;
+
+  Operator* ops[] = {
+      R.javascript.LessThan(),           R.simplified.StringLessThan(),
+      R.javascript.LessThanOrEqual(),    R.simplified.StringLessThanOrEqual(),
+      R.javascript.GreaterThan(),        R.simplified.StringLessThan(),
+      R.javascript.GreaterThanOrEqual(), R.simplified.StringLessThanOrEqual()};
+
+  for (size_t i = 0; i < ARRAY_SIZE(kStringTypes); i++) {
+    Node* p0 = R.Parameter(kStringTypes[i], 0);
+    for (size_t j = 0; j < ARRAY_SIZE(kStringTypes); j++) {
+      Node* p1 = R.Parameter(kStringTypes[j], 1);
+
+      for (size_t k = 0; k < ARRAY_SIZE(ops); k += 2) {
+        Node* cmp = R.Binop(ops[k], p0, p1);
+        Node* r = R.reduce(cmp);
+
+        R.CheckPureBinop(ops[k + 1], r);
+        if (k >= 4) {
+          // GreaterThan and GreaterThanOrEqual commute the inputs
+          // and use the LessThan and LessThanOrEqual operators.
+          CHECK_EQ(p1, r->InputAt(0));
+          CHECK_EQ(p0, r->InputAt(1));
+        } else {
+          CHECK_EQ(p0, r->InputAt(0));
+          CHECK_EQ(p1, r->InputAt(1));
+        }
+      }
+    }
+  }
+}
+
+
+static void CheckIsConvertedToNumber(Node* val, Node* converted) {
+  if (NodeProperties::GetBounds(val).upper->Is(Type::Number())) {
+    CHECK_EQ(val, converted);
+  } else {
+    if (converted->opcode() == IrOpcode::kNumberConstant) return;
+    CHECK_EQ(IrOpcode::kJSToNumber, converted->opcode());
+    CHECK_EQ(val, converted->InputAt(0));
+  }
+}
+
+
+TEST(NumberComparison) {
+  JSTypedLoweringTester R;
+
+  Operator* ops[] = {
+      R.javascript.LessThan(),           R.simplified.NumberLessThan(),
+      R.javascript.LessThanOrEqual(),    R.simplified.NumberLessThanOrEqual(),
+      R.javascript.GreaterThan(),        R.simplified.NumberLessThan(),
+      R.javascript.GreaterThanOrEqual(), R.simplified.NumberLessThanOrEqual()};
+
+  for (size_t i = 0; i < ARRAY_SIZE(kJSTypes); i++) {
+    Type* t0 = kJSTypes[i];
+    if (t0->Is(Type::String())) continue;  // skip Type::String
+    Node* p0 = R.Parameter(t0, 0);
+
+    for (size_t j = 0; j < ARRAY_SIZE(kJSTypes); j++) {
+      Type* t1 = kJSTypes[j];
+      if (t1->Is(Type::String())) continue;  // skip Type::String
+      Node* p1 = R.Parameter(t1, 1);
+
+      for (size_t k = 0; k < ARRAY_SIZE(ops); k += 2) {
+        Node* cmp = R.Binop(ops[k], p0, p1);
+        Node* r = R.reduce(cmp);
+
+        R.CheckPureBinop(ops[k + 1], r);
+        if (k >= 4) {
+          // GreaterThan and GreaterThanOrEqual commute the inputs
+          // and use the LessThan and LessThanOrEqual operators.
+          CheckIsConvertedToNumber(p1, r->InputAt(0));
+          CheckIsConvertedToNumber(p0, r->InputAt(1));
+        } else {
+          CheckIsConvertedToNumber(p0, r->InputAt(0));
+          CheckIsConvertedToNumber(p1, r->InputAt(1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(MixedComparison1) {
+  JSTypedLoweringTester R;
+
+  Type* types[] = {Type::Number(), Type::String(),
+                   Type::Union(Type::Number(), Type::String(), R.main_zone())};
+
+  for (size_t i = 0; i < ARRAY_SIZE(types); i++) {
+    Node* p0 = R.Parameter(types[i], 0);
+
+    for (size_t j = 0; j < ARRAY_SIZE(types); j++) {
+      Node* p1 = R.Parameter(types[j], 1);
+      {
+        Node* cmp = R.Binop(R.javascript.LessThan(), p0, p1);
+        Node* r = R.reduce(cmp);
+
+        if (!types[i]->Maybe(Type::String()) ||
+            !types[j]->Maybe(Type::String())) {
+          if (types[i]->Is(Type::String()) && types[j]->Is(Type::String())) {
+            R.CheckPureBinop(R.simplified.StringLessThan(), r);
+          } else {
+            R.CheckPureBinop(R.simplified.NumberLessThan(), r);
+          }
+        } else {
+          CHECK_EQ(cmp, r);  // No reduction of mixed types.
+        }
+      }
+    }
+  }
+}
+
+
+TEST(ObjectComparison) {
+  JSTypedLoweringTester R;
+
+  Node* p0 = R.Parameter(Type::Object(), 0);
+  Node* p1 = R.Parameter(Type::Object(), 1);
+
+  Node* cmp = R.Binop(R.javascript.LessThan(), p0, p1);
+  Node* effect_use = R.UseForEffect(cmp);
+
+  R.CheckEffectInput(R.start(), cmp);
+  R.CheckEffectInput(cmp, effect_use);
+
+  Node* r = R.reduce(cmp);
+
+  R.CheckPureBinop(R.simplified.NumberLessThan(), r);
+
+  Node* i0 = r->InputAt(0);
+  Node* i1 = r->InputAt(1);
+
+  CHECK_NE(p0, i0);
+  CHECK_NE(p1, i1);
+  CHECK_EQ(IrOpcode::kJSToNumber, i0->opcode());
+  CHECK_EQ(IrOpcode::kJSToNumber, i1->opcode());
+
+  // Check effect chain is correct.
+  R.CheckEffectInput(R.start(), i0);
+  R.CheckEffectInput(i0, i1);
+  R.CheckEffectInput(i1, effect_use);
+}
+
+
+TEST(UnaryNot) {
+  JSTypedLoweringTester R;
+  Operator* opnot = R.javascript.UnaryNot();
+
+  for (size_t i = 0; i < ARRAY_SIZE(kJSTypes); i++) {
+    Node* r = R.ReduceUnop(opnot, kJSTypes[i]);
+    // TODO(titzer): test will break if/when js-typed-lowering constant folds.
+    CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
+  }
+}
+
+
+TEST(RemoveToNumberEffects) {
+  JSTypedLoweringTester R;
+
+  Node* effect_use = NULL;
+  for (int i = 0; i < 10; i++) {
+    Node* p0 = R.Parameter(Type::Number());
+    Node* ton = R.Unop(R.javascript.ToNumber(), p0);
+    effect_use = NULL;
+
+    switch (i) {
+      case 0:
+        effect_use = R.graph.NewNode(R.javascript.ToNumber(), p0, R.context(),
+                                     ton, R.start());
+        break;
+      case 1:
+        effect_use = R.graph.NewNode(R.javascript.ToNumber(), ton, R.context(),
+                                     ton, R.start());
+        break;
+      case 2:
+        effect_use = R.graph.NewNode(R.common.EffectPhi(1), ton, R.start());
+        break;
+      case 3:
+        effect_use = R.graph.NewNode(R.javascript.Add(), ton, ton, R.context(),
+                                     ton, R.start());
+        break;
+      case 4:
+        effect_use = R.graph.NewNode(R.javascript.Add(), p0, p0, R.context(),
+                                     ton, R.start());
+        break;
+      case 5:
+        effect_use = R.graph.NewNode(R.common.Return(), p0, ton, R.start());
+        break;
+      case 6:
+        effect_use = R.graph.NewNode(R.common.Return(), ton, ton, R.start());
+    }
+
+    R.CheckEffectInput(R.start(), ton);
+    if (effect_use != NULL) R.CheckEffectInput(ton, effect_use);
+
+    Node* r = R.reduce(ton);
+    CHECK_EQ(p0, r);
+    CHECK_NE(R.start(), r);
+
+    if (effect_use != NULL) {
+      R.CheckEffectInput(R.start(), effect_use);
+      // Check that value uses of ToNumber() do not go to start().
+      for (int i = 0; i < effect_use->op()->InputCount(); i++) {
+        CHECK_NE(R.start(), effect_use->InputAt(i));
+      }
+    }
+  }
+
+  CHECK_EQ(NULL, effect_use);  // Should have exhausted all cases above.
+}
+
+
+// Helper class for testing the reduction of a single binop.
+class BinopEffectsTester {
+ public:
+  explicit BinopEffectsTester(Operator* op, Type* t0, Type* t1)
+      : R(),
+        p0(R.Parameter(t0, 0)),
+        p1(R.Parameter(t1, 1)),
+        binop(R.Binop(op, p0, p1)),
+        effect_use(R.graph.NewNode(R.common.EffectPhi(1), binop, R.start())) {
+    // Effects should be ordered start -> binop -> effect_use
+    R.CheckEffectInput(R.start(), binop);
+    R.CheckEffectInput(binop, effect_use);
+    result = R.reduce(binop);
+  }
+
+  JSTypedLoweringTester R;
+  Node* p0;
+  Node* p1;
+  Node* binop;
+  Node* effect_use;
+  Node* result;
+
+  void CheckEffectsRemoved() { R.CheckEffectInput(R.start(), effect_use); }
+
+  void CheckEffectOrdering(Node* n0) {
+    R.CheckEffectInput(R.start(), n0);
+    R.CheckEffectInput(n0, effect_use);
+  }
+
+  void CheckEffectOrdering(Node* n0, Node* n1) {
+    R.CheckEffectInput(R.start(), n0);
+    R.CheckEffectInput(n0, n1);
+    R.CheckEffectInput(n1, effect_use);
+  }
+
+  Node* CheckConvertedInput(IrOpcode::Value opcode, int which, bool effects) {
+    return CheckConverted(opcode, result->InputAt(which), effects);
+  }
+
+  Node* CheckConverted(IrOpcode::Value opcode, Node* node, bool effects) {
+    CHECK_EQ(opcode, node->opcode());
+    if (effects) {
+      CHECK_LT(0, NodeProperties::GetEffectInputCount(node));
+    } else {
+      CHECK_EQ(0, NodeProperties::GetEffectInputCount(node));
+    }
+    return node;
+  }
+
+  Node* CheckNoOp(int which) {
+    CHECK_EQ(which == 0 ? p0 : p1, result->InputAt(which));
+    return result->InputAt(which);
+  }
+};
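+
+// Usage sketch (mirroring the loops below), assuming a JSTypedLoweringTester
+// R in scope: lowering a JS binop over types that need conversion inserts
+// JSToNumber nodes whose effects are re-threaded around the pure operator.
+//
+//   BinopEffectsTester B(R.javascript.Add(), Type::Object(), Type::Number());
+//   Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
+//   B.CheckEffectOrdering(i0);  // start -> i0 -> effect_use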
+
+
+// Helper function for strict and non-strict equality reductions.
+void CheckEqualityReduction(JSTypedLoweringTester* R, bool strict, Node* l,
+                            Node* r, IrOpcode::Value expected) {
+  for (int j = 0; j < 2; j++) {
+    Node* p0 = j == 0 ? l : r;
+    Node* p1 = j == 1 ? l : r;
+
+    {
+      Node* eq = strict ? R->graph.NewNode(R->javascript.StrictEqual(), p0, p1)
+                        : R->Binop(R->javascript.Equal(), p0, p1);
+      Node* r = R->reduce(eq);
+      R->CheckPureBinop(expected, r);
+    }
+
+    {
+      Node* ne = strict
+                     ? R->graph.NewNode(R->javascript.StrictNotEqual(), p0, p1)
+                     : R->Binop(R->javascript.NotEqual(), p0, p1);
+      Node* n = R->reduce(ne);
+      CHECK_EQ(IrOpcode::kBooleanNot, n->opcode());
+      Node* r = n->InputAt(0);
+      R->CheckPureBinop(expected, r);
+    }
+  }
+}
+
+
+TEST(EqualityForNumbers) {
+  JSTypedLoweringTester R;
+
+  Type* simple_number_types[] = {Type::UnsignedSmall(), Type::SignedSmall(),
+                                 Type::Signed32(), Type::Unsigned32(),
+                                 Type::Number()};
+
+  for (size_t i = 0; i < ARRAY_SIZE(simple_number_types); ++i) {
+    Node* p0 = R.Parameter(simple_number_types[i], 0);
+
+    for (size_t j = 0; j < ARRAY_SIZE(simple_number_types); ++j) {
+      Node* p1 = R.Parameter(simple_number_types[j], 1);
+
+      CheckEqualityReduction(&R, true, p0, p1, IrOpcode::kNumberEqual);
+      CheckEqualityReduction(&R, false, p0, p1, IrOpcode::kNumberEqual);
+    }
+  }
+}
+
+
+TEST(StrictEqualityForRefEqualTypes) {
+  JSTypedLoweringTester R;
+
+  Type* types[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
+                   Type::Object(), Type::Receiver()};
+
+  Node* p0 = R.Parameter(Type::Any());
+  for (size_t i = 0; i < ARRAY_SIZE(types); i++) {
+    Node* p1 = R.Parameter(types[i]);
+    CheckEqualityReduction(&R, true, p0, p1, IrOpcode::kReferenceEqual);
+  }
+  // TODO(titzer): Equal(RefEqualTypes)
+}
+
+
+TEST(StringEquality) {
+  JSTypedLoweringTester R;
+  Node* p0 = R.Parameter(Type::String());
+  Node* p1 = R.Parameter(Type::String());
+
+  CheckEqualityReduction(&R, true, p0, p1, IrOpcode::kStringEqual);
+  CheckEqualityReduction(&R, false, p0, p1, IrOpcode::kStringEqual);
+}
+
+
+TEST(RemovePureNumberBinopEffects) {
+  JSTypedLoweringTester R;
+
+  Operator* ops[] = {
+      R.javascript.Equal(),           R.simplified.NumberEqual(),
+      R.javascript.Add(),             R.simplified.NumberAdd(),
+      R.javascript.Subtract(),        R.simplified.NumberSubtract(),
+      R.javascript.Multiply(),        R.simplified.NumberMultiply(),
+      R.javascript.Divide(),          R.simplified.NumberDivide(),
+      R.javascript.Modulus(),         R.simplified.NumberModulus(),
+      R.javascript.LessThan(),        R.simplified.NumberLessThan(),
+      R.javascript.LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
+  };
+
+  for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Number(), Type::Number());
+    CHECK_EQ(ops[j + 1]->opcode(), B.result->op()->opcode());
+
+    B.R.CheckPureBinop(B.result->opcode(), B.result);
+
+    B.CheckNoOp(0);
+    B.CheckNoOp(1);
+
+    B.CheckEffectsRemoved();
+  }
+}
+
+
+TEST(OrderNumberBinopEffects1) {
+  JSTypedLoweringTester R;
+
+  Operator* ops[] = {
+      R.javascript.Subtract(), R.simplified.NumberSubtract(),
+      R.javascript.Multiply(), R.simplified.NumberMultiply(),
+      R.javascript.Divide(),   R.simplified.NumberDivide(),
+      R.javascript.Modulus(),  R.simplified.NumberModulus(),
+  };
+
+  for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Object(), Type::String());
+    CHECK_EQ(ops[j + 1]->opcode(), B.result->op()->opcode());
+
+    Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
+    Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
+
+    CHECK_EQ(B.p0, i0->InputAt(0));
+    CHECK_EQ(B.p1, i1->InputAt(0));
+
+    // Effects should be ordered start -> i0 -> i1 -> effect_use
+    B.CheckEffectOrdering(i0, i1);
+  }
+}
+
+
+TEST(OrderNumberBinopEffects2) {
+  JSTypedLoweringTester R;
+
+  Operator* ops[] = {
+      R.javascript.Add(),      R.simplified.NumberAdd(),
+      R.javascript.Subtract(), R.simplified.NumberSubtract(),
+      R.javascript.Multiply(), R.simplified.NumberMultiply(),
+      R.javascript.Divide(),   R.simplified.NumberDivide(),
+      R.javascript.Modulus(),  R.simplified.NumberModulus(),
+  };
+
+  for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Number(), Type::Object());
+
+    Node* i0 = B.CheckNoOp(0);
+    Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
+
+    CHECK_EQ(B.p0, i0);
+    CHECK_EQ(B.p1, i1->InputAt(0));
+
+    // Effects should be ordered start -> i1 -> effect_use
+    B.CheckEffectOrdering(i1);
+  }
+
+  for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Object(), Type::Number());
+
+    Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
+    Node* i1 = B.CheckNoOp(1);
+
+    CHECK_EQ(B.p0, i0->InputAt(0));
+    CHECK_EQ(B.p1, i1);
+
+    // Effects should be ordered start -> i0 -> effect_use
+    B.CheckEffectOrdering(i0);
+  }
+}
+
+
+TEST(OrderCompareEffects) {
+  JSTypedLoweringTester R;
+
+  Operator* ops[] = {
+      R.javascript.GreaterThan(), R.simplified.NumberLessThan(),
+      R.javascript.GreaterThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
+  };
+
+  for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Object(), Type::String());
+    CHECK_EQ(ops[j + 1]->opcode(), B.result->op()->opcode());
+
+    Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
+    Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
+
+    // Inputs should be commuted.
+    CHECK_EQ(B.p1, i0->InputAt(0));
+    CHECK_EQ(B.p0, i1->InputAt(0));
+
+    // But effects should be ordered start -> i1 -> i0 -> effect_use
+    B.CheckEffectOrdering(i1, i0);
+  }
+
+  for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Number(), Type::Object());
+
+    Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
+    Node* i1 = B.result->InputAt(1);
+
+    CHECK_EQ(B.p1, i0->InputAt(0));  // Should be commuted.
+    CHECK_EQ(B.p0, i1);
+
+    // Effects should be ordered start -> i0 -> effect_use
+    B.CheckEffectOrdering(i0);
+  }
+
+  for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Object(), Type::Number());
+
+    Node* i0 = B.result->InputAt(0);
+    Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
+
+    CHECK_EQ(B.p1, i0);  // Should be commuted.
+    CHECK_EQ(B.p0, i1->InputAt(0));
+
+    // Effects should be ordered start -> i1 -> effect_use
+    B.CheckEffectOrdering(i1);
+  }
+}
+
+
+TEST(Int32BinopEffects) {
+  JSBitwiseTypedLoweringTester R;
+
+  for (int j = 0; j < R.kNumberOps; j += 2) {
+    bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
+    BinopEffectsTester B(R.ops[j], I32Type(signed_left), I32Type(signed_right));
+    CHECK_EQ(R.ops[j + 1]->opcode(), B.result->op()->opcode());
+
+    B.R.CheckPureBinop(B.result->opcode(), B.result);
+
+    B.CheckNoOp(0);
+    B.CheckNoOp(1);
+
+    B.CheckEffectsRemoved();
+  }
+
+  for (int j = 0; j < R.kNumberOps; j += 2) {
+    bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
+    BinopEffectsTester B(R.ops[j], Type::Number(), Type::Number());
+    CHECK_EQ(R.ops[j + 1]->opcode(), B.result->op()->opcode());
+
+    B.R.CheckPureBinop(B.result->opcode(), B.result);
+
+    B.CheckConvertedInput(NumberToI32(signed_left), 0, false);
+    B.CheckConvertedInput(NumberToI32(signed_right), 1, false);
+
+    B.CheckEffectsRemoved();
+  }
+
+  for (int j = 0; j < R.kNumberOps; j += 2) {
+    bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
+    BinopEffectsTester B(R.ops[j], Type::Number(), Type::Object());
+
+    B.R.CheckPureBinop(B.result->opcode(), B.result);
+
+    Node* i0 = B.CheckConvertedInput(NumberToI32(signed_left), 0, false);
+    Node* i1 = B.CheckConvertedInput(NumberToI32(signed_right), 1, false);
+
+    CHECK_EQ(B.p0, i0->InputAt(0));
+    Node* ii1 = B.CheckConverted(IrOpcode::kJSToNumber, i1->InputAt(0), true);
+
+    CHECK_EQ(B.p1, ii1->InputAt(0));
+
+    B.CheckEffectOrdering(ii1);
+  }
+
+  for (int j = 0; j < R.kNumberOps; j += 2) {
+    bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
+    BinopEffectsTester B(R.ops[j], Type::Object(), Type::Number());
+
+    B.R.CheckPureBinop(B.result->opcode(), B.result);
+
+    Node* i0 = B.CheckConvertedInput(NumberToI32(signed_left), 0, false);
+    Node* i1 = B.CheckConvertedInput(NumberToI32(signed_right), 1, false);
+
+    Node* ii0 = B.CheckConverted(IrOpcode::kJSToNumber, i0->InputAt(0), true);
+    CHECK_EQ(B.p1, i1->InputAt(0));
+
+    CHECK_EQ(B.p0, ii0->InputAt(0));
+
+    B.CheckEffectOrdering(ii0);
+  }
+
+  for (int j = 0; j < R.kNumberOps; j += 2) {
+    bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
+    BinopEffectsTester B(R.ops[j], Type::Object(), Type::Object());
+
+    B.R.CheckPureBinop(B.result->opcode(), B.result);
+
+    Node* i0 = B.CheckConvertedInput(NumberToI32(signed_left), 0, false);
+    Node* i1 = B.CheckConvertedInput(NumberToI32(signed_right), 1, false);
+
+    Node* ii0 = B.CheckConverted(IrOpcode::kJSToNumber, i0->InputAt(0), true);
+    Node* ii1 = B.CheckConverted(IrOpcode::kJSToNumber, i1->InputAt(0), true);
+
+    CHECK_EQ(B.p0, ii0->InputAt(0));
+    CHECK_EQ(B.p1, ii1->InputAt(0));
+
+    B.CheckEffectOrdering(ii0, ii1);
+  }
+}
+
+
+TEST(UnaryNotEffects) {
+  JSTypedLoweringTester R;
+  Operator* opnot = R.javascript.UnaryNot();
+
+  for (size_t i = 0; i < ARRAY_SIZE(kJSTypes); i++) {
+    Node* p0 = R.Parameter(kJSTypes[i], 0);
+    Node* orig = R.Unop(opnot, p0);
+    Node* effect_use = R.UseForEffect(orig);
+    Node* value_use = R.graph.NewNode(R.common.Return(), orig);
+    Node* r = R.reduce(orig);
+    // TODO(titzer): test will break if/when js-typed-lowering constant folds.
+    CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
+
+    CHECK_EQ(r, value_use->InputAt(0));
+
+    if (r->InputAt(0) == orig && orig->opcode() == IrOpcode::kJSToBoolean) {
+      // The original node was turned into a ToBoolean, which has an effect.
+      R.CheckEffectInput(R.start(), orig);
+      R.CheckEffectInput(orig, effect_use);
+    } else {
+      // effect should have been removed from this node.
+      R.CheckEffectInput(R.start(), effect_use);
+    }
+  }
+}
+
+
+TEST(Int32AddNarrowing) {
+  {
+    JSBitwiseTypedLoweringTester R;
+
+    for (int o = 0; o < R.kNumberOps; o += 2) {
+      for (size_t i = 0; i < ARRAY_SIZE(kInt32Types); i++) {
+        Node* n0 = R.Parameter(kInt32Types[i]);
+        for (size_t j = 0; j < ARRAY_SIZE(kInt32Types); j++) {
+          Node* n1 = R.Parameter(kInt32Types[j]);
+          Node* one = R.graph.NewNode(R.common.NumberConstant(1));
+
+          for (int l = 0; l < 2; l++) {
+            Node* add_node = R.Binop(R.simplified.NumberAdd(), n0, n1);
+            Node* or_node =
+                R.Binop(R.ops[o], l ? add_node : one, l ? one : add_node);
+            Node* r = R.reduce(or_node);
+
+            CHECK_EQ(R.ops[o + 1]->opcode(), r->op()->opcode());
+            CHECK_EQ(IrOpcode::kInt32Add, add_node->opcode());
+            bool is_signed = l ? R.signedness[o] : R.signedness[o + 1];
+
+            Type* add_type = NodeProperties::GetBounds(add_node).upper;
+            CHECK(add_type->Is(I32Type(is_signed)));
+          }
+        }
+      }
+    }
+  }
+  {
+    JSBitwiseShiftTypedLoweringTester R;
+
+    for (int o = 0; o < R.kNumberOps; o += 2) {
+      for (size_t i = 0; i < ARRAY_SIZE(kInt32Types); i++) {
+        Node* n0 = R.Parameter(kInt32Types[i]);
+        for (size_t j = 0; j < ARRAY_SIZE(kInt32Types); j++) {
+          Node* n1 = R.Parameter(kInt32Types[j]);
+          Node* one = R.graph.NewNode(R.common.NumberConstant(1));
+
+          for (int l = 0; l < 2; l++) {
+            Node* add_node = R.Binop(R.simplified.NumberAdd(), n0, n1);
+            Node* or_node =
+                R.Binop(R.ops[o], l ? add_node : one, l ? one : add_node);
+            Node* r = R.reduce(or_node);
+
+            CHECK_EQ(R.ops[o + 1]->opcode(), r->op()->opcode());
+            CHECK_EQ(IrOpcode::kInt32Add, add_node->opcode());
+            bool is_signed = l ? R.signedness[o] : R.signedness[o + 1];
+
+            Type* add_type = NodeProperties::GetBounds(add_node).upper;
+            CHECK(add_type->Is(I32Type(is_signed)));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(Int32AddNarrowingNotOwned) {
+  JSBitwiseTypedLoweringTester R;
+
+  for (int o = 0; o < R.kNumberOps; o += 2) {
+    Node* n0 = R.Parameter(I32Type(R.signedness[o]));
+    Node* n1 = R.Parameter(I32Type(R.signedness[o + 1]));
+    Node* one = R.graph.NewNode(R.common.NumberConstant(1));
+
+    Node* add_node = R.Binop(R.simplified.NumberAdd(), n0, n1);
+    Node* or_node = R.Binop(R.ops[o], add_node, one);
+    Node* other_use = R.Binop(R.simplified.NumberAdd(), add_node, one);
+    Node* r = R.reduce(or_node);
+    CHECK_EQ(R.ops[o + 1]->opcode(), r->op()->opcode());
+    // Should not be reduced to Int32Add because of the other number add.
+    CHECK_EQ(IrOpcode::kNumberAdd, add_node->opcode());
+    // Conversion to int32 should be done.
+    CheckToI32(add_node, r->InputAt(0), R.signedness[o]);
+    CheckToI32(one, r->InputAt(1), R.signedness[o + 1]);
+    // The other use should also not be touched.
+    CHECK_EQ(add_node, other_use->InputAt(0));
+    CHECK_EQ(one, other_use->InputAt(1));
+  }
+}
+
+
+TEST(Int32Comparisons) {
+  JSTypedLoweringTester R;
+
+  struct Entry {
+    Operator* js_op;
+    Operator* uint_op;
+    Operator* int_op;
+    Operator* num_op;
+    bool commute;
+  };
+
+  Entry ops[] = {
+      {R.javascript.LessThan(), R.machine.Uint32LessThan(),
+       R.machine.Int32LessThan(), R.simplified.NumberLessThan(), false},
+      {R.javascript.LessThanOrEqual(), R.machine.Uint32LessThanOrEqual(),
+       R.machine.Int32LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
+       false},
+      {R.javascript.GreaterThan(), R.machine.Uint32LessThan(),
+       R.machine.Int32LessThan(), R.simplified.NumberLessThan(), true},
+      {R.javascript.GreaterThanOrEqual(), R.machine.Uint32LessThanOrEqual(),
+       R.machine.Int32LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
+       true}};
+
+  for (size_t o = 0; o < ARRAY_SIZE(ops); o++) {
+    for (size_t i = 0; i < ARRAY_SIZE(kNumberTypes); i++) {
+      Type* t0 = kNumberTypes[i];
+      Node* p0 = R.Parameter(t0, 0);
+
+      for (size_t j = 0; j < ARRAY_SIZE(kNumberTypes); j++) {
+        Type* t1 = kNumberTypes[j];
+        Node* p1 = R.Parameter(t1, 1);
+
+        Node* cmp = R.Binop(ops[o].js_op, p0, p1);
+        Node* r = R.reduce(cmp);
+
+        Operator* expected;
+        if (t0->Is(Type::Unsigned32()) && t1->Is(Type::Unsigned32())) {
+          expected = ops[o].uint_op;
+        } else if (t0->Is(Type::Signed32()) && t1->Is(Type::Signed32())) {
+          expected = ops[o].int_op;
+        } else {
+          expected = ops[o].num_op;
+        }
+        R.CheckPureBinop(expected, r);
+        if (ops[o].commute) {
+          CHECK_EQ(p1, r->InputAt(0));
+          CHECK_EQ(p0, r->InputAt(1));
+        } else {
+          CHECK_EQ(p0, r->InputAt(0));
+          CHECK_EQ(p1, r->InputAt(1));
+        }
+      }
+    }
+  }
+}
diff --git a/test/cctest/compiler/test-linkage.cc b/test/cctest/compiler/test-linkage.cc
new file mode 100644 (file)
index 0000000..6d9453f
--- /dev/null
+++ b/test/cctest/compiler/test-linkage.cc
@@ -0,0 +1,113 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/compiler.h"
+#include "src/zone.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/schedule.h"
+#include "test/cctest/cctest.h"
+
+#if V8_TURBOFAN_TARGET
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+static SimpleOperator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
+                                     0, 0, "dummy");
+
+// So we can get a real JS function.
+static Handle<JSFunction> Compile(const char* source) {
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<String> source_code = isolate->factory()
+                                   ->NewStringFromUtf8(CStrVector(source))
+                                   .ToHandleChecked();
+  Handle<SharedFunctionInfo> shared_function = Compiler::CompileScript(
+      source_code, Handle<String>(), 0, 0, false,
+      Handle<Context>(isolate->native_context()), NULL, NULL,
+      v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE);
+  return isolate->factory()->NewFunctionFromSharedFunctionInfo(
+      shared_function, isolate->native_context());
+}
+
+
+TEST(TestLinkageCreate) {
+  InitializedHandleScope handles;
+  Handle<JSFunction> function = Compile("a + b");
+  CompilationInfoWithZone info(function);
+  Linkage linkage(&info);
+}
+
+
+TEST(TestLinkageJSFunctionIncoming) {
+  InitializedHandleScope handles;
+
+  const char* sources[] = {"(function() { })", "(function(a) { })",
+                           "(function(a,b) { })", "(function(a,b,c) { })"};
+
+  for (int i = 0; i < static_cast<int>(ARRAY_SIZE(sources)); i++) {
+    i::HandleScope handles(CcTest::i_isolate());
+    Handle<JSFunction> function = v8::Utils::OpenHandle(
+        *v8::Handle<v8::Function>::Cast(CompileRun(sources[i])));
+    CompilationInfoWithZone info(function);
+    Linkage linkage(&info);
+
+    CallDescriptor* descriptor = linkage.GetIncomingDescriptor();
+    CHECK_NE(NULL, descriptor);
+
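+    // 1 + i: the incoming descriptor counts the implicit receiver in
+    // addition to the i declared parameters.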
+    CHECK_EQ(1 + i, descriptor->ParameterCount());
+    CHECK_EQ(1, descriptor->ReturnCount());
+    CHECK_EQ(Operator::kNoProperties, descriptor->properties());
+    CHECK_EQ(true, descriptor->IsJSFunctionCall());
+  }
+}
+
+
+TEST(TestLinkageCodeStubIncoming) {
+  Isolate* isolate = CcTest::InitIsolateOnce();
+  CompilationInfoWithZone info(static_cast<HydrogenCodeStub*>(NULL), isolate);
+  Linkage linkage(&info);
+  // TODO(titzer): test linkage creation with a bona fide code stub;
+  // this just checks current behavior.
+  CHECK_EQ(NULL, linkage.GetIncomingDescriptor());
+}
+
+
+TEST(TestLinkageJSCall) {
+  HandleAndZoneScope handles;
+  Handle<JSFunction> function = Compile("a + c");
+  CompilationInfoWithZone info(function);
+  Linkage linkage(&info);
+
+  for (int i = 0; i < 32; i++) {
+    CallDescriptor* descriptor = linkage.GetJSCallDescriptor(i);
+    CHECK_NE(NULL, descriptor);
+    CHECK_EQ(i, descriptor->ParameterCount());
+    CHECK_EQ(1, descriptor->ReturnCount());
+    CHECK_EQ(Operator::kNoProperties, descriptor->properties());
+    CHECK_EQ(true, descriptor->IsJSFunctionCall());
+  }
+}
+
+
+TEST(TestLinkageRuntimeCall) {
+  // TODO(titzer): test linkage creation for outgoing runtime calls.
+}
+
+
+TEST(TestLinkageStubCall) {
+  // TODO(titzer): test linkage creation for outgoing stub calls.
+}
+
+
+#endif  // V8_TURBOFAN_TARGET
diff --git a/test/cctest/compiler/test-machine-operator-reducer.cc b/test/cctest/compiler/test-machine-operator-reducer.cc
new file mode 100644 (file)
index 0000000..6a82f5a
--- /dev/null
+++ b/test/cctest/compiler/test-machine-operator-reducer.cc
@@ -0,0 +1,776 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/cctest.h"
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/machine-operator-reducer.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+template <typename T>
+Operator* NewConstantOperator(CommonOperatorBuilder* common, volatile T value);
+
+template <>
+Operator* NewConstantOperator<int32_t>(CommonOperatorBuilder* common,
+                                       volatile int32_t value) {
+  return common->Int32Constant(value);
+}
+
+template <>
+Operator* NewConstantOperator<double>(CommonOperatorBuilder* common,
+                                      volatile double value) {
+  return common->Float64Constant(value);
+}
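+
+// Usage sketch: the template lets ReducerTester::Constant build either
+// constant kind from a single call site, e.g.
+//
+//   Node* k1 = graph.NewNode(NewConstantOperator<int32_t>(&common, 7));
+//   Node* k2 = graph.NewNode(NewConstantOperator<double>(&common, 0.5));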
+
+
+class ReducerTester : public HandleAndZoneScope {
+ public:
+  ReducerTester()
+      : isolate(main_isolate()),
+        binop(NULL),
+        unop(NULL),
+        machine(main_zone()),
+        common(main_zone()),
+        graph(main_zone()),
+        maxuint32(Constant<int32_t>(kMaxUInt32)) {}
+
+  Isolate* isolate;
+  Operator* binop;
+  Operator* unop;
+  MachineOperatorBuilder machine;
+  CommonOperatorBuilder common;
+  Graph graph;
+  Node* maxuint32;
+
+  template <typename T>
+  Node* Constant(volatile T value) {
+    return graph.NewNode(NewConstantOperator<T>(&common, value));
+  }
+
+  // Check that the reduction of this binop applied to constants {a} and {b}
+  // yields the {expect} value.
+  template <typename T>
+  void CheckFoldBinop(volatile T expect, volatile T a, volatile T b) {
+    CheckFoldBinop<T>(expect, Constant<T>(a), Constant<T>(b));
+  }
+
+  // Check that the reduction of this binop applied to {a} and {b} yields
+  // the {expect} value.
+  template <typename T>
+  void CheckFoldBinop(volatile T expect, Node* a, Node* b) {
+    CHECK_NE(NULL, binop);
+    Node* n = graph.NewNode(binop, a, b);
+    MachineOperatorReducer reducer(&graph);
+    Reduction reduction = reducer.Reduce(n);
+    CHECK(reduction.Changed());
+    CHECK_NE(n, reduction.replacement());
+    CHECK_EQ(expect, ValueOf<T>(reduction.replacement()->op()));
+  }
+
+  // Check that the reduction of this binop applied to {a} and {b} yields
+  // the {expect} node.
+  void CheckBinop(Node* expect, Node* a, Node* b) {
+    CHECK_NE(NULL, binop);
+    Node* n = graph.NewNode(binop, a, b);
+    MachineOperatorReducer reducer(&graph);
+    Reduction reduction = reducer.Reduce(n);
+    CHECK(reduction.Changed());
+    CHECK_EQ(expect, reduction.replacement());
+  }
+
+  // Check that the reduction of this binop applied to {left} and {right} yields
+  // this binop applied to {left_expect} and {right_expect}.
+  void CheckFoldBinop(Node* left_expect, Node* right_expect, Node* left,
+                      Node* right) {
+    CHECK_NE(NULL, binop);
+    Node* n = graph.NewNode(binop, left, right);
+    MachineOperatorReducer reducer(&graph);
+    Reduction reduction = reducer.Reduce(n);
+    CHECK(reduction.Changed());
+    CHECK_EQ(binop, reduction.replacement()->op());
+    CHECK_EQ(left_expect, reduction.replacement()->InputAt(0));
+    CHECK_EQ(right_expect, reduction.replacement()->InputAt(1));
+  }
+
+  // Check that the reduction of this binop applied to {left} and {right} yields
+  // the {op_expect} applied to {left_expect} and {right_expect}.
+  template <typename T>
+  void CheckFoldBinop(volatile T left_expect, Operator* op_expect,
+                      Node* right_expect, Node* left, Node* right) {
+    CHECK_NE(NULL, binop);
+    Node* n = graph.NewNode(binop, left, right);
+    MachineOperatorReducer reducer(&graph);
+    Reduction r = reducer.Reduce(n);
+    CHECK(r.Changed());
+    CHECK_EQ(op_expect->opcode(), r.replacement()->op()->opcode());
+    CHECK_EQ(left_expect, ValueOf<T>(r.replacement()->InputAt(0)->op()));
+    CHECK_EQ(right_expect, r.replacement()->InputAt(1));
+  }
+
+  // Check that the reduction of this binop applied to {left} and {right} yields
+  // the {op_expect} applied to {left_expect} and {right_expect}.
+  template <typename T>
+  void CheckFoldBinop(Node* left_expect, Operator* op_expect,
+                      volatile T right_expect, Node* left, Node* right) {
+    CHECK_NE(NULL, binop);
+    Node* n = graph.NewNode(binop, left, right);
+    MachineOperatorReducer reducer(&graph);
+    Reduction r = reducer.Reduce(n);
+    CHECK(r.Changed());
+    CHECK_EQ(op_expect->opcode(), r.replacement()->op()->opcode());
+    CHECK_EQ(left_expect, r.replacement()->InputAt(0));
+    CHECK_EQ(right_expect, ValueOf<T>(r.replacement()->InputAt(1)->op()));
+  }
+
+  // Check that if the given constant appears on the left, the reducer will
+  // swap it to be on the right.
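+  // (Presumably canonicalizing constants to the right lets later reduction
+  // patterns match only one operand order.)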
+  template <typename T>
+  void CheckPutConstantOnRight(volatile T constant) {
+    // TODO(titzer): CHECK(binop->HasProperty(Operator::kCommutative));
+    Node* p = Parameter();
+    Node* k = Constant<T>(constant);
+    {
+      Node* n = graph.NewNode(binop, k, p);
+      MachineOperatorReducer reducer(&graph);
+      Reduction reduction = reducer.Reduce(n);
+      CHECK(!reduction.Changed() || reduction.replacement() == n);
+      CHECK_EQ(p, n->InputAt(0));
+      CHECK_EQ(k, n->InputAt(1));
+    }
+    {
+      Node* n = graph.NewNode(binop, p, k);
+      MachineOperatorReducer reducer(&graph);
+      Reduction reduction = reducer.Reduce(n);
+      CHECK(!reduction.Changed());
+      CHECK_EQ(p, n->InputAt(0));
+      CHECK_EQ(k, n->InputAt(1));
+    }
+  }
+
+  // Check that if the given constant appears on the left, the reducer will
+  // *NOT* swap it to be on the right.
+  template <typename T>
+  void CheckDontPutConstantOnRight(volatile T constant) {
+    CHECK(!binop->HasProperty(Operator::kCommutative));
+    Node* p = Parameter();
+    Node* k = Constant<T>(constant);
+    Node* n = graph.NewNode(binop, k, p);
+    MachineOperatorReducer reducer(&graph);
+    Reduction reduction = reducer.Reduce(n);
+    CHECK(!reduction.Changed());
+    CHECK_EQ(k, n->InputAt(0));
+    CHECK_EQ(p, n->InputAt(1));
+  }
+
+  Node* Parameter(int32_t index = 0) {
+    return graph.NewNode(common.Parameter(index));
+  }
+};
+
+
+TEST(ReduceWord32And) {
+  ReducerTester R;
+  R.binop = R.machine.Word32And();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x & y, x, y);
+    }
+  }
+
+  R.CheckPutConstantOnRight(33);
+  R.CheckPutConstantOnRight(44000);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+  Node* minus_1 = R.Constant<int32_t>(-1);
+
+  R.CheckBinop(zero, x, zero);  // x  & 0  => 0
+  R.CheckBinop(zero, zero, x);  // 0  & x  => 0
+  R.CheckBinop(x, x, minus_1);  // x  & -1 => x
+  R.CheckBinop(x, minus_1, x);  // -1 & x  => x
+  R.CheckBinop(x, x, x);        // x  & x  => x
+}
+
+
+TEST(ReduceWord32Or) {
+  ReducerTester R;
+  R.binop = R.machine.Word32Or();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x | y, x, y);
+    }
+  }
+
+  R.CheckPutConstantOnRight(36);
+  R.CheckPutConstantOnRight(44001);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+  Node* minus_1 = R.Constant<int32_t>(-1);
+
+  R.CheckBinop(x, x, zero);           // x  | 0  => x
+  R.CheckBinop(x, zero, x);           // 0  | x  => x
+  R.CheckBinop(minus_1, x, minus_1);  // x  | -1 => -1
+  R.CheckBinop(minus_1, minus_1, x);  // -1 | x  => -1
+  R.CheckBinop(x, x, x);              // x  | x  => x
+}
+
+
+TEST(ReduceWord32Xor) {
+  ReducerTester R;
+  R.binop = R.machine.Word32Xor();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x ^ y, x, y);
+    }
+  }
+
+  R.CheckPutConstantOnRight(39);
+  R.CheckPutConstantOnRight(4403);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckBinop(x, x, zero);            // x ^ 0  => x
+  R.CheckBinop(x, zero, x);            // 0 ^ x  => x
+  R.CheckFoldBinop<int32_t>(0, x, x);  // x ^ x  => 0
+}
+
+
+TEST(ReduceWord32Shl) {
+  ReducerTester R;
+  R.binop = R.machine.Word32Shl();
+
+  // TODO(titzer): out of range shifts
+  FOR_INT32_INPUTS(i) {
+    for (int y = 0; y < 32; y++) {
+      int32_t x = *i;
+      R.CheckFoldBinop<int32_t>(x << y, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(44);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckBinop(x, x, zero);  // x << 0  => x
+}
+
+
+TEST(ReduceWord32Shr) {
+  ReducerTester R;
+  R.binop = R.machine.Word32Shr();
+
+  // TODO(titzer): test out of range shifts
+  FOR_UINT32_INPUTS(i) {
+    for (uint32_t y = 0; y < 32; y++) {
+      uint32_t x = *i;
+      R.CheckFoldBinop<int32_t>(x >> y, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(44);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckBinop(x, x, zero);  // x >>> 0  => x
+}
+
+
+TEST(ReduceWord32Sar) {
+  ReducerTester R;
+  R.binop = R.machine.Word32Sar();
+
+  // TODO(titzer): test out of range shifts
+  FOR_INT32_INPUTS(i) {
+    for (int32_t y = 0; y < 32; y++) {
+      int32_t x = *i;
+      R.CheckFoldBinop<int32_t>(x >> y, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(44);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckBinop(x, x, zero);  // x >> 0  => x
+}
+
+
+TEST(ReduceWord32Equal) {
+  ReducerTester R;
+  R.binop = R.machine.Word32Equal();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x == y ? 1 : 0, x, y);
+    }
+  }
+
+  R.CheckPutConstantOnRight(48);
+  R.CheckPutConstantOnRight(-48);
+
+  Node* x = R.Parameter(0);
+  Node* y = R.Parameter(1);
+  Node* zero = R.Constant<int32_t>(0);
+  Node* sub = R.graph.NewNode(R.machine.Int32Sub(), x, y);
+
+  R.CheckFoldBinop<int32_t>(1, x, x);  // x == x  => 1
+  R.CheckFoldBinop(x, y, sub, zero);   // x - y == 0  => x == y
+  R.CheckFoldBinop(x, y, zero, sub);   // 0 == x - y  => x == y
+}
+
+
+TEST(ReduceInt32Add) {
+  ReducerTester R;
+  R.binop = R.machine.Int32Add();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x + y, x, y);  // TODO(titzer): signed overflow
+    }
+  }
+
+  R.CheckPutConstantOnRight(41);
+  R.CheckPutConstantOnRight(4407);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckBinop(x, x, zero);  // x + 0  => x
+  R.CheckBinop(x, zero, x);  // 0 + x  => x
+}
+
+
+TEST(ReduceInt32Sub) {
+  ReducerTester R;
+  R.binop = R.machine.Int32Sub();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x - y, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(412);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckBinop(x, x, zero);  // x - 0  => x
+}
+
+
+TEST(ReduceInt32Mul) {
+  ReducerTester R;
+  R.binop = R.machine.Int32Mul();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x * y, x, y);  // TODO(titzer): signed overflow
+    }
+  }
+
+  R.CheckPutConstantOnRight(4111);
+  R.CheckPutConstantOnRight(-4407);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+  Node* one = R.Constant<int32_t>(1);
+  Node* minus_one = R.Constant<int32_t>(-1);
+
+  R.CheckBinop(zero, x, zero);  // x * 0  => 0
+  R.CheckBinop(zero, zero, x);  // 0 * x  => 0
+  R.CheckBinop(x, x, one);      // x * 1  => x
+  R.CheckBinop(x, one, x);      // 1 * x  => x
+  R.CheckFoldBinop<int32_t>(0, R.machine.Int32Sub(), x, minus_one,
+                            x);  // -1 * x  => 0 - x
+  R.CheckFoldBinop<int32_t>(0, R.machine.Int32Sub(), x, x,
+                            minus_one);  // x * -1  => 0 - x
+
+  for (int32_t n = 1; n < 31; ++n) {
+    Node* multiplier = R.Constant<int32_t>(1 << n);
+    R.CheckFoldBinop<int32_t>(x, R.machine.Word32Shl(), n, x,
+                              multiplier);  // x * 2^n => x << n
+    R.CheckFoldBinop<int32_t>(x, R.machine.Word32Shl(), n, multiplier,
+                              x);  // 2^n * x => x << n
+  }
+}
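+
+// Worked example of the strength reduction above: for n = 3 the reducer turns
+// x * 8 into x << 3 (e.g. 5 * 8 == 40 == 5 << 3). The rewrite is exact for
+// every int32_t x because multiplication and shift wrap identically mod 2^32.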
+
+
+TEST(ReduceInt32Div) {
+  ReducerTester R;
+  R.binop = R.machine.Int32Div();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      if (y == 0) continue;              // TODO(titzer): test / 0
+      int32_t r = y == -1 ? -x : x / y;  // INT_MIN / -1 may explode in C
+      R.CheckFoldBinop<int32_t>(r, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(41111);
+  R.CheckDontPutConstantOnRight(-44071);
+
+  Node* x = R.Parameter();
+  Node* one = R.Constant<int32_t>(1);
+  Node* minus_one = R.Constant<int32_t>(-1);
+
+  R.CheckBinop(x, x, one);  // x / 1  => x
+  // TODO(titzer):                          // 0 / x  => 0 if x != 0
+  // TODO(titzer):                          // x / 2^n => x >> n and round
+  R.CheckFoldBinop<int32_t>(0, R.machine.Int32Sub(), x, x,
+                            minus_one);  // x / -1  => 0 - x
+}
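+
+// Note: rewriting x / -1 as 0 - x also avoids materializing a division whose
+// INT_MIN / -1 case traps or is undefined in C; at the machine level
+// 0 - INT_MIN simply wraps back to INT_MIN.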
+
+
+TEST(ReduceInt32UDiv) {
+  ReducerTester R;
+  R.binop = R.machine.Int32UDiv();
+
+  FOR_UINT32_INPUTS(pl) {
+    FOR_UINT32_INPUTS(pr) {
+      uint32_t x = *pl, y = *pr;
+      if (y == 0) continue;  // TODO(titzer): test / 0
+      R.CheckFoldBinop<int32_t>(x / y, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(41311);
+  R.CheckDontPutConstantOnRight(-44371);
+
+  Node* x = R.Parameter();
+  Node* one = R.Constant<int32_t>(1);
+
+  R.CheckBinop(x, x, one);  // x / 1  => x
+  // TODO(titzer):                            // 0 / x  => 0 if x != 0
+
+  for (uint32_t n = 1; n < 32; ++n) {
+    Node* divisor = R.Constant<int32_t>(1u << n);
+    R.CheckFoldBinop<int32_t>(x, R.machine.Word32Shr(), n, x,
+                              divisor);  // x / 2^n => x >> n
+  }
+}
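+
+// Worked example: for unsigned x, x / 16 == x >> 4 (e.g. 100u / 16 == 6 ==
+// 100u >> 4), so an unsigned division by 2^n becomes a logical right shift
+// by n. The same rewrite would be wrong for signed division, which rounds
+// negative quotients toward zero.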
+
+
+TEST(ReduceInt32Mod) {
+  ReducerTester R;
+  R.binop = R.machine.Int32Mod();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      if (y == 0) continue;             // TODO(titzer): test % 0
+      int32_t r = y == -1 ? 0 : x % y;  // INT_MIN % -1 may explode in C
+      R.CheckFoldBinop<int32_t>(r, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(413);
+  R.CheckDontPutConstantOnRight(-4401);
+
+  Node* x = R.Parameter();
+  Node* one = R.Constant<int32_t>(1);
+
+  R.CheckFoldBinop<int32_t>(0, x, one);  // x % 1  => 0
+  // TODO(titzer):                       // x % 2^n => x & 2^n-1 and round
+}
+
+
+TEST(ReduceInt32UMod) {
+  ReducerTester R;
+  R.binop = R.machine.Int32UMod();
+
+  FOR_UINT32_INPUTS(pl) {
+    FOR_UINT32_INPUTS(pr) {
+      uint32_t x = *pl, y = *pr;
+      if (y == 0) continue;  // TODO(titzer): test x % 0
+      R.CheckFoldBinop<int32_t>(x % y, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(417);
+  R.CheckDontPutConstantOnRight(-4371);
+
+  Node* x = R.Parameter();
+  Node* one = R.Constant<int32_t>(1);
+
+  R.CheckFoldBinop<int32_t>(0, x, one);  // x % 1  => 0
+
+  for (uint32_t n = 1; n < 32; ++n) {
+    Node* divisor = R.Constant<int32_t>(1u << n);
+    R.CheckFoldBinop<int32_t>(x, R.machine.Word32And(), (1u << n) - 1, x,
+                              divisor);  // x % 2^n => x & 2^n-1
+  }
+}
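+
+// Worked example: for unsigned x, x % 8 == x & 7 (e.g. 29u % 8 == 5 ==
+// 29u & 7), so an unsigned modulus by 2^n becomes a bitwise AND with the
+// mask 2^n - 1.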
+
+
+TEST(ReduceInt32LessThan) {
+  ReducerTester R;
+  R.binop = R.machine.Int32LessThan();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x < y ? 1 : 0, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(41399);
+  R.CheckDontPutConstantOnRight(-440197);
+
+  Node* x = R.Parameter(0);
+  Node* y = R.Parameter(1);
+  Node* zero = R.Constant<int32_t>(0);
+  Node* sub = R.graph.NewNode(R.machine.Int32Sub(), x, y);
+
+  R.CheckFoldBinop<int32_t>(0, x, x);  // x < x  => 0
+  R.CheckFoldBinop(x, y, sub, zero);   // x - y < 0 => x < y
+  R.CheckFoldBinop(y, x, zero, sub);   // 0 < x - y => y < x
+}
+
+
+TEST(ReduceInt32LessThanOrEqual) {
+  ReducerTester R;
+  R.binop = R.machine.Int32LessThanOrEqual();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x <= y ? 1 : 0, x, y);
+    }
+  }
+
+  FOR_INT32_INPUTS(i) { R.CheckDontPutConstantOnRight<int32_t>(*i); }
+
+  Node* x = R.Parameter(0);
+  Node* y = R.Parameter(1);
+  Node* zero = R.Constant<int32_t>(0);
+  Node* sub = R.graph.NewNode(R.machine.Int32Sub(), x, y);
+
+  R.CheckFoldBinop<int32_t>(1, x, x);  // x <= x => 1
+  R.CheckFoldBinop(x, y, sub, zero);   // x - y <= 0 => x <= y
+  R.CheckFoldBinop(y, x, zero, sub);   // 0 <= x - y => y <= x
+}
+
+
+TEST(ReduceUint32LessThan) {
+  ReducerTester R;
+  R.binop = R.machine.Uint32LessThan();
+
+  FOR_UINT32_INPUTS(pl) {
+    FOR_UINT32_INPUTS(pr) {
+      uint32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x < y ? 1 : 0, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(41399);
+  R.CheckDontPutConstantOnRight(-440197);
+
+  Node* x = R.Parameter();
+  Node* max = R.maxuint32;
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckFoldBinop<int32_t>(0, max, x);   // M < x  => 0
+  R.CheckFoldBinop<int32_t>(0, x, zero);  // x < 0  => 0
+  R.CheckFoldBinop<int32_t>(0, x, x);     // x < x  => 0
+}
+
+
+TEST(ReduceUint32LessThanOrEqual) {
+  ReducerTester R;
+  R.binop = R.machine.Uint32LessThanOrEqual();
+
+  FOR_UINT32_INPUTS(pl) {
+    FOR_UINT32_INPUTS(pr) {
+      uint32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x <= y ? 1 : 0, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(41399);
+  R.CheckDontPutConstantOnRight(-440197);
+
+  Node* x = R.Parameter();
+  Node* max = R.maxuint32;
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckFoldBinop<int32_t>(1, x, max);   // x <= M  => 1
+  R.CheckFoldBinop<int32_t>(1, zero, x);  // 0 <= x  => 1
+  R.CheckFoldBinop<int32_t>(1, x, x);     // x <= x  => 1
+}
+
+
+TEST(ReduceLoadStore) {
+  ReducerTester R;
+
+  Node* base = R.Constant<int32_t>(11);
+  Node* index = R.Constant<int32_t>(4);
+  Node* load = R.graph.NewNode(R.machine.Load(kMachineWord32), base, index);
+
+  {
+    MachineOperatorReducer reducer(&R.graph);
+    Reduction reduction = reducer.Reduce(load);
+    CHECK(!reduction.Changed());  // loads should not be reduced.
+  }
+
+  {
+    Node* store =
+        R.graph.NewNode(R.machine.Store(kMachineWord32), base, index, load);
+    MachineOperatorReducer reducer(&R.graph);
+    Reduction reduction = reducer.Reduce(store);
+    CHECK(!reduction.Changed());  // stores should not be reduced.
+  }
+}
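+
+// Note: unlike the pure arithmetic operators above, loads and stores touch
+// memory, so constant base and index operands alone never justify folding
+// them; the reducer is expected to leave all memory operations untouched.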
+
+
+static void CheckNans(ReducerTester* R) {
+  Node* x = R->Parameter();
+  std::vector<double> nans = ValueHelper::nan_vector();
+  for (std::vector<double>::const_iterator pl = nans.begin(); pl != nans.end();
+       ++pl) {
+    for (std::vector<double>::const_iterator pr = nans.begin();
+         pr != nans.end(); ++pr) {
+      Node* nan1 = R->Constant<double>(*pl);
+      Node* nan2 = R->Constant<double>(*pr);
+      R->CheckBinop(nan1, x, nan1);     // x op NaN => NaN
+      R->CheckBinop(nan1, nan1, x);     // NaN op x => NaN
+      R->CheckBinop(nan1, nan2, nan1);  // NaN op NaN => NaN
+    }
+  }
+}
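+
+// Note: IEEE 754 arithmetic propagates NaN through +, -, * and /, and NaN
+// compares unequal to everything including itself, so a binop with a constant
+// NaN operand can be folded directly to a NaN constant.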
+
+
+TEST(ReduceFloat64Add) {
+  ReducerTester R;
+  R.binop = R.machine.Float64Add();
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double x = *pl, y = *pr;
+      R.CheckFoldBinop<double>(x + y, x, y);
+    }
+  }
+
+  FOR_FLOAT64_INPUTS(i) { R.CheckPutConstantOnRight(*i); }
+  // TODO(titzer): CheckNans(&R);
+}
+
+
+TEST(ReduceFloat64Sub) {
+  ReducerTester R;
+  R.binop = R.machine.Float64Sub();
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double x = *pl, y = *pr;
+      R.CheckFoldBinop<double>(x - y, x, y);
+    }
+  }
+  // TODO(titzer): CheckNans(&R);
+}
+
+
+TEST(ReduceFloat64Mul) {
+  ReducerTester R;
+  R.binop = R.machine.Float64Mul();
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double x = *pl, y = *pr;
+      R.CheckFoldBinop<double>(x * y, x, y);
+    }
+  }
+
+  double inf = V8_INFINITY;
+  R.CheckPutConstantOnRight(-inf);
+  R.CheckPutConstantOnRight(-0.1);
+  R.CheckPutConstantOnRight(0.1);
+  R.CheckPutConstantOnRight(inf);
+
+  Node* x = R.Parameter();
+  Node* one = R.Constant<double>(1.0);
+
+  R.CheckBinop(x, x, one);  // x * 1.0 => x
+  R.CheckBinop(x, one, x);  // 1.0 * x => x
+
+  CheckNans(&R);
+}
+
+
+TEST(ReduceFloat64Div) {
+  ReducerTester R;
+  R.binop = R.machine.Float64Div();
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double x = *pl, y = *pr;
+      R.CheckFoldBinop<double>(x / y, x, y);
+    }
+  }
+
+  Node* x = R.Parameter();
+  Node* one = R.Constant<double>(1.0);
+
+  R.CheckBinop(x, x, one);  // x / 1.0 => x
+
+  CheckNans(&R);
+}
+
+
+TEST(ReduceFloat64Mod) {
+  ReducerTester R;
+  R.binop = R.machine.Float64Mod();
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double x = *pl, y = *pr;
+      R.CheckFoldBinop<double>(modulo(x, y), x, y);
+    }
+  }
+
+  CheckNans(&R);
+}
+
+
+// TODO(titzer): test MachineOperatorReducer for Word64And
+// TODO(titzer): test MachineOperatorReducer for Word64Or
+// TODO(titzer): test MachineOperatorReducer for Word64Xor
+// TODO(titzer): test MachineOperatorReducer for Word64Shl
+// TODO(titzer): test MachineOperatorReducer for Word64Shr
+// TODO(titzer): test MachineOperatorReducer for Word64Sar
+// TODO(titzer): test MachineOperatorReducer for Word64Equal
+// TODO(titzer): test MachineOperatorReducer for Word64Not
+// TODO(titzer): test MachineOperatorReducer for Int64Add
+// TODO(titzer): test MachineOperatorReducer for Int64Sub
+// TODO(titzer): test MachineOperatorReducer for Int64Mul
+// TODO(titzer): test MachineOperatorReducer for Int64UMul
+// TODO(titzer): test MachineOperatorReducer for Int64Div
+// TODO(titzer): test MachineOperatorReducer for Int64UDiv
+// TODO(titzer): test MachineOperatorReducer for Int64Mod
+// TODO(titzer): test MachineOperatorReducer for Int64UMod
+// TODO(titzer): test MachineOperatorReducer for Int64Neg
+// TODO(titzer): test MachineOperatorReducer for ConvertInt32ToFloat64
+// TODO(titzer): test MachineOperatorReducer for ConvertFloat64ToInt32
+// TODO(titzer): test MachineOperatorReducer for Float64Compare
diff --git a/test/cctest/compiler/test-node-algorithm.cc b/test/cctest/compiler/test-node-algorithm.cc
new file mode 100644
index 0000000..ac8fbb9
--- /dev/null
+++ b/test/cctest/compiler/test-node-algorithm.cc
@@ -0,0 +1,330 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "src/v8.h"
+
+#include "graph-tester.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/operator.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+static SimpleOperator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
+                                     0, 0, "dummy");
+
+class PreNodeVisitor : public NullNodeVisitor {
+ public:
+  GenericGraphVisit::Control Pre(Node* node) {
+    printf("NODE ID: %d\n", node->id());
+    nodes_.push_back(node);
+    return GenericGraphVisit::CONTINUE;
+  }
+  std::vector<Node*> nodes_;
+};
+
+
+class PostNodeVisitor : public NullNodeVisitor {
+ public:
+  GenericGraphVisit::Control Post(Node* node) {
+    printf("NODE ID: %d\n", node->id());
+    nodes_.push_back(node);
+    return GenericGraphVisit::CONTINUE;
+  }
+  std::vector<Node*> nodes_;
+};
+
+
+TEST(TestUseNodeVisitEmpty) {
+  GraphWithStartNodeTester graph;
+
+  PreNodeVisitor node_visitor;
+  graph.VisitNodeUsesFromStart(&node_visitor);
+
+  CHECK_EQ(1, node_visitor.nodes_.size());
+}
+
+
+TEST(TestUseNodePreOrderVisitSimple) {
+  GraphWithStartNodeTester graph;
+  Node* n2 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n3 = graph.NewNode(&dummy_operator, n2);
+  Node* n4 = graph.NewNode(&dummy_operator, n2, n3);
+  Node* n5 = graph.NewNode(&dummy_operator, n4, n2);
+  graph.SetEnd(n5);
+
+  PreNodeVisitor node_visitor;
+  graph.VisitNodeUsesFromStart(&node_visitor);
+
+  CHECK_EQ(5, node_visitor.nodes_.size());
+  CHECK(graph.start()->id() == node_visitor.nodes_[0]->id());
+  CHECK(n2->id() == node_visitor.nodes_[1]->id());
+  CHECK(n3->id() == node_visitor.nodes_[2]->id());
+  CHECK(n4->id() == node_visitor.nodes_[3]->id());
+  CHECK(n5->id() == node_visitor.nodes_[4]->id());
+}
+
+
+TEST(TestInputNodePreOrderVisitSimple) {
+  GraphWithStartNodeTester graph;
+  Node* n2 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n3 = graph.NewNode(&dummy_operator, n2);
+  Node* n4 = graph.NewNode(&dummy_operator, n2, n3);
+  Node* n5 = graph.NewNode(&dummy_operator, n4, n2);
+  graph.SetEnd(n5);
+
+  PreNodeVisitor node_visitor;
+  graph.VisitNodeInputsFromEnd(&node_visitor);
+  CHECK_EQ(5, node_visitor.nodes_.size());
+  CHECK(n5->id() == node_visitor.nodes_[0]->id());
+  CHECK(n4->id() == node_visitor.nodes_[1]->id());
+  CHECK(n2->id() == node_visitor.nodes_[2]->id());
+  CHECK(graph.start()->id() == node_visitor.nodes_[3]->id());
+  CHECK(n3->id() == node_visitor.nodes_[4]->id());
+}
+
+
+TEST(TestUseNodePostOrderVisitSimple) {
+  GraphWithStartNodeTester graph;
+  Node* n2 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n3 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n4 = graph.NewNode(&dummy_operator, n2);
+  Node* n5 = graph.NewNode(&dummy_operator, n2);
+  Node* n6 = graph.NewNode(&dummy_operator, n2);
+  Node* n7 = graph.NewNode(&dummy_operator, n3);
+  Node* end_dependencies[4] = {n4, n5, n6, n7};
+  Node* n8 = graph.NewNode(&dummy_operator, 4, end_dependencies);
+  graph.SetEnd(n8);
+
+  PostNodeVisitor node_visitor;
+  graph.VisitNodeUsesFromStart(&node_visitor);
+
+  CHECK_EQ(8, node_visitor.nodes_.size());
+  CHECK(graph.end()->id() == node_visitor.nodes_[0]->id());
+  CHECK(n4->id() == node_visitor.nodes_[1]->id());
+  CHECK(n5->id() == node_visitor.nodes_[2]->id());
+  CHECK(n6->id() == node_visitor.nodes_[3]->id());
+  CHECK(n2->id() == node_visitor.nodes_[4]->id());
+  CHECK(n7->id() == node_visitor.nodes_[5]->id());
+  CHECK(n3->id() == node_visitor.nodes_[6]->id());
+  CHECK(graph.start()->id() == node_visitor.nodes_[7]->id());
+}
+
+
+TEST(TestUseNodePostOrderVisitLong) {
+  GraphWithStartNodeTester graph;
+  Node* n2 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n3 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n4 = graph.NewNode(&dummy_operator, n2);
+  Node* n5 = graph.NewNode(&dummy_operator, n2);
+  Node* n6 = graph.NewNode(&dummy_operator, n3);
+  Node* n7 = graph.NewNode(&dummy_operator, n3);
+  Node* n8 = graph.NewNode(&dummy_operator, n5);
+  Node* n9 = graph.NewNode(&dummy_operator, n5);
+  Node* n10 = graph.NewNode(&dummy_operator, n9);
+  Node* n11 = graph.NewNode(&dummy_operator, n9);
+  Node* end_dependencies[6] = {n4, n8, n10, n11, n6, n7};
+  Node* n12 = graph.NewNode(&dummy_operator, 6, end_dependencies);
+  graph.SetEnd(n12);
+
+  PostNodeVisitor node_visitor;
+  graph.VisitNodeUsesFromStart(&node_visitor);
+
+  CHECK_EQ(12, node_visitor.nodes_.size());
+  CHECK(graph.end()->id() == node_visitor.nodes_[0]->id());
+  CHECK(n4->id() == node_visitor.nodes_[1]->id());
+  CHECK(n8->id() == node_visitor.nodes_[2]->id());
+  CHECK(n10->id() == node_visitor.nodes_[3]->id());
+  CHECK(n11->id() == node_visitor.nodes_[4]->id());
+  CHECK(n9->id() == node_visitor.nodes_[5]->id());
+  CHECK(n5->id() == node_visitor.nodes_[6]->id());
+  CHECK(n2->id() == node_visitor.nodes_[7]->id());
+  CHECK(n6->id() == node_visitor.nodes_[8]->id());
+  CHECK(n7->id() == node_visitor.nodes_[9]->id());
+  CHECK(n3->id() == node_visitor.nodes_[10]->id());
+  CHECK(graph.start()->id() == node_visitor.nodes_[11]->id());
+}
+
+
+TEST(TestUseNodePreOrderVisitCycle) {
+  GraphWithStartNodeTester graph;
+  Node* n0 = graph.start_node();
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n1);
+  n0->AppendInput(graph.main_zone(), n2);
+  graph.SetStart(n0);
+  graph.SetEnd(n2);
+
+  PreNodeVisitor node_visitor;
+  graph.VisitNodeUsesFromStart(&node_visitor);
+
+  CHECK_EQ(3, node_visitor.nodes_.size());
+  CHECK(n0->id() == node_visitor.nodes_[0]->id());
+  CHECK(n1->id() == node_visitor.nodes_[1]->id());
+  CHECK(n2->id() == node_visitor.nodes_[2]->id());
+}
+
+
+struct ReenterNodeVisitor : NullNodeVisitor {
+  GenericGraphVisit::Control Pre(Node* node) {
+    printf("[%d] PRE NODE: %d\n", static_cast<int>(nodes_.size()), node->id());
+    nodes_.push_back(node->id());
+    int size = static_cast<int>(nodes_.size());
+    switch (node->id()) {
+      case 0:
+        return size < 6 ? GenericGraphVisit::REENTER : GenericGraphVisit::SKIP;
+      case 1:
+        return size < 4 ? GenericGraphVisit::DEFER
+                        : GenericGraphVisit::CONTINUE;
+      default:
+        return GenericGraphVisit::REENTER;
+    }
+  }
+
+  GenericGraphVisit::Control Post(Node* node) {
+    printf("[%d] POST NODE: %d\n", static_cast<int>(nodes_.size()), node->id());
+    nodes_.push_back(-node->id());
+    return node->id() == 4 ? GenericGraphVisit::REENTER
+                           : GenericGraphVisit::CONTINUE;
+  }
+
+  void PreEdge(Node* from, int index, Node* to) {
+    printf("[%d] PRE EDGE: %d-%d\n", static_cast<int>(edges_.size()),
+           from->id(), to->id());
+    edges_.push_back(std::make_pair(from->id(), to->id()));
+  }
+
+  void PostEdge(Node* from, int index, Node* to) {
+    printf("[%d] POST EDGE: %d-%d\n", static_cast<int>(edges_.size()),
+           from->id(), to->id());
+    edges_.push_back(std::make_pair(-from->id(), -to->id()));
+  }
+
+  std::vector<int> nodes_;
+  std::vector<std::pair<int, int> > edges_;
+};
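+
+// Assumed semantics of the Control values, inferred from the checks below:
+// CONTINUE visits a node once, SKIP stops descending past the node, DEFER
+// postpones the node until it is reached again, and REENTER permits the node
+// to be visited anew on a later path instead of being marked done.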
+
+
+TEST(TestUseNodeReenterVisit) {
+  GraphWithStartNodeTester graph;
+  Node* n0 = graph.start_node();
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator, n2);
+  Node* n4 = graph.NewNode(&dummy_operator, n0);
+  Node* n5 = graph.NewNode(&dummy_operator, n4);
+  n0->AppendInput(graph.main_zone(), n3);
+  graph.SetStart(n0);
+  graph.SetEnd(n5);
+
+  ReenterNodeVisitor visitor;
+  graph.VisitNodeUsesFromStart(&visitor);
+
+  CHECK_EQ(22, visitor.nodes_.size());
+  CHECK_EQ(24, visitor.edges_.size());
+
+  CHECK(n0->id() == visitor.nodes_[0]);
+  CHECK(n0->id() == visitor.edges_[0].first);
+  CHECK(n1->id() == visitor.edges_[0].second);
+  CHECK(n1->id() == visitor.nodes_[1]);
+  // N1 is deferred.
+  CHECK(-n1->id() == visitor.edges_[1].second);
+  CHECK(-n0->id() == visitor.edges_[1].first);
+  CHECK(n0->id() == visitor.edges_[2].first);
+  CHECK(n2->id() == visitor.edges_[2].second);
+  CHECK(n2->id() == visitor.nodes_[2]);
+  CHECK(n2->id() == visitor.edges_[3].first);
+  CHECK(n3->id() == visitor.edges_[3].second);
+  CHECK(n3->id() == visitor.nodes_[3]);
+  // Circle back to N0, which we may reenter for now.
+  CHECK(n3->id() == visitor.edges_[4].first);
+  CHECK(n0->id() == visitor.edges_[4].second);
+  CHECK(n0->id() == visitor.nodes_[4]);
+  CHECK(n0->id() == visitor.edges_[5].first);
+  CHECK(n1->id() == visitor.edges_[5].second);
+  CHECK(n1->id() == visitor.nodes_[5]);
+  // This time N1 is no longer deferred.
+  CHECK(-n1->id() == visitor.nodes_[6]);
+  CHECK(-n1->id() == visitor.edges_[6].second);
+  CHECK(-n0->id() == visitor.edges_[6].first);
+  CHECK(n0->id() == visitor.edges_[7].first);
+  CHECK(n2->id() == visitor.edges_[7].second);
+  CHECK(n2->id() == visitor.nodes_[7]);
+  CHECK(n2->id() == visitor.edges_[8].first);
+  CHECK(n3->id() == visitor.edges_[8].second);
+  CHECK(n3->id() == visitor.nodes_[8]);
+  CHECK(n3->id() == visitor.edges_[9].first);
+  CHECK(n0->id() == visitor.edges_[9].second);
+  CHECK(n0->id() == visitor.nodes_[9]);
+  // This time we break at N0 and skip it.
+  CHECK(-n0->id() == visitor.edges_[10].second);
+  CHECK(-n3->id() == visitor.edges_[10].first);
+  CHECK(-n3->id() == visitor.nodes_[10]);
+  CHECK(-n3->id() == visitor.edges_[11].second);
+  CHECK(-n2->id() == visitor.edges_[11].first);
+  CHECK(-n2->id() == visitor.nodes_[11]);
+  CHECK(-n2->id() == visitor.edges_[12].second);
+  CHECK(-n0->id() == visitor.edges_[12].first);
+  CHECK(n0->id() == visitor.edges_[13].first);
+  CHECK(n4->id() == visitor.edges_[13].second);
+  CHECK(n4->id() == visitor.nodes_[12]);
+  CHECK(n4->id() == visitor.edges_[14].first);
+  CHECK(n5->id() == visitor.edges_[14].second);
+  CHECK(n5->id() == visitor.nodes_[13]);
+  CHECK(-n5->id() == visitor.nodes_[14]);
+  CHECK(-n5->id() == visitor.edges_[15].second);
+  CHECK(-n4->id() == visitor.edges_[15].first);
+  CHECK(-n4->id() == visitor.nodes_[15]);
+  CHECK(-n4->id() == visitor.edges_[16].second);
+  CHECK(-n0->id() == visitor.edges_[16].first);
+  CHECK(-n0->id() == visitor.nodes_[16]);
+  CHECK(-n0->id() == visitor.edges_[17].second);
+  CHECK(-n3->id() == visitor.edges_[17].first);
+  CHECK(-n3->id() == visitor.nodes_[17]);
+  CHECK(-n3->id() == visitor.edges_[18].second);
+  CHECK(-n2->id() == visitor.edges_[18].first);
+  CHECK(-n2->id() == visitor.nodes_[18]);
+  CHECK(-n2->id() == visitor.edges_[19].second);
+  CHECK(-n0->id() == visitor.edges_[19].first);
+  // N4 may be reentered.
+  CHECK(n0->id() == visitor.edges_[20].first);
+  CHECK(n4->id() == visitor.edges_[20].second);
+  CHECK(n4->id() == visitor.nodes_[19]);
+  CHECK(n4->id() == visitor.edges_[21].first);
+  CHECK(n5->id() == visitor.edges_[21].second);
+  CHECK(-n5->id() == visitor.edges_[22].second);
+  CHECK(-n4->id() == visitor.edges_[22].first);
+  CHECK(-n4->id() == visitor.nodes_[20]);
+  CHECK(-n4->id() == visitor.edges_[23].second);
+  CHECK(-n0->id() == visitor.edges_[23].first);
+  CHECK(-n0->id() == visitor.nodes_[21]);
+}
+
+
+TEST(TestPrintNodeGraphToNodeGraphviz) {
+  GraphWithStartNodeTester graph;
+  Node* n2 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n3 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n4 = graph.NewNode(&dummy_operator, n2);
+  Node* n5 = graph.NewNode(&dummy_operator, n2);
+  Node* n6 = graph.NewNode(&dummy_operator, n3);
+  Node* n7 = graph.NewNode(&dummy_operator, n3);
+  Node* n8 = graph.NewNode(&dummy_operator, n5);
+  Node* n9 = graph.NewNode(&dummy_operator, n5);
+  Node* n10 = graph.NewNode(&dummy_operator, n9);
+  Node* n11 = graph.NewNode(&dummy_operator, n9);
+  Node* end_dependencies[6] = {n4, n8, n10, n11, n6, n7};
+  Node* n12 = graph.NewNode(&dummy_operator, 6, end_dependencies);
+  graph.SetEnd(n12);
+
+  OFStream os(stdout);
+  os << AsDOT(graph);
+}
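+
+// Note: AsDOT emits the graph in Graphviz DOT syntax; the test only verifies
+// that printing completes without crashing and does not assert on the output.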
diff --git a/test/cctest/compiler/test-node-cache.cc b/test/cctest/compiler/test-node-cache.cc
new file mode 100644
index 0000000..23909a5
--- /dev/null
+++ b/test/cctest/compiler/test-node-cache.cc
@@ -0,0 +1,160 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "graph-tester.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/node-cache.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(Int32Constant_back_to_back) {
+  GraphTester graph;
+  Int32NodeCache cache;
+
+  for (int i = -2000000000; i < 2000000000; i += 3315177) {
+    Node** pos = cache.Find(graph.zone(), i);
+    CHECK_NE(NULL, pos);
+    for (int j = 0; j < 3; j++) {
+      Node** npos = cache.Find(graph.zone(), i);
+      CHECK_EQ(pos, npos);
+    }
+  }
+}
+
+
+TEST(Int32Constant_five) {
+  GraphTester graph;
+  Int32NodeCache cache;
+  CommonOperatorBuilder common(graph.zone());
+
+  int32_t constants[] = {static_cast<int32_t>(0x80000000), -77, 0, 1, -1};
+
+  Node* nodes[ARRAY_SIZE(constants)];
+
+  for (size_t i = 0; i < ARRAY_SIZE(constants); i++) {
+    int32_t k = constants[i];
+    Node* node = graph.NewNode(common.Int32Constant(k));
+    *cache.Find(graph.zone(), k) = nodes[i] = node;
+  }
+
+  for (size_t i = 0; i < ARRAY_SIZE(constants); i++) {
+    int32_t k = constants[i];
+    CHECK_EQ(nodes[i], *cache.Find(graph.zone(), k));
+  }
+}
+
+
+TEST(Int32Constant_hits) {
+  GraphTester graph;
+  Int32NodeCache cache;
+  const int32_t kSize = 1500;
+  Node** nodes = graph.zone()->NewArray<Node*>(kSize);
+  CommonOperatorBuilder common(graph.zone());
+
+  for (int i = 0; i < kSize; i++) {
+    int32_t v = i * -55;
+    nodes[i] = graph.NewNode(common.Int32Constant(v));
+    *cache.Find(graph.zone(), v) = nodes[i];
+  }
+
+  int hits = 0;
+  for (int i = 0; i < kSize; i++) {
+    int32_t v = i * -55;
+    Node** pos = cache.Find(graph.zone(), v);
+    if (*pos != NULL) {
+      CHECK_EQ(nodes[i], *pos);
+      hits++;
+    }
+  }
+  CHECK_LT(4, hits);
+}
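+
+// Note: Find() returns a pointer to the cache slot for the key, NULL-filled
+// until the caller stores a node into it. The cache is finite, so later
+// insertions may reuse slots of earlier keys; the hit loop above therefore
+// only requires that some entries survive (CHECK_LT(4, hits)).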
+
+
+TEST(Int64Constant_back_to_back) {
+  GraphTester graph;
+  Int64NodeCache cache;
+
+  for (int64_t i = -2000000000; i < 2000000000; i += 3315177) {
+    Node** pos = cache.Find(graph.zone(), i);
+    CHECK_NE(NULL, pos);
+    for (int j = 0; j < 3; j++) {
+      Node** npos = cache.Find(graph.zone(), i);
+      CHECK_EQ(pos, npos);
+    }
+  }
+}
+
+
+TEST(Int64Constant_hits) {
+  GraphTester graph;
+  Int64NodeCache cache;
+  const int32_t kSize = 1500;
+  Node** nodes = graph.zone()->NewArray<Node*>(kSize);
+  CommonOperatorBuilder common(graph.zone());
+
+  for (int i = 0; i < kSize; i++) {
+    int64_t v = static_cast<int64_t>(i) * static_cast<int64_t>(5003001);
+    nodes[i] = graph.NewNode(common.Int32Constant(i));
+    *cache.Find(graph.zone(), v) = nodes[i];
+  }
+
+  int hits = 0;
+  for (int i = 0; i < kSize; i++) {
+    int64_t v = static_cast<int64_t>(i) * static_cast<int64_t>(5003001);
+    Node** pos = cache.Find(graph.zone(), v);
+    if (*pos != NULL) {
+      CHECK_EQ(nodes[i], *pos);
+      hits++;
+    }
+  }
+  CHECK_LT(4, hits);
+}
+
+
+TEST(PtrConstant_back_to_back) {
+  GraphTester graph;
+  PtrNodeCache cache;
+  int32_t buffer[50];
+
+  for (int32_t* p = buffer;
+       (p - buffer) < static_cast<ptrdiff_t>(ARRAY_SIZE(buffer)); p++) {
+    Node** pos = cache.Find(graph.zone(), p);
+    CHECK_NE(NULL, pos);
+    for (int j = 0; j < 3; j++) {
+      Node** npos = cache.Find(graph.zone(), p);
+      CHECK_EQ(pos, npos);
+    }
+  }
+}
+
+
+TEST(PtrConstant_hits) {
+  GraphTester graph;
+  PtrNodeCache cache;
+  const int32_t kSize = 50;
+  int32_t buffer[kSize];
+  Node* nodes[kSize];
+  CommonOperatorBuilder common(graph.zone());
+
+  for (size_t i = 0; i < ARRAY_SIZE(buffer); i++) {
+    int k = static_cast<int>(i);
+    int32_t* p = &buffer[i];
+    nodes[i] = graph.NewNode(common.Int32Constant(k));
+    *cache.Find(graph.zone(), p) = nodes[i];
+  }
+
+  int hits = 0;
+  for (size_t i = 0; i < ARRAY_SIZE(buffer); i++) {
+    int32_t* p = &buffer[i];
+    Node** pos = cache.Find(graph.zone(), p);
+    if (*pos != NULL) {
+      CHECK_EQ(nodes[i], *pos);
+      hits++;
+    }
+  }
+  CHECK_LT(4, hits);
+}
diff --git a/test/cctest/compiler/test-node.cc b/test/cctest/compiler/test-node.cc
new file mode 100644
index 0000000..5411755
--- /dev/null
+++ b/test/cctest/compiler/test-node.cc
@@ -0,0 +1,813 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "graph-tester.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+static SimpleOperator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
+                                     0, 0, "dummy");
+
+TEST(NodeAllocation) {
+  GraphTester graph;
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator);
+  CHECK(n2->id() != n1->id());
+}
+
+
+TEST(NodeWithOpcode) {
+  GraphTester graph;
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator);
+  CHECK(n1->op() == &dummy_operator);
+  CHECK(n2->op() == &dummy_operator);
+}
+
+
+TEST(NodeInputs1) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  CHECK_EQ(1, n2->InputCount());
+  CHECK(n0 == n2->InputAt(0));
+}
+
+
+TEST(NodeInputs2) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+  CHECK_EQ(2, n2->InputCount());
+  CHECK(n0 == n2->InputAt(0));
+  CHECK(n1 == n2->InputAt(1));
+}
+
+
+TEST(NodeInputs3) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator, n0, n1, n1);
+  CHECK_EQ(3, n2->InputCount());
+  CHECK(n0 == n2->InputAt(0));
+  CHECK(n1 == n2->InputAt(1));
+  CHECK(n1 == n2->InputAt(2));
+}
+
+
+TEST(NodeInputIteratorEmpty) {
+  GraphTester graph;
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node::Inputs::iterator i(n1->inputs().begin());
+  int input_count = 0;
+  for (; i != n1->inputs().end(); ++i) {
+    input_count++;
+  }
+  CHECK_EQ(0, input_count);
+}
+
+
+TEST(NodeInputIteratorOne) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node::Inputs::iterator i(n1->inputs().begin());
+  CHECK_EQ(1, n1->InputCount());
+  CHECK_EQ(n0, *i);
+  ++i;
+  CHECK(n1->inputs().end() == i);
+}
+
+
+TEST(NodeUseIteratorEmpty) {
+  GraphTester graph;
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node::Uses::iterator i(n1->uses().begin());
+  int use_count = 0;
+  for (; i != n1->uses().end(); ++i) {
+    Node::Edge edge(i.edge());
+    USE(edge);
+    use_count++;
+  }
+  CHECK_EQ(0, use_count);
+}
+
+
+TEST(NodeUseIteratorOne) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node::Uses::iterator i(n0->uses().begin());
+  CHECK_EQ(n1, *i);
+  ++i;
+  CHECK(n0->uses().end() == i);
+}
+
+
+TEST(NodeUseIteratorReplaceNoUses) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n3 = graph.NewNode(&dummy_operator);
+  n0->ReplaceUses(n3);
+  CHECK(n0->uses().begin() == n0->uses().end());
+}
+
+
+TEST(NodeUseIteratorReplaceUses) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator);
+  Node::Uses::iterator i1(n0->uses().begin());
+  CHECK_EQ(n1, *i1);
+  ++i1;
+  CHECK_EQ(n2, *i1);
+  n0->ReplaceUses(n3);
+  Node::Uses::iterator i2(n3->uses().begin());
+  CHECK_EQ(n1, *i2);
+  ++i2;
+  CHECK_EQ(n2, *i2);
+  Node::Inputs::iterator i3(n1->inputs().begin());
+  CHECK_EQ(n3, *i3);
+  ++i3;
+  CHECK(n1->inputs().end() == i3);
+  Node::Inputs::iterator i4(n2->inputs().begin());
+  CHECK_EQ(n3, *i4);
+  ++i4;
+  CHECK(n2->inputs().end() == i4);
+}
+
+
+TEST(NodeUseIteratorReplaceUsesSelf) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator);
+
+  n1->ReplaceInput(0, n1);  // Create self-reference.
+
+  Node::Uses::iterator i1(n1->uses().begin());
+  CHECK_EQ(n1, *i1);
+
+  n1->ReplaceUses(n3);
+
+  CHECK(n1->uses().begin() == n1->uses().end());
+
+  Node::Uses::iterator i2(n3->uses().begin());
+  CHECK_EQ(n1, *i2);
+  ++i2;
+  CHECK(n3->uses().end() == i2);
+}
+
+
+TEST(ReplaceInput) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator);
+  Node* n3 = graph.NewNode(&dummy_operator, n0, n1, n2);
+  Node::Inputs::iterator i1(n3->inputs().begin());
+  CHECK(n0 == *i1);
+  CHECK_EQ(n0, n3->InputAt(0));
+  ++i1;
+  CHECK_EQ(n1, *i1);
+  CHECK_EQ(n1, n3->InputAt(1));
+  ++i1;
+  CHECK_EQ(n2, *i1);
+  CHECK_EQ(n2, n3->InputAt(2));
+  ++i1;
+  CHECK(i1 == n3->inputs().end());
+
+  Node::Uses::iterator i2(n1->uses().begin());
+  CHECK_EQ(n3, *i2);
+  ++i2;
+  CHECK(i2 == n1->uses().end());
+
+  Node* n4 = graph.NewNode(&dummy_operator);
+  Node::Uses::iterator i3(n4->uses().begin());
+  CHECK(i3 == n4->uses().end());
+
+  n3->ReplaceInput(1, n4);
+
+  Node::Uses::iterator i4(n1->uses().begin());
+  CHECK(i4 == n1->uses().end());
+
+  Node::Uses::iterator i5(n4->uses().begin());
+  CHECK_EQ(n3, *i5);
+  ++i5;
+  CHECK(i5 == n4->uses().end());
+
+  Node::Inputs::iterator i6(n3->inputs().begin());
+  CHECK(n0 == *i6);
+  CHECK_EQ(n0, n3->InputAt(0));
+  ++i6;
+  CHECK_EQ(n4, *i6);
+  CHECK_EQ(n4, n3->InputAt(1));
+  ++i6;
+  CHECK_EQ(n2, *i6);
+  CHECK_EQ(n2, n3->InputAt(2));
+  ++i6;
+  CHECK(i6 == n3->inputs().end());
+}
+
+
+TEST(OwnedBy) {
+  GraphTester graph;
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+
+    CHECK(!n0->OwnedBy(n1));
+    CHECK(!n1->OwnedBy(n0));
+
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    CHECK(n0->OwnedBy(n2));
+    CHECK(!n2->OwnedBy(n0));
+
+    Node* n3 = graph.NewNode(&dummy_operator, n0);
+    CHECK(!n0->OwnedBy(n2));
+    CHECK(!n0->OwnedBy(n3));
+    CHECK(!n2->OwnedBy(n0));
+    CHECK(!n3->OwnedBy(n0));
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator, n0);
+    CHECK(n0->OwnedBy(n1));
+    CHECK(!n1->OwnedBy(n0));
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    CHECK(!n0->OwnedBy(n1));
+    CHECK(!n0->OwnedBy(n2));
+    CHECK(!n1->OwnedBy(n0));
+    CHECK(!n1->OwnedBy(n2));
+    CHECK(!n2->OwnedBy(n0));
+    CHECK(!n2->OwnedBy(n1));
+
+    Node* n3 = graph.NewNode(&dummy_operator);
+    n2->ReplaceInput(0, n3);
+
+    CHECK(n0->OwnedBy(n1));
+    CHECK(!n1->OwnedBy(n0));
+    CHECK(!n1->OwnedBy(n2));
+    CHECK(!n2->OwnedBy(n0));
+    CHECK(!n2->OwnedBy(n1));
+    CHECK(n3->OwnedBy(n2));
+    CHECK(!n2->OwnedBy(n3));
+  }
+}
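+
+// Assumed meaning of OwnedBy, inferred from the checks above: a node is owned
+// by another node exactly when that node is its one and only use, so adding a
+// second use (or redirecting the single use elsewhere) dissolves ownership.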
+
+
+TEST(Uses) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  CHECK_EQ(1, n0->UseCount());
+  printf("A: %d vs %d\n", n0->UseAt(0)->id(), n1->id());
+  CHECK(n0->UseAt(0) == n1);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  CHECK_EQ(2, n0->UseCount());
+  printf("B: %d vs %d\n", n0->UseAt(1)->id(), n2->id());
+  CHECK(n0->UseAt(1) == n2);
+  Node* n3 = graph.NewNode(&dummy_operator, n0);
+  CHECK_EQ(3, n0->UseCount());
+  CHECK(n0->UseAt(2) == n3);
+}
+
+
+TEST(Inputs) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator, n0, n1, n2);
+  CHECK_EQ(3, n3->InputCount());
+  CHECK(n3->InputAt(0) == n0);
+  CHECK(n3->InputAt(1) == n1);
+  CHECK(n3->InputAt(2) == n2);
+  Node* n4 = graph.NewNode(&dummy_operator, n0, n1, n2);
+  n3->AppendInput(graph.zone(), n4);
+  CHECK_EQ(4, n3->InputCount());
+  CHECK(n3->InputAt(0) == n0);
+  CHECK(n3->InputAt(1) == n1);
+  CHECK(n3->InputAt(2) == n2);
+  CHECK(n3->InputAt(3) == n4);
+  Node* n5 = graph.NewNode(&dummy_operator, n4);
+  n3->AppendInput(graph.zone(), n4);
+  CHECK_EQ(5, n3->InputCount());
+  CHECK(n3->InputAt(0) == n0);
+  CHECK(n3->InputAt(1) == n1);
+  CHECK(n3->InputAt(2) == n2);
+  CHECK(n3->InputAt(3) == n4);
+  CHECK(n3->InputAt(4) == n4);
+
+  // Make sure uses have been hooked up correctly.
+  Node::Uses uses(n4->uses());
+  Node::Uses::iterator current = uses.begin();
+  CHECK(current != uses.end());
+  CHECK(*current == n3);
+  ++current;
+  CHECK(current != uses.end());
+  CHECK(*current == n5);
+  ++current;
+  CHECK(current != uses.end());
+  CHECK(*current == n3);
+  ++current;
+  CHECK(current == uses.end());
+}
+
+
+TEST(AppendInputsAndIterator) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+
+  Node::Inputs inputs(n2->inputs());
+  Node::Inputs::iterator current = inputs.begin();
+  CHECK(current != inputs.end());
+  CHECK(*current == n0);
+  ++current;
+  CHECK(current != inputs.end());
+  CHECK(*current == n1);
+  ++current;
+  CHECK(current == inputs.end());
+
+  Node* n3 = graph.NewNode(&dummy_operator);
+  n2->AppendInput(graph.zone(), n3);
+  inputs = n2->inputs();
+  current = inputs.begin();
+  CHECK(current != inputs.end());
+  CHECK(*current == n0);
+  CHECK_EQ(0, current.index());
+  ++current;
+  CHECK(current != inputs.end());
+  CHECK(*current == n1);
+  CHECK_EQ(1, current.index());
+  ++current;
+  CHECK(current != inputs.end());
+  CHECK(*current == n3);
+  CHECK_EQ(2, current.index());
+  ++current;
+  CHECK(current == inputs.end());
+}
+
+
+TEST(NullInputsSimple) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+  CHECK_EQ(2, n2->InputCount());
+
+  CHECK(n0 == n2->InputAt(0));
+  CHECK(n1 == n2->InputAt(1));
+  CHECK_EQ(2, n0->UseCount());
+  n2->ReplaceInput(0, NULL);
+  CHECK(NULL == n2->InputAt(0));
+  CHECK(n1 == n2->InputAt(1));
+  CHECK_EQ(1, n0->UseCount());
+}
+
+
+TEST(NullInputsAppended) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator, n0);
+  n3->AppendInput(graph.zone(), n1);
+  n3->AppendInput(graph.zone(), n2);
+  CHECK_EQ(3, n3->InputCount());
+
+  CHECK(n0 == n3->InputAt(0));
+  CHECK(n1 == n3->InputAt(1));
+  CHECK(n2 == n3->InputAt(2));
+  CHECK_EQ(1, n1->UseCount());
+  n3->ReplaceInput(1, NULL);
+  CHECK(n0 == n3->InputAt(0));
+  CHECK(NULL == n3->InputAt(1));
+  CHECK(n2 == n3->InputAt(2));
+  CHECK_EQ(0, n1->UseCount());
+}
+
+
+TEST(ReplaceUsesFromAppendedInputs) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator);
+  n2->AppendInput(graph.zone(), n1);
+  n2->AppendInput(graph.zone(), n0);
+  CHECK_EQ(0, n3->UseCount());
+  CHECK_EQ(3, n0->UseCount());
+  n0->ReplaceUses(n3);
+  CHECK_EQ(0, n0->UseCount());
+  CHECK_EQ(3, n3->UseCount());
+
+  Node::Uses uses(n3->uses());
+  Node::Uses::iterator current = uses.begin();
+  CHECK(current != uses.end());
+  CHECK(*current == n1);
+  ++current;
+  CHECK(current != uses.end());
+  CHECK(*current == n2);
+  ++current;
+  CHECK(current != uses.end());
+  CHECK(*current == n2);
+  ++current;
+  CHECK(current == uses.end());
+}
+
+
+template <bool result>
+struct FixedPredicate {
+  bool operator()(const Node* node) const { return result; }
+};
+
+
+TEST(ReplaceUsesIfWithFixedPredicate) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator);
+
+  CHECK_EQ(0, n2->UseCount());
+  n2->ReplaceUsesIf(FixedPredicate<true>(), n1);
+  CHECK_EQ(0, n2->UseCount());
+  n2->ReplaceUsesIf(FixedPredicate<false>(), n1);
+  CHECK_EQ(0, n2->UseCount());
+
+  CHECK_EQ(0, n3->UseCount());
+  n3->ReplaceUsesIf(FixedPredicate<true>(), n1);
+  CHECK_EQ(0, n3->UseCount());
+  n3->ReplaceUsesIf(FixedPredicate<false>(), n1);
+  CHECK_EQ(0, n3->UseCount());
+
+  CHECK_EQ(2, n0->UseCount());
+  CHECK_EQ(0, n1->UseCount());
+  n0->ReplaceUsesIf(FixedPredicate<false>(), n1);
+  CHECK_EQ(2, n0->UseCount());
+  CHECK_EQ(0, n1->UseCount());
+  n0->ReplaceUsesIf(FixedPredicate<true>(), n1);
+  CHECK_EQ(0, n0->UseCount());
+  CHECK_EQ(2, n1->UseCount());
+
+  n1->AppendInput(graph.zone(), n1);
+  CHECK_EQ(3, n1->UseCount());
+  n1->AppendInput(graph.zone(), n3);
+  CHECK_EQ(1, n3->UseCount());
+  n3->ReplaceUsesIf(FixedPredicate<true>(), n1);
+  CHECK_EQ(4, n1->UseCount());
+  CHECK_EQ(0, n3->UseCount());
+  n1->ReplaceUsesIf(FixedPredicate<false>(), n3);
+  CHECK_EQ(4, n1->UseCount());
+  CHECK_EQ(0, n3->UseCount());
+}
+
+
+TEST(ReplaceUsesIfWithEqualTo) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+
+  CHECK_EQ(0, n2->UseCount());
+  n2->ReplaceUsesIf(std::bind1st(std::equal_to<Node*>(), n1), n0);
+  CHECK_EQ(0, n2->UseCount());
+
+  CHECK_EQ(2, n0->UseCount());
+  CHECK_EQ(1, n1->UseCount());
+  n1->ReplaceUsesIf(std::bind1st(std::equal_to<Node*>(), n0), n0);
+  CHECK_EQ(2, n0->UseCount());
+  CHECK_EQ(1, n1->UseCount());
+  n0->ReplaceUsesIf(std::bind2nd(std::equal_to<Node*>(), n2), n1);
+  CHECK_EQ(1, n0->UseCount());
+  CHECK_EQ(2, n1->UseCount());
+}
+
+
+TEST(ReplaceInputMultipleUses) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  n2->ReplaceInput(0, n1);
+  CHECK_EQ(0, n0->UseCount());
+  CHECK_EQ(1, n1->UseCount());
+
+  Node* n3 = graph.NewNode(&dummy_operator, n0);
+  n3->ReplaceInput(0, n1);
+  CHECK_EQ(0, n0->UseCount());
+  CHECK_EQ(2, n1->UseCount());
+}
+
+
+TEST(TrimInputCountInline) {
+  GraphTester graph;
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator, n0);
+    n1->TrimInputCount(1);
+    CHECK_EQ(1, n1->InputCount());
+    CHECK_EQ(n0, n1->InputAt(0));
+    CHECK_EQ(1, n0->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator, n0);
+    n1->TrimInputCount(0);
+    CHECK_EQ(0, n1->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+    n2->TrimInputCount(2);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(1, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+    n2->TrimInputCount(1);
+    CHECK_EQ(1, n2->InputCount());
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+    n2->TrimInputCount(0);
+    CHECK_EQ(0, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0, n0);
+    n2->TrimInputCount(1);
+    CHECK_EQ(1, n2->InputCount());
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0, n0);
+    n2->TrimInputCount(0);
+    CHECK_EQ(0, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+}
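+
+// Note: TrimInputCount(n) keeps the first n inputs and drops the rest,
+// releasing the corresponding uses, which is what the use-count checks above
+// verify; it never grows the input list.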
+
+
+TEST(TrimInputCountOutOfLine1) {
+  GraphTester graph;
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    n1->AppendInput(graph.zone(), n0);
+    n1->TrimInputCount(1);
+    CHECK_EQ(1, n1->InputCount());
+    CHECK_EQ(n0, n1->InputAt(0));
+    CHECK_EQ(1, n0->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    n1->AppendInput(graph.zone(), n0);
+    CHECK_EQ(1, n1->InputCount());
+    n1->TrimInputCount(0);
+    CHECK_EQ(0, n1->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator);
+    n2->AppendInput(graph.zone(), n0);
+    n2->AppendInput(graph.zone(), n1);
+    CHECK_EQ(2, n2->InputCount());
+    n2->TrimInputCount(2);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(n0, n2->InputAt(0));
+    CHECK_EQ(n1, n2->InputAt(1));
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(1, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator);
+    n2->AppendInput(graph.zone(), n0);
+    n2->AppendInput(graph.zone(), n1);
+    CHECK_EQ(2, n2->InputCount());
+    n2->TrimInputCount(1);
+    CHECK_EQ(1, n2->InputCount());
+    CHECK_EQ(n0, n2->InputAt(0));
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator);
+    n2->AppendInput(graph.zone(), n0);
+    n2->AppendInput(graph.zone(), n1);
+    CHECK_EQ(2, n2->InputCount());
+    n2->TrimInputCount(0);
+    CHECK_EQ(0, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator);
+    n2->AppendInput(graph.zone(), n0);
+    n2->AppendInput(graph.zone(), n0);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(2, n0->UseCount());
+    n2->TrimInputCount(1);
+    CHECK_EQ(1, n2->InputCount());
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator);
+    n2->AppendInput(graph.zone(), n0);
+    n2->AppendInput(graph.zone(), n0);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(2, n0->UseCount());
+    n2->TrimInputCount(0);
+    CHECK_EQ(0, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+}
+
+
+TEST(TrimInputCountOutOfLine2) {
+  GraphTester graph;
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    n2->AppendInput(graph.zone(), n1);
+    CHECK_EQ(2, n2->InputCount());
+    n2->TrimInputCount(2);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(n0, n2->InputAt(0));
+    CHECK_EQ(n1, n2->InputAt(1));
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(1, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    n2->AppendInput(graph.zone(), n1);
+    CHECK_EQ(2, n2->InputCount());
+    n2->TrimInputCount(1);
+    CHECK_EQ(1, n2->InputCount());
+    CHECK_EQ(n0, n2->InputAt(0));
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    n2->AppendInput(graph.zone(), n1);
+    CHECK_EQ(2, n2->InputCount());
+    n2->TrimInputCount(0);
+    CHECK_EQ(0, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    n2->AppendInput(graph.zone(), n0);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(2, n0->UseCount());
+    n2->TrimInputCount(1);
+    CHECK_EQ(1, n2->InputCount());
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    n2->AppendInput(graph.zone(), n0);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(2, n0->UseCount());
+    n2->TrimInputCount(0);
+    CHECK_EQ(0, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+}
+
+
+TEST(RemoveAllInputs) {
+  GraphTester graph;
+
+  for (int i = 0; i < 2; i++) {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator, n0);
+    Node* n2;
+    if (i == 0) {
+      n2 = graph.NewNode(&dummy_operator, n0, n1);
+    } else {
+      n2 = graph.NewNode(&dummy_operator, n0);
+      n2->AppendInput(graph.zone(), n1);  // with out-of-line input.
+    }
+
+    n0->RemoveAllInputs();
+    CHECK_EQ(0, n0->InputCount());
+
+    CHECK_EQ(2, n0->UseCount());
+    n1->RemoveAllInputs();
+    CHECK_EQ(1, n1->InputCount());
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(NULL, n1->InputAt(0));
+
+    CHECK_EQ(1, n1->UseCount());
+    n2->RemoveAllInputs();
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(NULL, n2->InputAt(0));
+    CHECK_EQ(NULL, n2->InputAt(1));
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator, n0);
+    n1->ReplaceInput(0, n1);  // self-reference.
+
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(1, n1->UseCount());
+    n1->RemoveAllInputs();
+    CHECK_EQ(1, n1->InputCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(NULL, n1->InputAt(0));
+  }
+}
diff --git a/test/cctest/compiler/test-operator.cc b/test/cctest/compiler/test-operator.cc
new file mode 100644
index 0000000..0bf8cb7
--- /dev/null
+++ b/test/cctest/compiler/test-operator.cc
@@ -0,0 +1,244 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/compiler/operator.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+#define NaN (v8::base::OS::nan_value())
+#define Infinity (std::numeric_limits<double>::infinity())
+
+TEST(TestOperatorMnemonic) {
+  SimpleOperator op1(10, 0, 0, 0, "ThisOne");
+  CHECK_EQ(0, strcmp(op1.mnemonic(), "ThisOne"));
+
+  SimpleOperator op2(11, 0, 0, 0, "ThatOne");
+  CHECK_EQ(0, strcmp(op2.mnemonic(), "ThatOne"));
+
+  Operator1<int> op3(12, 0, 0, 1, "Mnemonic1", 12333);
+  CHECK_EQ(0, strcmp(op3.mnemonic(), "Mnemonic1"));
+
+  Operator1<double> op4(13, 0, 0, 1, "TheOther", 99.9);
+  CHECK_EQ(0, strcmp(op4.mnemonic(), "TheOther"));
+}
+
+
+TEST(TestSimpleOperatorHash) {
+  SimpleOperator op1(17, 0, 0, 0, "Another");
+  CHECK_EQ(17, op1.HashCode());
+
+  SimpleOperator op2(18, 0, 0, 0, "Falsch");
+  CHECK_EQ(18, op2.HashCode());
+}
+
+
+TEST(TestSimpleOperatorEquals) {
+  SimpleOperator op1a(19, 0, 0, 0, "Another1");
+  SimpleOperator op1b(19, 2, 2, 2, "Another2");
+
+  CHECK(op1a.Equals(&op1a));
+  CHECK(op1a.Equals(&op1b));
+  CHECK(op1b.Equals(&op1a));
+  CHECK(op1b.Equals(&op1b));
+
+  SimpleOperator op2a(20, 0, 0, 0, "Falsch1");
+  SimpleOperator op2b(20, 1, 1, 1, "Falsch2");
+
+  CHECK(op2a.Equals(&op2a));
+  CHECK(op2a.Equals(&op2b));
+  CHECK(op2b.Equals(&op2a));
+  CHECK(op2b.Equals(&op2b));
+
+  CHECK(!op1a.Equals(&op2a));
+  CHECK(!op1a.Equals(&op2b));
+  CHECK(!op1b.Equals(&op2a));
+  CHECK(!op1b.Equals(&op2b));
+
+  CHECK(!op2a.Equals(&op1a));
+  CHECK(!op2a.Equals(&op1b));
+  CHECK(!op2b.Equals(&op1a));
+  CHECK(!op2b.Equals(&op1b));
+}
+
+
+static SmartArrayPointer<const char> OperatorToString(Operator* op) {
+  OStringStream os;
+  os << *op;
+  return SmartArrayPointer<const char>(StrDup(os.c_str()));
+}
+
+
+TEST(TestSimpleOperatorPrint) {
+  SimpleOperator op1a(19, 0, 0, 0, "Another1");
+  SimpleOperator op1b(19, 2, 2, 2, "Another2");
+
+  CHECK_EQ("Another1", OperatorToString(&op1a).get());
+  CHECK_EQ("Another2", OperatorToString(&op1b).get());
+
+  SimpleOperator op2a(20, 0, 0, 0, "Flog1");
+  SimpleOperator op2b(20, 1, 1, 1, "Flog2");
+
+  CHECK_EQ("Flog1", OperatorToString(&op2a).get());
+  CHECK_EQ("Flog2", OperatorToString(&op2b).get());
+}
+
+
+TEST(TestOperator1intHash) {
+  Operator1<int> op1a(23, 0, 0, 0, "Wolfie", 11);
+  Operator1<int> op1b(23, 2, 2, 2, "Doggie", 11);
+
+  CHECK_EQ(op1a.HashCode(), op1b.HashCode());
+
+  Operator1<int> op2a(24, 0, 0, 0, "Arfie", 3);
+  Operator1<int> op2b(24, 0, 0, 0, "Arfie", 4);
+
+  CHECK_NE(op1a.HashCode(), op2a.HashCode());
+  CHECK_NE(op2a.HashCode(), op2b.HashCode());
+}
+
+
+TEST(TestOperator1intEquals) {
+  Operator1<int> op1a(23, 0, 0, 0, "Scratchy", 11);
+  Operator1<int> op1b(23, 2, 2, 2, "Scratchy", 11);
+
+  CHECK(op1a.Equals(&op1a));
+  CHECK(op1a.Equals(&op1b));
+  CHECK(op1b.Equals(&op1a));
+  CHECK(op1b.Equals(&op1b));
+
+  Operator1<int> op2a(24, 0, 0, 0, "Im", 3);
+  Operator1<int> op2b(24, 0, 0, 0, "Im", 4);
+
+  CHECK(op2a.Equals(&op2a));
+  CHECK(!op2a.Equals(&op2b));
+  CHECK(!op2b.Equals(&op2a));
+  CHECK(op2b.Equals(&op2b));
+
+  CHECK(!op1a.Equals(&op2a));
+  CHECK(!op1a.Equals(&op2b));
+  CHECK(!op1b.Equals(&op2a));
+  CHECK(!op1b.Equals(&op2b));
+
+  CHECK(!op2a.Equals(&op1a));
+  CHECK(!op2a.Equals(&op1b));
+  CHECK(!op2b.Equals(&op1a));
+  CHECK(!op2b.Equals(&op1b));
+
+  SimpleOperator op3(25, 0, 0, 0, "Weepy");
+
+  CHECK(!op1a.Equals(&op3));
+  CHECK(!op1b.Equals(&op3));
+  CHECK(!op2a.Equals(&op3));
+  CHECK(!op2b.Equals(&op3));
+
+  CHECK(!op3.Equals(&op1a));
+  CHECK(!op3.Equals(&op1b));
+  CHECK(!op3.Equals(&op2a));
+  CHECK(!op3.Equals(&op2b));
+}
+
+
+TEST(TestOperator1intPrint) {
+  Operator1<int> op1(12, 0, 0, 1, "Op1Test", 0);
+  CHECK_EQ("Op1Test[0]", OperatorToString(&op1).get());
+
+  Operator1<int> op2(12, 0, 0, 1, "Op1Test", 66666666);
+  CHECK_EQ("Op1Test[66666666]", OperatorToString(&op2).get());
+
+  Operator1<int> op3(12, 0, 0, 1, "FooBar", 2347);
+  CHECK_EQ("FooBar[2347]", OperatorToString(&op3).get());
+
+  Operator1<int> op4(12, 0, 0, 1, "BarFoo", -879);
+  CHECK_EQ("BarFoo[-879]", OperatorToString(&op4).get());
+}
+
+
+TEST(TestOperator1doubleHash) {
+  Operator1<double> op1a(23, 0, 0, 0, "Wolfie", 11.77);
+  Operator1<double> op1b(23, 2, 2, 2, "Doggie", 11.77);
+
+  CHECK_EQ(op1a.HashCode(), op1b.HashCode());
+
+  Operator1<double> op2a(24, 0, 0, 0, "Arfie", -6.7);
+  Operator1<double> op2b(24, 0, 0, 0, "Arfie", -6.8);
+
+  CHECK_NE(op1a.HashCode(), op2a.HashCode());
+  CHECK_NE(op2a.HashCode(), op2b.HashCode());
+}
+
+
+TEST(TestOperator1doubleEquals) {
+  Operator1<double> op1a(23, 0, 0, 0, "Scratchy", 11.77);
+  Operator1<double> op1b(23, 2, 2, 2, "Scratchy", 11.77);
+
+  CHECK(op1a.Equals(&op1a));
+  CHECK(op1a.Equals(&op1b));
+  CHECK(op1b.Equals(&op1a));
+  CHECK(op1b.Equals(&op1b));
+
+  Operator1<double> op2a(24, 0, 0, 0, "Im", 3.1);
+  Operator1<double> op2b(24, 0, 0, 0, "Im", 3.2);
+
+  CHECK(op2a.Equals(&op2a));
+  CHECK(!op2a.Equals(&op2b));
+  CHECK(!op2b.Equals(&op2a));
+  CHECK(op2b.Equals(&op2b));
+
+  CHECK(!op1a.Equals(&op2a));
+  CHECK(!op1a.Equals(&op2b));
+  CHECK(!op1b.Equals(&op2a));
+  CHECK(!op1b.Equals(&op2b));
+
+  CHECK(!op2a.Equals(&op1a));
+  CHECK(!op2a.Equals(&op1b));
+  CHECK(!op2b.Equals(&op1a));
+  CHECK(!op2b.Equals(&op1b));
+
+  SimpleOperator op3(25, 0, 0, 0, "Weepy");
+
+  CHECK(!op1a.Equals(&op3));
+  CHECK(!op1b.Equals(&op3));
+  CHECK(!op2a.Equals(&op3));
+  CHECK(!op2b.Equals(&op3));
+
+  CHECK(!op3.Equals(&op1a));
+  CHECK(!op3.Equals(&op1b));
+  CHECK(!op3.Equals(&op2a));
+  CHECK(!op3.Equals(&op2b));
+
+  Operator1<double> op4a(24, 0, 0, 0, "Bashful", NaN);
+  Operator1<double> op4b(24, 0, 0, 0, "Bashful", NaN);
+
+  CHECK(op4a.Equals(&op4a));
+  CHECK(op4a.Equals(&op4b));
+  CHECK(op4b.Equals(&op4a));
+  CHECK(op4b.Equals(&op4b));
+
+  CHECK(!op3.Equals(&op4a));
+  CHECK(!op3.Equals(&op4b));
+  CHECK(!op3.Equals(&op4a));
+  CHECK(!op3.Equals(&op4b));
+}
+
+
+TEST(TestOperator1doublePrint) {
+  Operator1<double> op1(12, 0, 0, 1, "Op1Test", 0);
+  CHECK_EQ("Op1Test[0]", OperatorToString(&op1).get());
+
+  Operator1<double> op2(12, 0, 0, 1, "Op1Test", 7.3);
+  CHECK_EQ("Op1Test[7.3]", OperatorToString(&op2).get());
+
+  Operator1<double> op3(12, 0, 0, 1, "FooBar", 2e+123);
+  CHECK_EQ("FooBar[2e+123]", OperatorToString(&op3).get());
+
+  Operator1<double> op4(12, 0, 0, 1, "BarFoo", Infinity);
+  CHECK_EQ("BarFoo[inf]", OperatorToString(&op4).get());
+
+  Operator1<double> op5(12, 0, 0, 1, "BarFoo", NaN);
+  CHECK_EQ("BarFoo[nan]", OperatorToString(&op5).get());
+}
diff --git a/test/cctest/compiler/test-phi-reducer.cc b/test/cctest/compiler/test-phi-reducer.cc
new file mode 100644
index 0000000..5560040
--- /dev/null
+++ b/test/cctest/compiler/test-phi-reducer.cc
@@ -0,0 +1,223 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/phi-reducer.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+class PhiReducerTester : HandleAndZoneScope {
+ public:
+  PhiReducerTester()
+      : isolate(main_isolate()),
+        common(main_zone()),
+        graph(main_zone()),
+        self(graph.NewNode(common.Start())),
+        dead(graph.NewNode(common.Dead())) {}
+
+  Isolate* isolate;
+  CommonOperatorBuilder common;
+  Graph graph;
+  Node* self;
+  Node* dead;
+
+  void CheckReduce(Node* expect, Node* phi) {
+    PhiReducer reducer;
+    Reduction reduction = reducer.Reduce(phi);
+    if (expect == phi) {
+      CHECK(!reduction.Changed());
+    } else {
+      CHECK(reduction.Changed());
+      CHECK_EQ(expect, reduction.replacement());
+    }
+  }
+
+  Node* Int32Constant(int32_t val) {
+    return graph.NewNode(common.Int32Constant(val));
+  }
+
+  Node* Float64Constant(double val) {
+    return graph.NewNode(common.Float64Constant(val));
+  }
+
+  Node* Parameter(int32_t index = 0) {
+    return graph.NewNode(common.Parameter(index));
+  }
+
+  Node* Phi(Node* a) {
+    return SetSelfReferences(graph.NewNode(common.Phi(1), a));
+  }
+
+  Node* Phi(Node* a, Node* b) {
+    return SetSelfReferences(graph.NewNode(common.Phi(2), a, b));
+  }
+
+  Node* Phi(Node* a, Node* b, Node* c) {
+    return SetSelfReferences(graph.NewNode(common.Phi(3), a, b, c));
+  }
+
+  Node* Phi(Node* a, Node* b, Node* c, Node* d) {
+    return SetSelfReferences(graph.NewNode(common.Phi(4), a, b, c, d));
+  }
+
+  Node* PhiWithControl(Node* a, Node* control) {
+    return SetSelfReferences(graph.NewNode(common.Phi(1), a, control));
+  }
+
+  Node* PhiWithControl(Node* a, Node* b, Node* control) {
+    return SetSelfReferences(graph.NewNode(common.Phi(2), a, b, control));
+  }
+
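+  // Rewrites every input that is the placeholder {self} to point at the
+  // node itself, so that R.Phi(R.self, a) builds a phi with a real
+  // self-loop.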
+  Node* SetSelfReferences(Node* node) {
+    Node::Inputs inputs = node->inputs();
+    for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+         ++iter) {
+      Node* input = *iter;
+      if (input == self) node->ReplaceInput(iter.index(), node);
+    }
+    return node;
+  }
+};
+
+
+TEST(PhiReduce1) {
+  PhiReducerTester R;
+  Node* zero = R.Int32Constant(0);
+  Node* one = R.Int32Constant(1);
+  Node* oneish = R.Float64Constant(1.1);
+  Node* param = R.Parameter();
+
+  Node* singles[] = {zero, one, oneish, param};
+  for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+    R.CheckReduce(singles[i], R.Phi(singles[i]));
+  }
+}
+
+
+TEST(PhiReduce2) {
+  PhiReducerTester R;
+  Node* zero = R.Int32Constant(0);
+  Node* one = R.Int32Constant(1);
+  Node* oneish = R.Float64Constant(1.1);
+  Node* param = R.Parameter();
+
+  Node* singles[] = {zero, one, oneish, param};
+  for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+    Node* a = singles[i];
+    R.CheckReduce(a, R.Phi(a, a));
+  }
+
+  for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+    Node* a = singles[i];
+    R.CheckReduce(a, R.Phi(R.self, a));
+    R.CheckReduce(a, R.Phi(a, R.self));
+  }
+
+  for (size_t i = 1; i < ARRAY_SIZE(singles); i++) {
+    Node* a = singles[i], *b = singles[0];
+    Node* phi1 = R.Phi(b, a);
+    R.CheckReduce(phi1, phi1);
+
+    Node* phi2 = R.Phi(a, b);
+    R.CheckReduce(phi2, phi2);
+  }
+}
+
+
+TEST(PhiReduce3) {
+  PhiReducerTester R;
+  Node* zero = R.Int32Constant(0);
+  Node* one = R.Int32Constant(1);
+  Node* oneish = R.Float64Constant(1.1);
+  Node* param = R.Parameter();
+
+  Node* singles[] = {zero, one, oneish, param};
+  for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+    Node* a = singles[i];
+    R.CheckReduce(a, R.Phi(a, a, a));
+  }
+
+  for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+    Node* a = singles[i];
+    R.CheckReduce(a, R.Phi(R.self, a, a));
+    R.CheckReduce(a, R.Phi(a, R.self, a));
+    R.CheckReduce(a, R.Phi(a, a, R.self));
+  }
+
+  for (size_t i = 1; i < ARRAY_SIZE(singles); i++) {
+    Node* a = singles[i], *b = singles[0];
+    Node* phi1 = R.Phi(b, a, a);
+    R.CheckReduce(phi1, phi1);
+
+    Node* phi2 = R.Phi(a, b, a);
+    R.CheckReduce(phi2, phi2);
+
+    Node* phi3 = R.Phi(a, a, b);
+    R.CheckReduce(phi3, phi3);
+  }
+}
+
+
+TEST(PhiReduce4) {
+  PhiReducerTester R;
+  Node* zero = R.Int32Constant(0);
+  Node* one = R.Int32Constant(1);
+  Node* oneish = R.Float64Constant(1.1);
+  Node* param = R.Parameter();
+
+  Node* singles[] = {zero, one, oneish, param};
+  for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+    Node* a = singles[i];
+    R.CheckReduce(a, R.Phi(a, a, a, a));
+  }
+
+  for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+    Node* a = singles[i];
+    R.CheckReduce(a, R.Phi(R.self, a, a, a));
+    R.CheckReduce(a, R.Phi(a, R.self, a, a));
+    R.CheckReduce(a, R.Phi(a, a, R.self, a));
+    R.CheckReduce(a, R.Phi(a, a, a, R.self));
+
+    R.CheckReduce(a, R.Phi(R.self, R.self, a, a));
+    R.CheckReduce(a, R.Phi(a, R.self, R.self, a));
+    R.CheckReduce(a, R.Phi(a, a, R.self, R.self));
+    R.CheckReduce(a, R.Phi(R.self, a, a, R.self));
+  }
+
+  for (size_t i = 1; i < ARRAY_SIZE(singles); i++) {
+    Node* a = singles[i], *b = singles[0];
+    Node* phi1 = R.Phi(b, a, a, a);
+    R.CheckReduce(phi1, phi1);
+
+    Node* phi2 = R.Phi(a, b, a, a);
+    R.CheckReduce(phi2, phi2);
+
+    Node* phi3 = R.Phi(a, a, b, a);
+    R.CheckReduce(phi3, phi3);
+
+    Node* phi4 = R.Phi(a, a, a, b);
+    R.CheckReduce(phi4, phi4);
+  }
+}
+
+
+TEST(PhiReduceShouldIgnoreControlNodes) {
+  PhiReducerTester R;
+  Node* zero = R.Int32Constant(0);
+  Node* one = R.Int32Constant(1);
+  Node* oneish = R.Float64Constant(1.1);
+  Node* param = R.Parameter();
+
+  Node* singles[] = {zero, one, oneish, param};
+  for (size_t i = 0; i < ARRAY_SIZE(singles); ++i) {
+    R.CheckReduce(singles[i], R.PhiWithControl(singles[i], R.dead));
+    R.CheckReduce(singles[i], R.PhiWithControl(R.self, singles[i], R.dead));
+    R.CheckReduce(singles[i], R.PhiWithControl(singles[i], R.self, R.dead));
+  }
+}
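All of the reductions exercised above follow a single rule: a phi can be replaced by a value when every non-control input is either that value or the phi itself (a self-loop contributes nothing new). A self-contained sketch of that rule with stand-in types, not the real Node/PhiReducer classes:

    #include <cassert>
    #include <vector>

    struct Node {
      std::vector<Node*> inputs;  // value inputs only; the control input,
                                  // if any, is ignored by the reducer
    };

    // Returns the replacement for {phi}, or {phi} itself if no reduction
    // applies.
    static Node* ReducePhi(Node* phi) {
      Node* replacement = nullptr;
      for (Node* input : phi->inputs) {
        if (input == phi) continue;  // skip self-references
        if (replacement == nullptr) {
          replacement = input;
        } else if (input != replacement) {
          return phi;  // two distinct non-self inputs: keep the phi
        }
      }
      return replacement == nullptr ? phi : replacement;
    }

    int main() {
      Node a, b, phi1, phi2;
      phi1.inputs = {&a, &phi1, &a};  // like R.CheckReduce(a, R.Phi(a, self, a))
      assert(ReducePhi(&phi1) == &a);
      phi2.inputs = {&a, &b};         // distinct inputs: no reduction
      assert(ReducePhi(&phi2) == &phi2);
      return 0;
    }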
diff --git a/test/cctest/compiler/test-pipeline.cc b/test/cctest/compiler/test-pipeline.cc
new file mode 100644 (file)
index 0000000..84ccc28
--- /dev/null
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler.h"
+#include "src/compiler/pipeline.h"
+#include "src/handles.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/scopes.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(PipelineAdd) {
+  InitializedHandleScope handles;
+  const char* source = "(function(a,b) { return a + b; })";
+  Handle<JSFunction> function = v8::Utils::OpenHandle(
+      *v8::Handle<v8::Function>::Cast(CompileRun(source)));
+  CompilationInfoWithZone info(function);
+
+  CHECK(Parser::Parse(&info));
+  StrictMode strict_mode = info.function()->strict_mode();
+  info.SetStrictMode(strict_mode);
+  CHECK(Rewriter::Rewrite(&info));
+  CHECK(Scope::Analyze(&info));
+  CHECK_NE(NULL, info.scope());
+
+  Pipeline pipeline(&info);
+  Handle<Code> code = pipeline.GenerateCode();
+#if V8_TURBOFAN_TARGET
+  CHECK(Pipeline::SupportedTarget());
+  CHECK(!code.is_null());
+#else
+  USE(code);
+#endif
+}
diff --git a/test/cctest/compiler/test-representation-change.cc b/test/cctest/compiler/test-representation-change.cc
new file mode 100644 (file)
index 0000000..2b63307
--- /dev/null
@@ -0,0 +1,281 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/graph-builder-tester.h"
+
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/representation-change.h"
+#include "src/compiler/typer.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+namespace v8 {  // for friendliness.
+namespace internal {
+namespace compiler {
+
+class RepresentationChangerTester : public HandleAndZoneScope,
+                                    public GraphAndBuilders {
+ public:
+  RepresentationChangerTester()
+      : GraphAndBuilders(main_zone()),
+        typer_(main_zone()),
+        jsgraph_(main_graph_, &main_common_, &typer_),
+        changer_(&jsgraph_, &main_simplified_, &main_machine_, main_isolate()) {
+  }
+
+  Typer typer_;
+  JSGraph jsgraph_;
+  RepresentationChanger changer_;
+
+  Isolate* isolate() { return main_isolate(); }
+  Graph* graph() { return main_graph_; }
+  CommonOperatorBuilder* common() { return &main_common_; }
+  JSGraph* jsgraph() { return &jsgraph_; }
+  RepresentationChanger* changer() { return &changer_; }
+
+  // TODO(titzer): use ValueChecker / ValueUtil
+  void CheckInt32Constant(Node* n, int32_t expected) {
+    ValueMatcher<int32_t> m(n);
+    CHECK(m.HasValue());
+    CHECK_EQ(expected, m.Value());
+  }
+
+  void CheckHeapConstant(Node* n, Object* expected) {
+    ValueMatcher<Handle<Object> > m(n);
+    CHECK(m.HasValue());
+    CHECK_EQ(expected, *m.Value());
+  }
+
+  void CheckNumberConstant(Node* n, double expected) {
+    ValueMatcher<double> m(n);
+    CHECK_EQ(IrOpcode::kNumberConstant, n->opcode());
+    CHECK(m.HasValue());
+    CHECK_EQ(expected, m.Value());
+  }
+
+  Node* Parameter(int index = 0) {
+    return graph()->NewNode(common()->Parameter(index));
+  }
+
+  void CheckTypeError(RepTypeUnion from, RepTypeUnion to) {
+    changer()->testing_type_errors_ = true;
+    changer()->type_error_ = false;
+    Node* n = Parameter(0);
+    Node* c = changer()->GetRepresentationFor(n, from, to);
+    CHECK_EQ(n, c);
+    CHECK(changer()->type_error_);
+  }
+
+  void CheckNop(RepTypeUnion from, RepTypeUnion to) {
+    Node* n = Parameter(0);
+    Node* c = changer()->GetRepresentationFor(n, from, to);
+    CHECK_EQ(n, c);
+  }
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+
+static const RepType all_reps[] = {rBit, rWord32, rWord64, rFloat64, rTagged};
+
+
+// TODO(titzer): lift this to ValueHelper
+static const double double_inputs[] = {
+    0.0,   -0.0,    1.0,    -1.0,        0.1,         1.4,    -1.7,
+    2,     5,       6,      982983,      888,         -999.8, 3.1e7,
+    -2e66, 2.3e124, -12e73, V8_INFINITY, -V8_INFINITY};
+
+
+static const int32_t int32_inputs[] = {
+    0,      1,                                -1,
+    2,      5,                                6,
+    982983, 888,                              -999,
+    65535,  static_cast<int32_t>(0xFFFFFFFF), static_cast<int32_t>(0x80000000)};
+
+
+static const uint32_t uint32_inputs[] = {
+    0,      1,   static_cast<uint32_t>(-1),   2,     5,          6,
+    982983, 888, static_cast<uint32_t>(-999), 65535, 0xFFFFFFFF, 0x80000000};
+
+
+TEST(BoolToBit_constant) {
+  RepresentationChangerTester r;
+
+  Node* true_node = r.jsgraph()->TrueConstant();
+  Node* true_bit = r.changer()->GetRepresentationFor(true_node, rTagged, rBit);
+  r.CheckInt32Constant(true_bit, 1);
+
+  Node* false_node = r.jsgraph()->FalseConstant();
+  Node* false_bit =
+      r.changer()->GetRepresentationFor(false_node, rTagged, rBit);
+  r.CheckInt32Constant(false_bit, 0);
+}
+
+
+TEST(BitToBool_constant) {
+  RepresentationChangerTester r;
+
+  for (int i = -5; i < 5; i++) {
+    Node* node = r.jsgraph()->Int32Constant(i);
+    Node* val = r.changer()->GetRepresentationFor(node, rBit, rTagged);
+    r.CheckHeapConstant(val, i == 0 ? r.isolate()->heap()->false_value()
+                                    : r.isolate()->heap()->true_value());
+  }
+}
+
+
+TEST(ToTagged_constant) {
+  RepresentationChangerTester r;
+
+  for (size_t i = 0; i < ARRAY_SIZE(double_inputs); i++) {
+    Node* n = r.jsgraph()->Float64Constant(double_inputs[i]);
+    Node* c = r.changer()->GetRepresentationFor(n, rFloat64, rTagged);
+    r.CheckNumberConstant(c, double_inputs[i]);
+  }
+
+  for (size_t i = 0; i < ARRAY_SIZE(int32_inputs); i++) {
+    Node* n = r.jsgraph()->Int32Constant(int32_inputs[i]);
+    Node* c = r.changer()->GetRepresentationFor(n, rWord32 | tInt32, rTagged);
+    r.CheckNumberConstant(c, static_cast<double>(int32_inputs[i]));
+  }
+
+  for (size_t i = 0; i < ARRAY_SIZE(uint32_inputs); i++) {
+    Node* n = r.jsgraph()->Int32Constant(uint32_inputs[i]);
+    Node* c = r.changer()->GetRepresentationFor(n, rWord32 | tUint32, rTagged);
+    r.CheckNumberConstant(c, static_cast<double>(uint32_inputs[i]));
+  }
+}
+
+
+static void CheckChange(IrOpcode::Value expected, RepTypeUnion from,
+                        RepTypeUnion to) {
+  RepresentationChangerTester r;
+
+  Node* n = r.Parameter();
+  Node* c = r.changer()->GetRepresentationFor(n, from, to);
+
+  CHECK_NE(c, n);
+  CHECK_EQ(expected, c->opcode());
+  CHECK_EQ(n, c->InputAt(0));
+}
+
+
+TEST(SingleChanges) {
+  CheckChange(IrOpcode::kChangeBoolToBit, rTagged, rBit);
+  CheckChange(IrOpcode::kChangeBitToBool, rBit, rTagged);
+
+  CheckChange(IrOpcode::kChangeInt32ToTagged, rWord32 | tInt32, rTagged);
+  CheckChange(IrOpcode::kChangeUint32ToTagged, rWord32 | tUint32, rTagged);
+  CheckChange(IrOpcode::kChangeFloat64ToTagged, rFloat64, rTagged);
+
+  CheckChange(IrOpcode::kChangeTaggedToInt32, rTagged | tInt32, rWord32);
+  CheckChange(IrOpcode::kChangeTaggedToUint32, rTagged | tUint32, rWord32);
+  CheckChange(IrOpcode::kChangeTaggedToFloat64, rTagged, rFloat64);
+
+  // Int32,Uint32 <-> Float64 are actually machine conversions.
+  CheckChange(IrOpcode::kConvertInt32ToFloat64, rWord32 | tInt32, rFloat64);
+  CheckChange(IrOpcode::kConvertUint32ToFloat64, rWord32 | tUint32, rFloat64);
+  CheckChange(IrOpcode::kConvertFloat64ToInt32, rFloat64 | tInt32, rWord32);
+  CheckChange(IrOpcode::kConvertFloat64ToUint32, rFloat64 | tUint32, rWord32);
+}
+
+
+TEST(SignednessInWord32) {
+  RepresentationChangerTester r;
+
+  // TODO(titzer): these are currently type errors because the output type is
+  // not specified. Maybe the RepresentationChanger should assume that anything
+  // converted to or from {rWord32} is {tInt32}, i.e. signed, unless it is
+  // explicitly marked otherwise?
+  r.CheckTypeError(rTagged, rWord32 | tInt32);
+  r.CheckTypeError(rTagged, rWord32 | tUint32);
+  r.CheckTypeError(rWord32, rFloat64);
+  r.CheckTypeError(rFloat64, rWord32);
+
+  //  CheckChange(IrOpcode::kChangeTaggedToInt32, rTagged, rWord32 | tInt32);
+  //  CheckChange(IrOpcode::kChangeTaggedToUint32, rTagged, rWord32 | tUint32);
+  //  CheckChange(IrOpcode::kConvertInt32ToFloat64, rWord32, rFloat64);
+  //  CheckChange(IrOpcode::kConvertFloat64ToInt32, rFloat64, rWord32);
+}
+
+
+TEST(Nops) {
+  RepresentationChangerTester r;
+
+  // X -> X is always a nop for any single representation X.
+  for (size_t i = 0; i < ARRAY_SIZE(all_reps); i++) {
+    r.CheckNop(all_reps[i], all_reps[i]);
+  }
+
+  // 32-bit or 64-bit words can be used as branch conditions (rBit).
+  r.CheckNop(rWord32, rBit);
+  r.CheckNop(rWord32, rBit | tBool);
+  r.CheckNop(rWord64, rBit);
+  r.CheckNop(rWord64, rBit | tBool);
+
+  // rBit (result of comparison) is implicitly a wordish thing.
+  r.CheckNop(rBit, rWord32);
+  r.CheckNop(rBit | tBool, rWord32);
+  r.CheckNop(rBit, rWord64);
+  r.CheckNop(rBit | tBool, rWord64);
+}
+
+
+TEST(TypeErrors) {
+  RepresentationChangerTester r;
+
+  // Floats cannot be implicitly converted to/from comparison conditions.
+  r.CheckTypeError(rFloat64, rBit);
+  r.CheckTypeError(rFloat64, rBit | tBool);
+  r.CheckTypeError(rBit, rFloat64);
+  r.CheckTypeError(rBit | tBool, rFloat64);
+
+  // Word64 is internal and shouldn't be implicitly converted.
+  r.CheckTypeError(rWord64, rTagged | tBool);
+  r.CheckTypeError(rWord64, rTagged);
+  r.CheckTypeError(rTagged, rWord64);
+  r.CheckTypeError(rTagged | tBool, rWord64);
+
+  // Word64 / Word32 shouldn't be implicitly converted.
+  r.CheckTypeError(rWord64, rWord32);
+  r.CheckTypeError(rWord32, rWord64);
+  r.CheckTypeError(rWord64, rWord32 | tInt32);
+  r.CheckTypeError(rWord32 | tInt32, rWord64);
+  r.CheckTypeError(rWord64, rWord32 | tUint32);
+  r.CheckTypeError(rWord32 | tUint32, rWord64);
+
+  for (size_t i = 0; i < ARRAY_SIZE(all_reps); i++) {
+    for (size_t j = 0; j < ARRAY_SIZE(all_reps); j++) {
+      if (i == j) continue;
+      // Only a single from representation is allowed.
+      r.CheckTypeError(all_reps[i] | all_reps[j], rTagged);
+    }
+  }
+}
+
+
+TEST(CompleteMatrix) {
+  // TODO(titzer): test all variants in the matrix.
+  // rB
+  // tBrB
+  // tBrT
+  // rW32
+  // tIrW32
+  // tUrW32
+  // rW64
+  // tIrW64
+  // tUrW64
+  // rF64
+  // tIrF64
+  // tUrF64
+  // tArF64
+  // rT
+  // tArT
+}
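The rXxx/tXxx names used throughout this file behave like independent bit flags OR-ed into a RepTypeUnion, with the invariant (enforced by TEST(TypeErrors)) that a value carries exactly one representation bit plus optional type bits. A sketch of that layout; the flag values below are made up for illustration, and only the invariant mirrors the tests:

    #include <cassert>

    enum RepType {
      rBit = 1 << 0, rWord32 = 1 << 1, rWord64 = 1 << 2,    // representations
      rFloat64 = 1 << 3, rTagged = 1 << 4,
      tBool = 1 << 5, tInt32 = 1 << 6, tUint32 = 1 << 7     // types
    };
    typedef int RepTypeUnion;

    static const RepTypeUnion kRepMask =
        rBit | rWord32 | rWord64 | rFloat64 | rTagged;

    // True if exactly one representation bit is set.
    static bool HasSingleRep(RepTypeUnion u) {
      RepTypeUnion rep = u & kRepMask;
      return rep != 0 && (rep & (rep - 1)) == 0;
    }

    int main() {
      assert(HasSingleRep(rWord32 | tInt32));     // one representation + type
      assert(!HasSingleRep(rWord32 | rFloat64));  // two representations
      return 0;
    }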
diff --git a/test/cctest/compiler/test-run-deopt.cc b/test/cctest/compiler/test-run-deopt.cc
new file mode 100644 (file)
index 0000000..36998d0
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "v8.h"
+
+#include "function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+
+TEST(TurboSimpleDeopt) {
+  FLAG_allow_natives_syntax = true;
+  FLAG_turbo_deoptimization = true;
+
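+  // f returns 0 early unless it starts out optimized, deoptimizes itself
+  // mid-function, checks that it is no longer optimized, and must still
+  // compute a + b correctly in the deoptimized continuation.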
+  FunctionTester T(
+      "(function f(a) {"
+      "var b = 1;"
+      "if (!%IsOptimized()) return 0;"
+      "%DeoptimizeFunction(f);"
+      "if (%IsOptimized()) return 0;"
+      "return a + b; })");
+
+  T.CheckCall(T.Val(2), T.Val(1));
+}
+
+
+TEST(TurboTrivialDeopt) {
+  FLAG_allow_natives_syntax = true;
+  FLAG_turbo_deoptimization = true;
+
+  FunctionTester T(
+      "(function foo() {"
+      "%DeoptimizeFunction(foo);"
+      "return 1; })");
+
+  T.CheckCall(T.Val(1));
+}
diff --git a/test/cctest/compiler/test-run-intrinsics.cc b/test/cctest/compiler/test-run-intrinsics.cc
new file mode 100644 (file)
index 0000000..a1b5676
--- /dev/null
@@ -0,0 +1,211 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+
+TEST(IsSmi) {
+  FunctionTester T("(function(a) { return %_IsSmi(a); })");
+
+  T.CheckTrue(T.Val(1));
+  T.CheckFalse(T.Val(1.1));
+  T.CheckFalse(T.Val(-0.0));
+  T.CheckTrue(T.Val(-2));
+  T.CheckFalse(T.Val(-2.3));
+  T.CheckFalse(T.undefined());
+}
+
+
+TEST(IsNonNegativeSmi) {
+  FunctionTester T("(function(a) { return %_IsNonNegativeSmi(a); })");
+
+  T.CheckTrue(T.Val(1));
+  T.CheckFalse(T.Val(1.1));
+  T.CheckFalse(T.Val(-0.0));
+  T.CheckFalse(T.Val(-2));
+  T.CheckFalse(T.Val(-2.3));
+  T.CheckFalse(T.undefined());
+}
+
+
+TEST(IsMinusZero) {
+  FunctionTester T("(function(a) { return %_IsMinusZero(a); })");
+
+  T.CheckFalse(T.Val(1));
+  T.CheckFalse(T.Val(1.1));
+  T.CheckTrue(T.Val(-0.0));
+  T.CheckFalse(T.Val(-2));
+  T.CheckFalse(T.Val(-2.3));
+  T.CheckFalse(T.undefined());
+}
+
+
+TEST(IsArray) {
+  FunctionTester T("(function(a) { return %_IsArray(a); })");
+
+  T.CheckFalse(T.NewObject("(function() {})"));
+  T.CheckTrue(T.NewObject("([1])"));
+  T.CheckFalse(T.NewObject("({})"));
+  T.CheckFalse(T.NewObject("(/x/)"));
+  T.CheckFalse(T.undefined());
+  T.CheckFalse(T.null());
+  T.CheckFalse(T.Val("x"));
+  T.CheckFalse(T.Val(1));
+}
+
+
+TEST(IsObject) {
+  FunctionTester T("(function(a) { return %_IsObject(a); })");
+
+  T.CheckFalse(T.NewObject("(function() {})"));
+  T.CheckTrue(T.NewObject("([1])"));
+  T.CheckTrue(T.NewObject("({})"));
+  T.CheckTrue(T.NewObject("(/x/)"));
+  T.CheckFalse(T.undefined());
+  T.CheckTrue(T.null());
+  T.CheckFalse(T.Val("x"));
+  T.CheckFalse(T.Val(1));
+}
+
+
+TEST(IsFunction) {
+  FunctionTester T("(function(a) { return %_IsFunction(a); })");
+
+  T.CheckTrue(T.NewObject("(function() {})"));
+  T.CheckFalse(T.NewObject("([1])"));
+  T.CheckFalse(T.NewObject("({})"));
+  T.CheckFalse(T.NewObject("(/x/)"));
+  T.CheckFalse(T.undefined());
+  T.CheckFalse(T.null());
+  T.CheckFalse(T.Val("x"));
+  T.CheckFalse(T.Val(1));
+}
+
+
+TEST(IsRegExp) {
+  FunctionTester T("(function(a) { return %_IsRegExp(a); })");
+
+  T.CheckFalse(T.NewObject("(function() {})"));
+  T.CheckFalse(T.NewObject("([1])"));
+  T.CheckFalse(T.NewObject("({})"));
+  T.CheckTrue(T.NewObject("(/x/)"));
+  T.CheckFalse(T.undefined());
+  T.CheckFalse(T.null());
+  T.CheckFalse(T.Val("x"));
+  T.CheckFalse(T.Val(1));
+}
+
+
+TEST(ClassOf) {
+  FunctionTester T("(function(a) { return %_ClassOf(a); })");
+
+  T.CheckCall(T.Val("Function"), T.NewObject("(function() {})"));
+  T.CheckCall(T.Val("Array"), T.NewObject("([1])"));
+  T.CheckCall(T.Val("Object"), T.NewObject("({})"));
+  T.CheckCall(T.Val("RegExp"), T.NewObject("(/x/)"));
+  T.CheckCall(T.null(), T.undefined());
+  T.CheckCall(T.null(), T.null());
+  T.CheckCall(T.null(), T.Val("x"));
+  T.CheckCall(T.null(), T.Val(1));
+}
+
+
+TEST(ObjectEquals) {
+  FunctionTester T("(function(a,b) { return %_ObjectEquals(a,b); })");
+  CompileRun("var o = {}");
+
+  T.CheckTrue(T.NewObject("(o)"), T.NewObject("(o)"));
+  T.CheckTrue(T.Val("internal"), T.Val("internal"));
+  T.CheckTrue(T.true_value(), T.true_value());
+  T.CheckFalse(T.true_value(), T.false_value());
+  T.CheckFalse(T.NewObject("({})"), T.NewObject("({})"));
+  T.CheckFalse(T.Val("a"), T.Val("b"));
+}
+
+
+TEST(ValueOf) {
+  FunctionTester T("(function(a) { return %_ValueOf(a); })");
+
+  T.CheckCall(T.Val("a"), T.Val("a"));
+  T.CheckCall(T.Val("b"), T.NewObject("(new String('b'))"));
+  T.CheckCall(T.Val(123), T.Val(123));
+  T.CheckCall(T.Val(456), T.NewObject("(new Number(456))"));
+}
+
+
+TEST(SetValueOf) {
+  FunctionTester T("(function(a,b) { return %_SetValueOf(a,b); })");
+
+  T.CheckCall(T.Val("a"), T.NewObject("(new String)"), T.Val("a"));
+  T.CheckCall(T.Val(123), T.NewObject("(new Number)"), T.Val(123));
+  T.CheckCall(T.Val("x"), T.undefined(), T.Val("x"));
+}
+
+
+TEST(StringCharFromCode) {
+  FunctionTester T("(function(a) { return %_StringCharFromCode(a); })");
+
+  T.CheckCall(T.Val("a"), T.Val(97));
+  T.CheckCall(T.Val("\xE2\x9D\x8A"), T.Val(0x274A));
+  T.CheckCall(T.Val(""), T.undefined());
+}
+
+
+TEST(StringCharAt) {
+  FunctionTester T("(function(a,b) { return %_StringCharAt(a,b); })");
+
+  T.CheckCall(T.Val("e"), T.Val("huge fan!"), T.Val(3));
+  T.CheckCall(T.Val("f"), T.Val("\xE2\x9D\x8A fan!"), T.Val(2));
+  T.CheckCall(T.Val(""), T.Val("not a fan!"), T.Val(23));
+}
+
+
+TEST(StringCharCodeAt) {
+  FunctionTester T("(function(a,b) { return %_StringCharCodeAt(a,b); })");
+
+  T.CheckCall(T.Val('e'), T.Val("huge fan!"), T.Val(3));
+  T.CheckCall(T.Val('f'), T.Val("\xE2\x9D\x8A fan!"), T.Val(2));
+  T.CheckCall(T.nan(), T.Val("not a fan!"), T.Val(23));
+}
+
+
+TEST(StringAdd) {
+  FunctionTester T("(function(a,b) { return %_StringAdd(a,b); })");
+
+  T.CheckCall(T.Val("aaabbb"), T.Val("aaa"), T.Val("bbb"));
+  T.CheckCall(T.Val("aaa"), T.Val("aaa"), T.Val(""));
+  T.CheckCall(T.Val("bbb"), T.Val(""), T.Val("bbb"));
+}
+
+
+TEST(StringSubString) {
+  FunctionTester T("(function(a,b) { return %_SubString(a,b,b+3); })");
+
+  T.CheckCall(T.Val("aaa"), T.Val("aaabbb"), T.Val(0.0));
+  T.CheckCall(T.Val("abb"), T.Val("aaabbb"), T.Val(2));
+  T.CheckCall(T.Val("aaa"), T.Val("aaa"), T.Val(0.0));
+}
+
+
+TEST(StringCompare) {
+  FunctionTester T("(function(a,b) { return %_StringCompare(a,b); })");
+
+  T.CheckCall(T.Val(-1), T.Val("aaa"), T.Val("bbb"));
+  T.CheckCall(T.Val(0.0), T.Val("bbb"), T.Val("bbb"));
+  T.CheckCall(T.Val(+1), T.Val("ccc"), T.Val("bbb"));
+}
+
+
+TEST(CallFunction) {
+  FunctionTester T("(function(a,b) { return %_CallFunction(a, 1, 2, 3, b); })");
+  CompileRun("function f(a,b,c) { return a + b + c + this.d; }");
+
+  T.CheckCall(T.Val(129), T.NewObject("({d:123})"), T.NewObject("f"));
+  T.CheckCall(T.Val("6x"), T.NewObject("({d:'x'})"), T.NewObject("f"));
+}
diff --git a/test/cctest/compiler/test-run-jsbranches.cc b/test/cctest/compiler/test-run-jsbranches.cc
new file mode 100644 (file)
index 0000000..2eb4fa6
--- /dev/null
@@ -0,0 +1,262 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(Conditional) {
+  FunctionTester T("(function(a) { return a ? 23 : 42; })");
+
+  T.CheckCall(T.Val(23), T.true_value(), T.undefined());
+  T.CheckCall(T.Val(42), T.false_value(), T.undefined());
+  T.CheckCall(T.Val(42), T.undefined(), T.undefined());
+  T.CheckCall(T.Val(42), T.Val(0.0), T.undefined());
+  T.CheckCall(T.Val(23), T.Val(999), T.undefined());
+  T.CheckCall(T.Val(23), T.Val("x"), T.undefined());
+}
+
+
+TEST(LogicalAnd) {
+  FunctionTester T("(function(a,b) { return a && b; })");
+
+  T.CheckCall(T.true_value(), T.true_value(), T.true_value());
+  T.CheckCall(T.false_value(), T.false_value(), T.true_value());
+  T.CheckCall(T.false_value(), T.true_value(), T.false_value());
+  T.CheckCall(T.false_value(), T.false_value(), T.false_value());
+
+  T.CheckCall(T.Val(999), T.Val(777), T.Val(999));
+  T.CheckCall(T.Val(0.0), T.Val(0.0), T.Val(999));
+  T.CheckCall(T.Val("b"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(LogicalOr) {
+  FunctionTester T("(function(a,b) { return a || b; })");
+
+  T.CheckCall(T.true_value(), T.true_value(), T.true_value());
+  T.CheckCall(T.true_value(), T.false_value(), T.true_value());
+  T.CheckCall(T.true_value(), T.true_value(), T.false_value());
+  T.CheckCall(T.false_value(), T.false_value(), T.false_value());
+
+  T.CheckCall(T.Val(777), T.Val(777), T.Val(999));
+  T.CheckCall(T.Val(999), T.Val(0.0), T.Val(999));
+  T.CheckCall(T.Val("a"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(LogicalEffect) {
+  FunctionTester T("(function(a,b) { a && (b = a); return b; })");
+
+  T.CheckCall(T.true_value(), T.true_value(), T.true_value());
+  T.CheckCall(T.true_value(), T.false_value(), T.true_value());
+  T.CheckCall(T.true_value(), T.true_value(), T.false_value());
+  T.CheckCall(T.false_value(), T.false_value(), T.false_value());
+
+  T.CheckCall(T.Val(777), T.Val(777), T.Val(999));
+  T.CheckCall(T.Val(999), T.Val(0.0), T.Val(999));
+  T.CheckCall(T.Val("a"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(IfStatement) {
+  FunctionTester T("(function(a) { if (a) { return 1; } else { return 2; } })");
+
+  T.CheckCall(T.Val(1), T.true_value(), T.undefined());
+  T.CheckCall(T.Val(2), T.false_value(), T.undefined());
+  T.CheckCall(T.Val(2), T.undefined(), T.undefined());
+  T.CheckCall(T.Val(2), T.Val(0.0), T.undefined());
+  T.CheckCall(T.Val(1), T.Val(999), T.undefined());
+  T.CheckCall(T.Val(1), T.Val("x"), T.undefined());
+}
+
+
+TEST(DoWhileStatement) {
+  FunctionTester T("(function(a,b) { do { a+=23; } while(a < b) return a; })");
+
+  T.CheckCall(T.Val(24), T.Val(1), T.Val(1));
+  T.CheckCall(T.Val(24), T.Val(1), T.Val(23));
+  T.CheckCall(T.Val(47), T.Val(1), T.Val(25));
+  T.CheckCall(T.Val("str23"), T.Val("str"), T.Val("str"));
+}
+
+
+TEST(WhileStatement) {
+  FunctionTester T("(function(a,b) { while(a < b) { a+=23; } return a; })");
+
+  T.CheckCall(T.Val(1), T.Val(1), T.Val(1));
+  T.CheckCall(T.Val(24), T.Val(1), T.Val(23));
+  T.CheckCall(T.Val(47), T.Val(1), T.Val(25));
+  T.CheckCall(T.Val("str"), T.Val("str"), T.Val("str"));
+}
+
+
+TEST(ForStatement) {
+  FunctionTester T("(function(a,b) { for (; a < b; a+=23) {} return a; })");
+
+  T.CheckCall(T.Val(1), T.Val(1), T.Val(1));
+  T.CheckCall(T.Val(24), T.Val(1), T.Val(23));
+  T.CheckCall(T.Val(47), T.Val(1), T.Val(25));
+  T.CheckCall(T.Val("str"), T.Val("str"), T.Val("str"));
+}
+
+
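+// Each for-in body below remembers the last enumerated key: nothing for
+// values without enumerable properties (undefined, null, {}, numbers), the
+// last index "2" for the string "str" or [1, 2, 3], and the surviving key
+// when the second argument names a property that is deleted mid-iteration.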
+static void TestForIn(const char* code) {
+  FunctionTester T(code);
+  T.CheckCall(T.undefined(), T.undefined());
+  T.CheckCall(T.undefined(), T.null());
+  T.CheckCall(T.undefined(), T.NewObject("({})"));
+  T.CheckCall(T.undefined(), T.Val(1));
+  T.CheckCall(T.Val("2"), T.Val("str"));
+  T.CheckCall(T.Val("a"), T.NewObject("({'a' : 1})"));
+  T.CheckCall(T.Val("2"), T.NewObject("([1, 2, 3])"));
+  T.CheckCall(T.Val("a"), T.NewObject("({'a' : 1, 'b' : 1})"), T.Val("b"));
+  T.CheckCall(T.Val("1"), T.NewObject("([1, 2, 3])"), T.Val("2"));
+}
+
+
+TEST(ForInStatement) {
+  // Variable assignment.
+  TestForIn(
+      "(function(a, b) {"
+      "var last;"
+      "for (var x in a) {"
+      "  if (b) { delete a[b]; b = undefined; }"
+      "  last = x;"
+      "}"
+      "return last;})");
+  // Indexed assignment.
+  TestForIn(
+      "(function(a, b) {"
+      "var array = [0, 1, undefined];"
+      "for (array[2] in a) {"
+      "  if (b) { delete a[b]; b = undefined; }"
+      "}"
+      "return array[2];})");
+  // Named assignment.
+  TestForIn(
+      "(function(a, b) {"
+      "var obj = {'a' : undefined};"
+      "for (obj.a in a) {"
+      "  if (b) { delete a[b]; b = undefined; }"
+      "}"
+      "return obj.a;})");
+}
+
+
+TEST(SwitchStatement) {
+  const char* src =
+      "(function(a,b) {"
+      "  var r = '-';"
+      "  switch (a) {"
+      "    case 'x'    : r += 'X-';"
+      "    case b + 'b': r += 'B-';"
+      "    default     : r += 'D-';"
+      "    case 'y'    : r += 'Y-';"
+      "  }"
+      "  return r;"
+      "})";
+  FunctionTester T(src);
+
+  T.CheckCall(T.Val("-X-B-D-Y-"), T.Val("x"), T.Val("B"));
+  T.CheckCall(T.Val("-B-D-Y-"), T.Val("Bb"), T.Val("B"));
+  T.CheckCall(T.Val("-D-Y-"), T.Val("z"), T.Val("B"));
+  T.CheckCall(T.Val("-Y-"), T.Val("y"), T.Val("B"));
+
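+  // The case label (b + 'b') is re-evaluated on every dispatch; with an
+  // object whose toString() returns an incrementing counter, the label is
+  // "0b", then "1b", then "2b", so only the second call below matches "1b"
+  // and executes the 'B-' case before falling through.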
+  CompileRun("var c = 0; var o = { toString:function(){return c++} };");
+  T.CheckCall(T.Val("-D-Y-"), T.Val("1b"), T.NewObject("o"));
+  T.CheckCall(T.Val("-B-D-Y-"), T.Val("1b"), T.NewObject("o"));
+  T.CheckCall(T.Val("-D-Y-"), T.Val("1b"), T.NewObject("o"));
+}
+
+
+TEST(BlockBreakStatement) {
+  FunctionTester T("(function(a,b) { L:{ if (a) break L; b=1; } return b; })");
+
+  T.CheckCall(T.Val(7), T.true_value(), T.Val(7));
+  T.CheckCall(T.Val(1), T.false_value(), T.Val(7));
+}
+
+
+TEST(BlockReturnStatement) {
+  FunctionTester T("(function(a,b) { L:{ if (a) b=1; return b; } })");
+
+  T.CheckCall(T.Val(1), T.true_value(), T.Val(7));
+  T.CheckCall(T.Val(7), T.false_value(), T.Val(7));
+}
+
+
+TEST(NestedIfConditional) {
+  FunctionTester T("(function(a,b) { if (a) { b = (b?b:7) + 1; } return b; })");
+
+  T.CheckCall(T.Val(4), T.false_value(), T.Val(4));
+  T.CheckCall(T.Val(6), T.true_value(), T.Val(5));
+  T.CheckCall(T.Val(8), T.true_value(), T.undefined());
+}
+
+
+TEST(NestedIfLogical) {
+  const char* src =
+      "(function(a,b) {"
+      "  if (a || b) { return 1; } else { return 2; }"
+      "})";
+  FunctionTester T(src);
+
+  T.CheckCall(T.Val(1), T.true_value(), T.true_value());
+  T.CheckCall(T.Val(1), T.false_value(), T.true_value());
+  T.CheckCall(T.Val(1), T.true_value(), T.false_value());
+  T.CheckCall(T.Val(2), T.false_value(), T.false_value());
+  T.CheckCall(T.Val(1), T.Val(1.0), T.Val(1.0));
+  T.CheckCall(T.Val(1), T.Val(0.0), T.Val(1.0));
+  T.CheckCall(T.Val(1), T.Val(1.0), T.Val(0.0));
+  T.CheckCall(T.Val(2), T.Val(0.0), T.Val(0.0));
+}
+
+
+TEST(NestedIfElseFor) {
+  const char* src =
+      "(function(a,b) {"
+      "  if (!a) { return b - 3; } else { for (; a < b; a++); }"
+      "  return a;"
+      "})";
+  FunctionTester T(src);
+
+  T.CheckCall(T.Val(1), T.false_value(), T.Val(4));
+  T.CheckCall(T.Val(2), T.true_value(), T.Val(2));
+  T.CheckCall(T.Val(3), T.Val(3), T.Val(1));
+}
+
+
+TEST(NestedWhileWhile) {
+  const char* src =
+      "(function(a) {"
+      "  var i = a; while (false) while(false) return i;"
+      "  return i;"
+      "})";
+  FunctionTester T(src);
+
+  T.CheckCall(T.Val(2.0), T.Val(2.0), T.Val(-1.0));
+  T.CheckCall(T.Val(65.0), T.Val(65.0), T.Val(-1.0));
+}
+
+
+TEST(NestedForIf) {
+  FunctionTester T("(function(a,b) { for (; a > 1; a--) if (b) return 1; })");
+
+  T.CheckCall(T.Val(1), T.Val(3), T.true_value());
+  T.CheckCall(T.undefined(), T.Val(2), T.false_value());
+  T.CheckCall(T.undefined(), T.Val(1), T.null());
+}
+
+
+TEST(NestedForConditional) {
+  FunctionTester T("(function(a,b) { for (; a > 1; a--) return b ? 1 : 2; })");
+
+  T.CheckCall(T.Val(1), T.Val(3), T.true_value());
+  T.CheckCall(T.Val(2), T.Val(2), T.false_value());
+  T.CheckCall(T.undefined(), T.Val(1), T.null());
+}
diff --git a/test/cctest/compiler/test-run-jscalls.cc b/test/cctest/compiler/test-run-jscalls.cc
new file mode 100644 (file)
index 0000000..2ad7e50
--- /dev/null
@@ -0,0 +1,235 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(SimpleCall) {
+  FunctionTester T("(function(foo,a) { return foo(a); })");
+  Handle<JSFunction> foo = T.NewFunction("(function(a) { return a; })");
+
+  T.CheckCall(T.Val(3), foo, T.Val(3));
+  T.CheckCall(T.Val(3.1), foo, T.Val(3.1));
+  T.CheckCall(foo, foo, foo);
+  T.CheckCall(T.Val("Abba"), foo, T.Val("Abba"));
+}
+
+
+TEST(SimpleCall2) {
+  FunctionTester T("(function(foo,a) { return foo(a); })");
+  Handle<JSFunction> foo = T.NewFunction("(function(a) { return a; })");
+  T.Compile(foo);
+
+  T.CheckCall(T.Val(3), foo, T.Val(3));
+  T.CheckCall(T.Val(3.1), foo, T.Val(3.1));
+  T.CheckCall(foo, foo, foo);
+  T.CheckCall(T.Val("Abba"), foo, T.Val("Abba"));
+}
+
+
+TEST(ConstCall) {
+  FunctionTester T("(function(foo,a) { return foo(a,3); })");
+  Handle<JSFunction> foo = T.NewFunction("(function(a,b) { return a + b; })");
+  T.Compile(foo);
+
+  T.CheckCall(T.Val(6), foo, T.Val(3));
+  T.CheckCall(T.Val(6.1), foo, T.Val(3.1));
+  T.CheckCall(T.Val("function (a,b) { return a + b; }3"), foo, foo);
+  T.CheckCall(T.Val("Abba3"), foo, T.Val("Abba"));
+}
+
+
+TEST(ConstCall2) {
+  FunctionTester T("(function(foo,a) { return foo(a,\"3\"); })");
+  Handle<JSFunction> foo = T.NewFunction("(function(a,b) { return a + b; })");
+  T.Compile(foo);
+
+  T.CheckCall(T.Val("33"), foo, T.Val(3));
+  T.CheckCall(T.Val("3.13"), foo, T.Val(3.1));
+  T.CheckCall(T.Val("function (a,b) { return a + b; }3"), foo, foo);
+  T.CheckCall(T.Val("Abba3"), foo, T.Val("Abba"));
+}
+
+
+TEST(PropertyNamedCall) {
+  FunctionTester T("(function(a,b) { return a.foo(b,23); })");
+  CompileRun("function foo(y,z) { return this.x + y + z; }");
+
+  T.CheckCall(T.Val(32), T.NewObject("({ foo:foo, x:4 })"), T.Val(5));
+  T.CheckCall(T.Val("xy23"), T.NewObject("({ foo:foo, x:'x' })"), T.Val("y"));
+  T.CheckCall(T.nan(), T.NewObject("({ foo:foo, y:0 })"), T.Val(3));
+}
+
+
+TEST(PropertyKeyedCall) {
+  FunctionTester T("(function(a,b) { var f = 'foo'; return a[f](b,23); })");
+  CompileRun("function foo(y,z) { return this.x + y + z; }");
+
+  T.CheckCall(T.Val(32), T.NewObject("({ foo:foo, x:4 })"), T.Val(5));
+  T.CheckCall(T.Val("xy23"), T.NewObject("({ foo:foo, x:'x' })"), T.Val("y"));
+  T.CheckCall(T.nan(), T.NewObject("({ foo:foo, y:0 })"), T.Val(3));
+}
+
+
+TEST(GlobalCall) {
+  FunctionTester T("(function(a,b) { return foo(a,b); })");
+  CompileRun("function foo(a,b) { return a + b + this.c; }");
+  CompileRun("var c = 23;");
+
+  T.CheckCall(T.Val(32), T.Val(4), T.Val(5));
+  T.CheckCall(T.Val("xy23"), T.Val("x"), T.Val("y"));
+  T.CheckCall(T.nan(), T.undefined(), T.Val(3));
+}
+
+
+TEST(LookupCall) {
+  FunctionTester T("(function(a,b) { with (a) { return foo(a,b); } })");
+
+  CompileRun("function f1(a,b) { return a.val + b; }");
+  T.CheckCall(T.Val(5), T.NewObject("({ foo:f1, val:2 })"), T.Val(3));
+  T.CheckCall(T.Val("xy"), T.NewObject("({ foo:f1, val:'x' })"), T.Val("y"));
+
+  CompileRun("function f2(a,b) { return this.val + b; }");
+  T.CheckCall(T.Val(9), T.NewObject("({ foo:f2, val:4 })"), T.Val(5));
+  T.CheckCall(T.Val("xy"), T.NewObject("({ foo:f2, val:'x' })"), T.Val("y"));
+}
+
+
+TEST(MismatchCallTooFew) {
+  FunctionTester T("(function(a,b) { return foo(a,b); })");
+  CompileRun("function foo(a,b,c) { return a + b + c; }");
+
+  T.CheckCall(T.nan(), T.Val(23), T.Val(42));
+  T.CheckCall(T.nan(), T.Val(4.2), T.Val(2.3));
+  T.CheckCall(T.Val("abundefined"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(MismatchCallTooMany) {
+  FunctionTester T("(function(a,b) { return foo(a,b); })");
+  CompileRun("function foo(a) { return a; }");
+
+  T.CheckCall(T.Val(23), T.Val(23), T.Val(42));
+  T.CheckCall(T.Val(4.2), T.Val(4.2), T.Val(2.3));
+  T.CheckCall(T.Val("a"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(ConstructorCall) {
+  FunctionTester T("(function(a,b) { return new foo(a,b).value; })");
+  CompileRun("function foo(a,b) { return { value: a + b + this.c }; }");
+  CompileRun("foo.prototype.c = 23;");
+
+  T.CheckCall(T.Val(32), T.Val(4), T.Val(5));
+  T.CheckCall(T.Val("xy23"), T.Val("x"), T.Val("y"));
+  T.CheckCall(T.nan(), T.undefined(), T.Val(3));
+}
+
+
+// TODO(titzer): factor these out into test-runtime-calls.cc
+TEST(RuntimeCallCPP1) {
+  FLAG_allow_natives_syntax = true;
+  FunctionTester T("(function(a) { return %ToBool(a); })");
+
+  T.CheckCall(T.true_value(), T.Val(23), T.undefined());
+  T.CheckCall(T.true_value(), T.Val(4.2), T.undefined());
+  T.CheckCall(T.true_value(), T.Val("str"), T.undefined());
+  T.CheckCall(T.true_value(), T.true_value(), T.undefined());
+  T.CheckCall(T.false_value(), T.false_value(), T.undefined());
+  T.CheckCall(T.false_value(), T.undefined(), T.undefined());
+  T.CheckCall(T.false_value(), T.Val(0.0), T.undefined());
+}
+
+
+TEST(RuntimeCallCPP2) {
+  FLAG_allow_natives_syntax = true;
+  FunctionTester T("(function(a,b) { return %NumberAdd(a, b); })");
+
+  T.CheckCall(T.Val(65), T.Val(42), T.Val(23));
+  T.CheckCall(T.Val(19), T.Val(42), T.Val(-23));
+  T.CheckCall(T.Val(6.5), T.Val(4.2), T.Val(2.3));
+}
+
+
+TEST(RuntimeCallJS) {
+  FLAG_allow_natives_syntax = true;
+  FunctionTester T("(function(a) { return %ToString(a); })");
+
+  T.CheckCall(T.Val("23"), T.Val(23), T.undefined());
+  T.CheckCall(T.Val("4.2"), T.Val(4.2), T.undefined());
+  T.CheckCall(T.Val("str"), T.Val("str"), T.undefined());
+  T.CheckCall(T.Val("true"), T.true_value(), T.undefined());
+  T.CheckCall(T.Val("false"), T.false_value(), T.undefined());
+  T.CheckCall(T.Val("undefined"), T.undefined(), T.undefined());
+}
+
+
+TEST(RuntimeCallInline) {
+  FLAG_allow_natives_syntax = true;
+  FunctionTester T("(function(a) { return %_IsObject(a); })");
+
+  T.CheckCall(T.false_value(), T.Val(23), T.undefined());
+  T.CheckCall(T.false_value(), T.Val(4.2), T.undefined());
+  T.CheckCall(T.false_value(), T.Val("str"), T.undefined());
+  T.CheckCall(T.false_value(), T.true_value(), T.undefined());
+  T.CheckCall(T.false_value(), T.false_value(), T.undefined());
+  T.CheckCall(T.false_value(), T.undefined(), T.undefined());
+  T.CheckCall(T.true_value(), T.NewObject("({})"), T.undefined());
+  T.CheckCall(T.true_value(), T.NewObject("([])"), T.undefined());
+}
+
+
+TEST(RuntimeCallBooleanize) {
+  // TODO(turbofan): %Booleanize will disappear; don't hesitate to remove this
+  // test case. The two-argument case is already covered by the tests above.
+  FLAG_allow_natives_syntax = true;
+  FunctionTester T("(function(a,b) { return %Booleanize(a, b); })");
+
+  T.CheckCall(T.true_value(), T.Val(-1), T.Val(Token::LT));
+  T.CheckCall(T.false_value(), T.Val(-1), T.Val(Token::EQ));
+  T.CheckCall(T.false_value(), T.Val(-1), T.Val(Token::GT));
+
+  T.CheckCall(T.false_value(), T.Val(0.0), T.Val(Token::LT));
+  T.CheckCall(T.true_value(), T.Val(0.0), T.Val(Token::EQ));
+  T.CheckCall(T.false_value(), T.Val(0.0), T.Val(Token::GT));
+
+  T.CheckCall(T.false_value(), T.Val(1), T.Val(Token::LT));
+  T.CheckCall(T.false_value(), T.Val(1), T.Val(Token::EQ));
+  T.CheckCall(T.true_value(), T.Val(1), T.Val(Token::GT));
+}
+
+
+TEST(EvalCall) {
+  FunctionTester T("(function(a,b) { return eval(a); })");
+  Handle<JSObject> g(T.function->context()->global_object()->global_proxy());
+
+  T.CheckCall(T.Val(23), T.Val("17 + 6"), T.undefined());
+  T.CheckCall(T.Val("'Y'; a"), T.Val("'Y'; a"), T.Val("b-val"));
+  T.CheckCall(T.Val("b-val"), T.Val("'Y'; b"), T.Val("b-val"));
+  T.CheckCall(g, T.Val("this"), T.undefined());
+  T.CheckCall(g, T.Val("'use strict'; this"), T.undefined());
+
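+  // Once 'eval' is rebound to an ordinary function, the call is no longer
+  // a direct eval: the argument string is not evaluated, and the receiver
+  // follows normal call rules (global proxy in sloppy mode, undefined in
+  // strict mode).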
+  CompileRun("eval = function(x) { return x; }");
+  T.CheckCall(T.Val("17 + 6"), T.Val("17 + 6"), T.undefined());
+
+  CompileRun("eval = function(x) { return this; }");
+  T.CheckCall(g, T.Val("17 + 6"), T.undefined());
+
+  CompileRun("eval = function(x) { 'use strict'; return this; }");
+  T.CheckCall(T.undefined(), T.Val("17 + 6"), T.undefined());
+}
+
+
+TEST(ReceiverPatching) {
+  // TODO(turbofan): Note that this test only checks that the function prologue
+  // patches an undefined receiver to the global receiver. If this starts to
+  // fail once we fix the calling protocol, just remove this test.
+  FunctionTester T("(function(a) { return this; })");
+  Handle<JSObject> g(T.function->context()->global_object()->global_proxy());
+  T.CheckCall(g, T.undefined());
+}
diff --git a/test/cctest/compiler/test-run-jsexceptions.cc b/test/cctest/compiler/test-run-jsexceptions.cc
new file mode 100644 (file)
index 0000000..0712ab6
--- /dev/null
@@ -0,0 +1,45 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(Throw) {
+  FunctionTester T("(function(a,b) { if (a) { throw b; } else { return b; }})");
+
+  T.CheckThrows(T.true_value(), T.NewObject("new Error"));
+  T.CheckCall(T.Val(23), T.false_value(), T.Val(23));
+}
+
+
+TEST(ThrowSourcePosition) {
+  static const char* src =
+      "(function(a, b) {        \n"
+      "  if (a == 1) throw 1;   \n"
+      "  if (a == 2) {throw 2}  \n"
+      "  if (a == 3) {0;throw 3}\n"
+      "  throw 4;               \n"
+      "})                       ";
+  FunctionTester T(src);
+  v8::Handle<v8::Message> message;
+
+  message = T.CheckThrowsReturnMessage(T.Val(1), T.undefined());
+  CHECK(!message.IsEmpty());
+  CHECK_EQ(2, message->GetLineNumber());
+  CHECK_EQ(40, message->GetStartPosition());
+
+  message = T.CheckThrowsReturnMessage(T.Val(2), T.undefined());
+  CHECK(!message.IsEmpty());
+  CHECK_EQ(3, message->GetLineNumber());
+  CHECK_EQ(67, message->GetStartPosition());
+
+  message = T.CheckThrowsReturnMessage(T.Val(3), T.undefined());
+  CHECK(!message.IsEmpty());
+  CHECK_EQ(4, message->GetLineNumber());
+  CHECK_EQ(95, message->GetStartPosition());
+}
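The expected positions follow directly from the padded source: every line is exactly 26 characters including the trailing newline, so a throw at (0-based) column C of line N starts at 26 * (N - 1) + C. A quick check of the constants used above:

    #include <cassert>

    int main() {
      const int kLineLength = 26;  // each padded line, newline included
      assert(kLineLength * (2 - 1) + 14 == 40);  // line 2, "throw 1" at col 14
      assert(kLineLength * (3 - 1) + 15 == 67);  // line 3, "throw 2" at col 15
      assert(kLineLength * (4 - 1) + 17 == 95);  // line 4, "throw 3" at col 17
      return 0;
    }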
diff --git a/test/cctest/compiler/test-run-jsops.cc b/test/cctest/compiler/test-run-jsops.cc
new file mode 100644 (file)
index 0000000..eb39760
--- /dev/null
@@ -0,0 +1,524 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(BinopAdd) {
+  FunctionTester T("(function(a,b) { return a + b; })");
+
+  T.CheckCall(3, 1, 2);
+  T.CheckCall(-11, -2, -9);
+  T.CheckCall(-11, -1.5, -9.5);
+  T.CheckCall(T.Val("AB"), T.Val("A"), T.Val("B"));
+  T.CheckCall(T.Val("A11"), T.Val("A"), T.Val(11));
+  T.CheckCall(T.Val("12B"), T.Val(12), T.Val("B"));
+  T.CheckCall(T.Val("38"), T.Val("3"), T.Val("8"));
+  T.CheckCall(T.Val("31"), T.Val("3"), T.NewObject("([1])"));
+  T.CheckCall(T.Val("3[object Object]"), T.Val("3"), T.NewObject("({})"));
+}
+
+
+TEST(BinopSubtract) {
+  FunctionTester T("(function(a,b) { return a - b; })");
+
+  T.CheckCall(3, 4, 1);
+  T.CheckCall(3.0, 4.5, 1.5);
+  T.CheckCall(T.Val(-9), T.Val("0"), T.Val(9));
+  T.CheckCall(T.Val(-9), T.Val(0.0), T.Val("9"));
+  T.CheckCall(T.Val(1), T.Val("3"), T.Val("2"));
+  T.CheckCall(T.nan(), T.Val("3"), T.Val("B"));
+  T.CheckCall(T.Val(2), T.Val("3"), T.NewObject("([1])"));
+  T.CheckCall(T.nan(), T.Val("3"), T.NewObject("({})"));
+}
+
+
+TEST(BinopMultiply) {
+  FunctionTester T("(function(a,b) { return a * b; })");
+
+  T.CheckCall(6, 3, 2);
+  T.CheckCall(4.5, 2.0, 2.25);
+  T.CheckCall(T.Val(6), T.Val("3"), T.Val(2));
+  T.CheckCall(T.Val(4.5), T.Val(2.0), T.Val("2.25"));
+  T.CheckCall(T.Val(6), T.Val("3"), T.Val("2"));
+  T.CheckCall(T.nan(), T.Val("3"), T.Val("B"));
+  T.CheckCall(T.Val(3), T.Val("3"), T.NewObject("([1])"));
+  T.CheckCall(T.nan(), T.Val("3"), T.NewObject("({})"));
+}
+
+
+TEST(BinopDivide) {
+  FunctionTester T("(function(a,b) { return a / b; })");
+
+  T.CheckCall(2, 8, 4);
+  T.CheckCall(2.1, 8.4, 4);
+  T.CheckCall(V8_INFINITY, 8, 0);
+  T.CheckCall(-V8_INFINITY, -8, 0);
+  T.CheckCall(T.infinity(), T.Val(8), T.Val("0"));
+  T.CheckCall(T.minus_infinity(), T.Val("-8"), T.Val(0.0));
+  T.CheckCall(T.Val(1.5), T.Val("3"), T.Val("2"));
+  T.CheckCall(T.nan(), T.Val("3"), T.Val("B"));
+  T.CheckCall(T.Val(1.5), T.Val("3"), T.NewObject("([2])"));
+  T.CheckCall(T.nan(), T.Val("3"), T.NewObject("({})"));
+}
+
+
+TEST(BinopModulus) {
+  FunctionTester T("(function(a,b) { return a % b; })");
+
+  T.CheckCall(3, 8, 5);
+  T.CheckCall(T.Val(3), T.Val("8"), T.Val(5));
+  T.CheckCall(T.Val(3), T.Val(8), T.Val("5"));
+  T.CheckCall(T.Val(1), T.Val("3"), T.Val("2"));
+  T.CheckCall(T.nan(), T.Val("3"), T.Val("B"));
+  T.CheckCall(T.Val(1), T.Val("3"), T.NewObject("([2])"));
+  T.CheckCall(T.nan(), T.Val("3"), T.NewObject("({})"));
+}
+
+
+TEST(BinopShiftLeft) {
+  FunctionTester T("(function(a,b) { return a << b; })");
+
+  T.CheckCall(4, 2, 1);
+  T.CheckCall(T.Val(4), T.Val("2"), T.Val(1));
+  T.CheckCall(T.Val(4), T.Val(2), T.Val("1"));
+}
+
+
+TEST(BinopShiftRight) {
+  FunctionTester T("(function(a,b) { return a >> b; })");
+
+  T.CheckCall(4, 8, 1);
+  T.CheckCall(-4, -8, 1);
+  T.CheckCall(T.Val(4), T.Val("8"), T.Val(1));
+  T.CheckCall(T.Val(4), T.Val(8), T.Val("1"));
+}
+
+
+TEST(BinopShiftRightLogical) {
+  FunctionTester T("(function(a,b) { return a >>> b; })");
+
+  T.CheckCall(4, 8, 1);
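+  // -8 is 0xFFFFFFF8 as an unsigned 32-bit value, so a zero-fill shift
+  // right by one yields 0x7FFFFFFC.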
+  T.CheckCall(0x7ffffffc, -8, 1);
+  T.CheckCall(T.Val(4), T.Val("8"), T.Val(1));
+  T.CheckCall(T.Val(4), T.Val(8), T.Val("1"));
+}
+
+
+TEST(BinopAnd) {
+  FunctionTester T("(function(a,b) { return a & b; })");
+
+  T.CheckCall(7, 7, 15);
+  T.CheckCall(7, 15, 7);
+  T.CheckCall(T.Val(7), T.Val("15"), T.Val(7));
+  T.CheckCall(T.Val(7), T.Val(15), T.Val("7"));
+}
+
+
+TEST(BinopOr) {
+  FunctionTester T("(function(a,b) { return a | b; })");
+
+  T.CheckCall(6, 4, 2);
+  T.CheckCall(6, 2, 4);
+  T.CheckCall(T.Val(6), T.Val("2"), T.Val(4));
+  T.CheckCall(T.Val(6), T.Val(2), T.Val("4"));
+}
+
+
+TEST(BinopXor) {
+  FunctionTester T("(function(a,b) { return a ^ b; })");
+
+  T.CheckCall(7, 15, 8);
+  T.CheckCall(7, 8, 15);
+  T.CheckCall(T.Val(7), T.Val("8"), T.Val(15));
+  T.CheckCall(T.Val(7), T.Val(8), T.Val("15"));
+}
+
+
+TEST(BinopStrictEqual) {
+  FunctionTester T("(function(a,b) { return a === b; })");
+
+  T.CheckTrue(7, 7);
+  T.CheckFalse(7, 8);
+  T.CheckTrue(7.1, 7.1);
+  T.CheckFalse(7.1, 8.1);
+
+  T.CheckTrue(T.Val("7.1"), T.Val("7.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("7.1"));
+  T.CheckFalse(T.Val(7), T.undefined());
+  T.CheckFalse(T.undefined(), T.Val(7));
+
+  CompileRun("var o = { desc : 'I am a singleton' }");
+  T.CheckFalse(T.NewObject("([1])"), T.NewObject("([1])"));
+  T.CheckFalse(T.NewObject("({})"), T.NewObject("({})"));
+  T.CheckTrue(T.NewObject("(o)"), T.NewObject("(o)"));
+}
+
+
+TEST(BinopEqual) {
+  FunctionTester T("(function(a,b) { return a == b; })");
+
+  T.CheckTrue(7, 7);
+  T.CheckFalse(7, 8);
+  T.CheckTrue(7.1, 7.1);
+  T.CheckFalse(7.1, 8.1);
+
+  T.CheckTrue(T.Val("7.1"), T.Val("7.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("7.1"));
+
+  CompileRun("var o = { desc : 'I am a singleton' }");
+  T.CheckFalse(T.NewObject("([1])"), T.NewObject("([1])"));
+  T.CheckFalse(T.NewObject("({})"), T.NewObject("({})"));
+  T.CheckTrue(T.NewObject("(o)"), T.NewObject("(o)"));
+}
+
+
+TEST(BinopNotEqual) {
+  FunctionTester T("(function(a,b) { return a != b; })");
+
+  T.CheckFalse(7, 7);
+  T.CheckTrue(7, 8);
+  T.CheckFalse(7.1, 7.1);
+  T.CheckTrue(7.1, 8.1);
+
+  T.CheckFalse(T.Val("7.1"), T.Val("7.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("7.1"));
+
+  CompileRun("var o = { desc : 'I am a singleton' }");
+  T.CheckTrue(T.NewObject("([1])"), T.NewObject("([1])"));
+  T.CheckTrue(T.NewObject("({})"), T.NewObject("({})"));
+  T.CheckFalse(T.NewObject("(o)"), T.NewObject("(o)"));
+}
+
+
+TEST(BinopLessThan) {
+  FunctionTester T("(function(a,b) { return a < b; })");
+
+  T.CheckTrue(7, 8);
+  T.CheckFalse(8, 7);
+  T.CheckTrue(-8.1, -8);
+  T.CheckFalse(-8, -8.1);
+  T.CheckFalse(0.111, 0.111);
+
+  T.CheckFalse(T.Val("7.1"), T.Val("7.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("6.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("7.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("8.1"));
+}
+
+
+TEST(BinopLessThanEqual) {
+  FunctionTester T("(function(a,b) { return a <= b; })");
+
+  T.CheckTrue(7, 8);
+  T.CheckFalse(8, 7);
+  T.CheckTrue(-8.1, -8);
+  T.CheckFalse(-8, -8.1);
+  T.CheckTrue(0.111, 0.111);
+
+  T.CheckTrue(T.Val("7.1"), T.Val("7.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("6.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("7.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("8.1"));
+}
+
+
+TEST(BinopGreaterThan) {
+  FunctionTester T("(function(a,b) { return a > b; })");
+
+  T.CheckFalse(7, 8);
+  T.CheckTrue(8, 7);
+  T.CheckFalse(-8.1, -8);
+  T.CheckTrue(-8, -8.1);
+  T.CheckFalse(0.111, 0.111);
+
+  T.CheckFalse(T.Val("7.1"), T.Val("7.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("6.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("7.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("8.1"));
+}
+
+
+TEST(BinopGreaterThanOrEqual) {
+  FunctionTester T("(function(a,b) { return a >= b; })");
+
+  T.CheckFalse(7, 8);
+  T.CheckTrue(8, 7);
+  T.CheckFalse(-8.1, -8);
+  T.CheckTrue(-8, -8.1);
+  T.CheckTrue(0.111, 0.111);
+
+  T.CheckTrue(T.Val("7.1"), T.Val("7.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("6.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("7.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("8.1"));
+}
+
+
+TEST(BinopIn) {
+  FunctionTester T("(function(a,b) { return a in b; })");
+
+  T.CheckTrue(T.Val("x"), T.NewObject("({x:23})"));
+  T.CheckFalse(T.Val("y"), T.NewObject("({x:42})"));
+  T.CheckFalse(T.Val(123), T.NewObject("({x:65})"));
+  T.CheckTrue(T.Val(1), T.NewObject("([1,2,3])"));
+}
+
+
+TEST(BinopInstanceOf) {
+  FunctionTester T("(function(a,b) { return a instanceof b; })");
+
+  T.CheckTrue(T.NewObject("(new Number(23))"), T.NewObject("Number"));
+  T.CheckFalse(T.NewObject("(new Number(23))"), T.NewObject("String"));
+  T.CheckFalse(T.NewObject("(new String('a'))"), T.NewObject("Number"));
+  T.CheckTrue(T.NewObject("(new String('b'))"), T.NewObject("String"));
+  T.CheckFalse(T.Val(1), T.NewObject("Number"));
+  T.CheckFalse(T.Val("abc"), T.NewObject("String"));
+
+  CompileRun("var bound = (function() {}).bind(undefined)");
+  T.CheckTrue(T.NewObject("(new bound())"), T.NewObject("bound"));
+  T.CheckTrue(T.NewObject("(new bound())"), T.NewObject("Object"));
+  T.CheckFalse(T.NewObject("(new bound())"), T.NewObject("Number"));
+}
+
+
+TEST(UnopNot) {
+  FunctionTester T("(function(a) { return !a; })");
+
+  T.CheckCall(T.true_value(), T.false_value(), T.undefined());
+  T.CheckCall(T.false_value(), T.true_value(), T.undefined());
+  T.CheckCall(T.true_value(), T.Val(0.0), T.undefined());
+  T.CheckCall(T.false_value(), T.Val(123), T.undefined());
+  T.CheckCall(T.false_value(), T.Val("x"), T.undefined());
+  T.CheckCall(T.true_value(), T.undefined(), T.undefined());
+  T.CheckCall(T.true_value(), T.nan(), T.undefined());
+}
+
+
+TEST(UnopCountPost) {
+  FunctionTester T("(function(a) { return a++; })");
+
+  T.CheckCall(T.Val(0.0), T.Val(0.0), T.undefined());
+  T.CheckCall(T.Val(2.3), T.Val(2.3), T.undefined());
+  T.CheckCall(T.Val(123), T.Val(123), T.undefined());
+  T.CheckCall(T.Val(7), T.Val("7"), T.undefined());
+  T.CheckCall(T.nan(), T.Val("x"), T.undefined());
+  T.CheckCall(T.nan(), T.undefined(), T.undefined());
+  T.CheckCall(T.Val(1.0), T.true_value(), T.undefined());
+  T.CheckCall(T.Val(0.0), T.false_value(), T.undefined());
+  T.CheckCall(T.nan(), T.nan(), T.undefined());
+}
+
+
+TEST(UnopCountPre) {
+  FunctionTester T("(function(a) { return ++a; })");
+
+  T.CheckCall(T.Val(1.0), T.Val(0.0), T.undefined());
+  T.CheckCall(T.Val(3.3), T.Val(2.3), T.undefined());
+  T.CheckCall(T.Val(124), T.Val(123), T.undefined());
+  T.CheckCall(T.Val(8), T.Val("7"), T.undefined());
+  T.CheckCall(T.nan(), T.Val("x"), T.undefined());
+  T.CheckCall(T.nan(), T.undefined(), T.undefined());
+  T.CheckCall(T.Val(2.0), T.true_value(), T.undefined());
+  T.CheckCall(T.Val(1.0), T.false_value(), T.undefined());
+  T.CheckCall(T.nan(), T.nan(), T.undefined());
+}
+
+
+TEST(PropertyNamedLoad) {
+  FunctionTester T("(function(a,b) { return a.x; })");
+
+  T.CheckCall(T.Val(23), T.NewObject("({x:23})"), T.undefined());
+  T.CheckCall(T.undefined(), T.NewObject("({y:23})"), T.undefined());
+}
+
+
+TEST(PropertyKeyedLoad) {
+  FunctionTester T("(function(a,b) { return a[b]; })");
+
+  T.CheckCall(T.Val(23), T.NewObject("({x:23})"), T.Val("x"));
+  T.CheckCall(T.Val(42), T.NewObject("([23,42,65])"), T.Val(1));
+  T.CheckCall(T.undefined(), T.NewObject("({x:23})"), T.Val("y"));
+  T.CheckCall(T.undefined(), T.NewObject("([23,42,65])"), T.Val(4));
+}
+
+
+TEST(PropertyNamedStore) {
+  FunctionTester T("(function(a) { a.x = 7; return a.x; })");
+
+  T.CheckCall(T.Val(7), T.NewObject("({})"), T.undefined());
+  T.CheckCall(T.Val(7), T.NewObject("({x:23})"), T.undefined());
+}
+
+
+TEST(PropertyKeyedStore) {
+  FunctionTester T("(function(a,b) { a[b] = 7; return a.x; })");
+
+  T.CheckCall(T.Val(7), T.NewObject("({})"), T.Val("x"));
+  T.CheckCall(T.Val(7), T.NewObject("({x:23})"), T.Val("x"));
+  T.CheckCall(T.Val(9), T.NewObject("({x:9})"), T.Val("y"));
+}
+
+
+TEST(PropertyNamedDelete) {
+  FunctionTester T("(function(a) { return delete a.x; })");
+
+  CompileRun("var o = Object.create({}, { x: { value:23 } });");
+  T.CheckTrue(T.NewObject("({x:42})"), T.undefined());
+  T.CheckTrue(T.NewObject("({})"), T.undefined());
+  T.CheckFalse(T.NewObject("(o)"), T.undefined());
+}
+
+
+TEST(PropertyKeyedDelete) {
+  FunctionTester T("(function(a, b) { return delete a[b]; })");
+
+  CompileRun("function getX() { return 'x'; }");
+  CompileRun("var o = Object.create({}, { x: { value:23 } });");
+  T.CheckTrue(T.NewObject("({x:42})"), T.Val("x"));
+  T.CheckFalse(T.NewObject("(o)"), T.Val("x"));
+  T.CheckFalse(T.NewObject("(o)"), T.NewObject("({toString:getX})"));
+}
+
+
+TEST(GlobalLoad) {
+  FunctionTester T("(function() { return g; })");
+
+  T.CheckThrows(T.undefined(), T.undefined());
+  CompileRun("var g = 23;");
+  T.CheckCall(T.Val(23));
+}
+
+
+TEST(GlobalStoreSloppy) {
+  FunctionTester T("(function(a,b) { g = a + b; return g; })");
+
+  T.CheckCall(T.Val(33), T.Val(22), T.Val(11));
+  CompileRun("delete g");
+  CompileRun("const g = 23");
+  T.CheckCall(T.Val(23), T.Val(55), T.Val(44));
+}
+
+
+TEST(GlobalStoreStrict) {
+  FunctionTester T("(function(a,b) { 'use strict'; g = a + b; return g; })");
+
+  T.CheckThrows(T.Val(22), T.Val(11));
+  CompileRun("var g = 'a global variable';");
+  T.CheckCall(T.Val(33), T.Val(22), T.Val(11));
+}
+
+
+TEST(ContextLoad) {
+  FunctionTester T("(function(a,b) { (function(){a}); return a + b; })");
+
+  T.CheckCall(T.Val(65), T.Val(23), T.Val(42));
+  T.CheckCall(T.Val("ab"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(ContextStore) {
+  FunctionTester T("(function(a,b) { (function(){x}); var x = a; return x; })");
+
+  T.CheckCall(T.Val(23), T.Val(23), T.undefined());
+  T.CheckCall(T.Val("a"), T.Val("a"), T.undefined());
+}
+
+
+TEST(LookupLoad) {
+  FunctionTester T("(function(a,b) { with(a) { return x + b; } })");
+
+  T.CheckCall(T.Val(24), T.NewObject("({x:23})"), T.Val(1));
+  T.CheckCall(T.Val(32), T.NewObject("({x:23, b:9})"), T.Val(2));
+  T.CheckCall(T.Val(45), T.NewObject("({__proto__:{x:42}})"), T.Val(3));
+  T.CheckCall(T.Val(69), T.NewObject("({get x() { return 65; }})"), T.Val(4));
+}
+
+
+TEST(LookupStore) {
+  FunctionTester T("(function(a,b) { var x; with(a) { x = b; } return x; })");
+
+  T.CheckCall(T.undefined(), T.NewObject("({x:23})"), T.Val(1));
+  T.CheckCall(T.Val(2), T.NewObject("({y:23})"), T.Val(2));
+  T.CheckCall(T.Val(23), T.NewObject("({b:23})"), T.Val(3));
+  T.CheckCall(T.undefined(), T.NewObject("({__proto__:{x:42}})"), T.Val(4));
+}
+
+
+TEST(BlockLoadStore) {
+  FLAG_harmony_scoping = true;
+  FunctionTester T("(function(a) { 'use strict'; { let x = a+a; return x; }})");
+
+  T.CheckCall(T.Val(46), T.Val(23));
+  T.CheckCall(T.Val("aa"), T.Val("a"));
+}
+
+
+TEST(BlockLoadStoreNested) {
+  FLAG_harmony_scoping = true;
+  const char* src =
+      "(function(a,b) {"
+      "'use strict';"
+      "{ let x = a, y = a;"
+      "  { let y = b;"
+      "    return x + y;"
+      "  }"
+      "}})";
+  FunctionTester T(src);
+
+  T.CheckCall(T.Val(65), T.Val(23), T.Val(42));
+  T.CheckCall(T.Val("ab"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(ObjectLiteralComputed) {
+  FunctionTester T("(function(a,b) { o = { x:a+b }; return o.x; })");
+
+  T.CheckCall(T.Val(65), T.Val(23), T.Val(42));
+  T.CheckCall(T.Val("ab"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(ObjectLiteralNonString) {
+  FunctionTester T("(function(a,b) { o = { 7:a+b }; return o[7]; })");
+
+  T.CheckCall(T.Val(65), T.Val(23), T.Val(42));
+  T.CheckCall(T.Val("ab"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(ObjectLiteralPrototype) {
+  FunctionTester T("(function(a) { o = { __proto__:a }; return o.x; })");
+
+  T.CheckCall(T.Val(23), T.NewObject("({x:23})"), T.undefined());
+  T.CheckCall(T.undefined(), T.NewObject("({y:42})"), T.undefined());
+}
+
+
+TEST(ObjectLiteralGetter) {
+  FunctionTester T("(function(a) { o = { get x() {return a} }; return o.x; })");
+
+  T.CheckCall(T.Val(23), T.Val(23), T.undefined());
+  T.CheckCall(T.Val("x"), T.Val("x"), T.undefined());
+}
+
+
+TEST(ArrayLiteral) {
+  FunctionTester T("(function(a,b) { o = [1, a + b, 3]; return o[1]; })");
+
+  T.CheckCall(T.Val(65), T.Val(23), T.Val(42));
+  T.CheckCall(T.Val("ab"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(RegExpLiteral) {
+  FunctionTester T("(function(a) { o = /b/; return o.test(a); })");
+
+  T.CheckTrue(T.Val("abc"));
+  T.CheckFalse(T.Val("xyz"));
+}
diff --git a/test/cctest/compiler/test-run-machops.cc b/test/cctest/compiler/test-run-machops.cc
new file mode 100644 (file)
index 0000000..1f71c4a
--- /dev/null
@@ -0,0 +1,3798 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+#include "src/v8.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+#if V8_TURBOFAN_TARGET
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+typedef RawMachineAssembler::Label MLabel;
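+// Each test below builds a graph directly with RawMachineAssembler,
+// compiles it through the machine-level backend, and invokes the
+// generated code from C++ via Call().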
+
+TEST(RunInt32Add) {
+  RawMachineAssemblerTester<int32_t> m;
+  Node* add = m.Int32Add(m.Int32Constant(0), m.Int32Constant(1));
+  m.Return(add);
+  CHECK_EQ(1, m.Call());
+}
+
+
+static Node* Int32Input(RawMachineAssemblerTester<int32_t>* m, int index) {
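+  // Return a mix of parameters, immediates and a load so the instruction
+  // selector sees register, immediate and memory operands.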
+  switch (index) {
+    case 0:
+      return m->Parameter(0);
+    case 1:
+      return m->Parameter(1);
+    case 2:
+      return m->Int32Constant(0);
+    case 3:
+      return m->Int32Constant(1);
+    case 4:
+      return m->Int32Constant(-1);
+    case 5:
+      return m->Int32Constant(0xff);
+    case 6:
+      return m->Int32Constant(0x01234567);
+    case 7:
+      return m->Load(kMachineWord32, m->PointerConstant(NULL));
+    default:
+      return NULL;
+  }
+}
+
+
+TEST(CodeGenInt32Binop) {
+  RawMachineAssemblerTester<void> m;
+
+  Operator* ops[] = {
+      m.machine()->Word32And(),      m.machine()->Word32Or(),
+      m.machine()->Word32Xor(),      m.machine()->Word32Shl(),
+      m.machine()->Word32Shr(),      m.machine()->Word32Sar(),
+      m.machine()->Word32Equal(),    m.machine()->Int32Add(),
+      m.machine()->Int32Sub(),       m.machine()->Int32Mul(),
+      m.machine()->Int32Div(),       m.machine()->Int32UDiv(),
+      m.machine()->Int32Mod(),       m.machine()->Int32UMod(),
+      m.machine()->Int32LessThan(),  m.machine()->Int32LessThanOrEqual(),
+      m.machine()->Uint32LessThan(), m.machine()->Uint32LessThanOrEqual(),
+      NULL};
+
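+  // Only code generation is exercised; the code is never run, so the
+  // NULL-pointer load from Int32Input is never executed.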
+  for (int i = 0; ops[i] != NULL; i++) {
+    for (int j = 0; j < 8; j++) {
+      for (int k = 0; k < 8; k++) {
+        RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+        Node* a = Int32Input(&m, j);
+        Node* b = Int32Input(&m, k);
+        m.Return(m.NewNode(ops[i], a, b));
+        m.GenerateCode();
+      }
+    }
+  }
+}
+
+
+TEST(RunGoto) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 99999;
+
+  MLabel next;
+  m.Goto(&next);
+  m.Bind(&next);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunGotoMultiple) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 9999977;
+
+  MLabel labels[10];
+  for (size_t i = 0; i < ARRAY_SIZE(labels); i++) {
+    m.Goto(&labels[i]);
+    m.Bind(&labels[i]);
+  }
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunBranch) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 999777;
+
+  MLabel blocka, blockb;
+  m.Branch(m.Int32Constant(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(0 - constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunRedundantBranch1) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 944777;
+
+  MLabel blocka;
+  m.Branch(m.Int32Constant(0), &blocka, &blocka);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunRedundantBranch2) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 955777;
+
+  MLabel blocka, blockb;
+  m.Branch(m.Int32Constant(0), &blocka, &blocka);
+  m.Bind(&blockb);
+  m.Goto(&blocka);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunRedundantBranch3) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 966777;
+
+  MLabel blocka, blockb, blockc;
+  m.Branch(m.Int32Constant(0), &blocka, &blockc);
+  m.Bind(&blocka);
+  m.Branch(m.Int32Constant(0), &blockb, &blockb);
+  m.Bind(&blockc);
+  m.Goto(&blockb);
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunDiamond2) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  int constant = 995666;
+
+  MLabel blocka, blockb, end;
+  m.Branch(m.Int32Constant(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Goto(&end);
+  m.Bind(&blockb);
+  m.Goto(&end);
+  m.Bind(&end);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunLoop) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 999555;
+
+  MLabel header, body, exit;
+  m.Goto(&header);
+  m.Bind(&header);
+  m.Branch(m.Int32Constant(0), &body, &exit);
+  m.Bind(&body);
+  m.Goto(&header);
+  m.Bind(&exit);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
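+// Builds the equivalent of (cond ? true_node : false_node): a branch whose
+// arms immediately merge, with a phi selecting the value at the merge.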
+template <typename R>
+static void BuildDiamondPhi(RawMachineAssemblerTester<R>* m, Node* cond_node,
+                            Node* true_node, Node* false_node) {
+  MLabel blocka, blockb;
+  MLabel* end = m->Exit();
+  m->Branch(cond_node, &blocka, &blockb);
+  m->Bind(&blocka);
+  m->Goto(end);
+  m->Bind(&blockb);
+  m->Goto(end);
+
+  m->Bind(end);
+  Node* phi = m->Phi(true_node, false_node);
+  m->Return(phi);
+}
+
+
+TEST(RunDiamondPhiConst) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  int false_val = 0xFF666;
+  int true_val = 0x00DDD;
+  Node* true_node = m.Int32Constant(true_val);
+  Node* false_node = m.Int32Constant(false_val);
+  BuildDiamondPhi(&m, m.Parameter(0), true_node, false_node);
+  CHECK_EQ(false_val, m.Call(0));
+  CHECK_EQ(true_val, m.Call(1));
+}
+
+
+TEST(RunDiamondPhiNumber) {
+  RawMachineAssemblerTester<Object*> m(kMachineWord32);
+  double false_val = -11.1;
+  double true_val = 200.1;
+  Node* true_node = m.NumberConstant(true_val);
+  Node* false_node = m.NumberConstant(false_val);
+  BuildDiamondPhi(&m, m.Parameter(0), true_node, false_node);
+  m.CheckNumber(false_val, m.Call(0));
+  m.CheckNumber(true_val, m.Call(1));
+}
+
+
+TEST(RunDiamondPhiString) {
+  RawMachineAssemblerTester<Object*> m(kMachineWord32);
+  const char* false_val = "false";
+  const char* true_val = "true";
+  Node* true_node = m.StringConstant(true_val);
+  Node* false_node = m.StringConstant(false_val);
+  BuildDiamondPhi(&m, m.Parameter(0), true_node, false_node);
+  m.CheckString(false_val, m.Call(0));
+  m.CheckString(true_val, m.Call(1));
+}
+
+
+TEST(RunDiamondPhiParam) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                       kMachineWord32);
+  BuildDiamondPhi(&m, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+  int32_t c1 = 0x260cb75a;
+  int32_t c2 = 0xcd3e9c8b;
+  int result = m.Call(0, c1, c2);
+  CHECK_EQ(c2, result);
+  result = m.Call(1, c1, c2);
+  CHECK_EQ(c1, result);
+}
+
+
+TEST(RunLoopPhiConst) {
+  RawMachineAssemblerTester<int32_t> m;
+  int true_val = 0x44000;
+  int false_val = 0x00888;
+
+  Node* cond_node = m.Int32Constant(0);
+  Node* true_node = m.Int32Constant(true_val);
+  Node* false_node = m.Int32Constant(false_val);
+
+  // x = false_val; while(false) { x = true_val; } return x;
+  MLabel body, header;
+  MLabel* end = m.Exit();
+
+  m.Goto(&header);
+  m.Bind(&header);
+  Node* phi = m.Phi(false_node, true_node);
+  m.Branch(cond_node, &body, end);
+  m.Bind(&body);
+  m.Goto(&header);
+  m.Bind(end);
+  m.Return(phi);
+
+  CHECK_EQ(false_val, m.Call());
+}
+
+
+TEST(RunLoopPhiParam) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                       kMachineWord32);
+
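+  // Roughly: x = param1; c = param0;
+  // while (c) { x = param2; c = 0; } return x;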
+  MLabel blocka, blockb;
+  MLabel* end = m.Exit();
+
+  m.Goto(&blocka);
+
+  m.Bind(&blocka);
+  Node* phi = m.Phi(m.Parameter(1), m.Parameter(2));
+  Node* cond = m.Phi(m.Parameter(0), m.Int32Constant(0));
+  m.Branch(cond, &blockb, end);
+
+  m.Bind(&blockb);
+  m.Goto(&blocka);
+
+  m.Bind(end);
+  m.Return(phi);
+
+  int32_t c1 = 0xa81903b4;
+  int32_t c2 = 0x5a1207da;
+  int result = m.Call(0, c1, c2);
+  CHECK_EQ(c1, result);
+  result = m.Call(1, c1, c2);
+  CHECK_EQ(c2, result);
+}
+
+
+TEST(RunLoopPhiInduction) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  int false_val = 0x10777;
+
+  // x = false_val; while(false) { x++; } return x;
+  MLabel header, body;
+  MLabel* end = m.Exit();
+  Node* false_node = m.Int32Constant(false_val);
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* phi = m.Phi(false_node, false_node);
+  m.Branch(m.Int32Constant(0), &body, end);
+
+  m.Bind(&body);
+  Node* add = m.Int32Add(phi, m.Int32Constant(1));
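+  // Patch the phi's second input to the increment, closing the
+  // loop-carried dependence along the back edge.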
+  phi->ReplaceInput(1, add);
+  m.Goto(&header);
+
+  m.Bind(end);
+  m.Return(phi);
+
+  CHECK_EQ(false_val, m.Call());
+}
+
+
+TEST(RunLoopIncrement) {
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+
+  // x = 0; while(x ^ param) { x++; } return x;
+  MLabel header, body;
+  MLabel* end = m.Exit();
+  Node* zero = m.Int32Constant(0);
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* phi = m.Phi(zero, zero);
+  m.Branch(m.WordXor(phi, bt.param0), &body, end);
+
+  m.Bind(&body);
+  phi->ReplaceInput(1, m.Int32Add(phi, m.Int32Constant(1)));
+  m.Goto(&header);
+
+  m.Bind(end);
+  bt.AddReturn(phi);
+
+  CHECK_EQ(11, bt.call(11, 0));
+  CHECK_EQ(110, bt.call(110, 0));
+  CHECK_EQ(176, bt.call(176, 0));
+}
+
+
+TEST(RunLoopIncrement2) {
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+
+  // x = 0; while(x < param) { x++; } return x;
+  MLabel header, body;
+  MLabel* end = m.Exit();
+  Node* zero = m.Int32Constant(0);
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* phi = m.Phi(zero, zero);
+  m.Branch(m.Int32LessThan(phi, bt.param0), &body, end);
+
+  m.Bind(&body);
+  phi->ReplaceInput(1, m.Int32Add(phi, m.Int32Constant(1)));
+  m.Goto(&header);
+
+  m.Bind(end);
+  bt.AddReturn(phi);
+
+  CHECK_EQ(11, bt.call(11, 0));
+  CHECK_EQ(110, bt.call(110, 0));
+  CHECK_EQ(176, bt.call(176, 0));
+  CHECK_EQ(0, bt.call(-200, 0));
+}
+
+
+TEST(RunLoopIncrement3) {
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+
+  // x = 0; while(x < param) { x++; } return x;
+  MLabel header, body;
+  MLabel* end = m.Exit();
+  Node* zero = m.Int32Constant(0);
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* phi = m.Phi(zero, zero);
+  m.Branch(m.Uint32LessThan(phi, bt.param0), &body, end);
+
+  m.Bind(&body);
+  phi->ReplaceInput(1, m.Int32Add(phi, m.Int32Constant(1)));
+  m.Goto(&header);
+
+  m.Bind(end);
+  bt.AddReturn(phi);
+
+  CHECK_EQ(11, bt.call(11, 0));
+  CHECK_EQ(110, bt.call(110, 0));
+  CHECK_EQ(176, bt.call(176, 0));
+  CHECK_EQ(200, bt.call(200, 0));
+}
+
+
+TEST(RunLoopDecrement) {
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+
+  // x = param; while(x) { x--; } return x;
+  MLabel header, body;
+  MLabel* end = m.Exit();
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* phi = m.Phi(bt.param0, m.Int32Constant(0));
+  m.Branch(phi, &body, end);
+
+  m.Bind(&body);
+  phi->ReplaceInput(1, m.Int32Sub(phi, m.Int32Constant(1)));
+  m.Goto(&header);
+
+  m.Bind(end);
+  bt.AddReturn(phi);
+
+  CHECK_EQ(0, bt.call(11, 0));
+  CHECK_EQ(0, bt.call(110, 0));
+  CHECK_EQ(0, bt.call(197, 0));
+}
+
+
+TEST(RunLoopIncrementFloat64) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  // x = -3.0; while(x < 10) { x = x + 0.5; } return (int) x;
+  MLabel header, body;
+  MLabel* end = m.Exit();
+  Node* minus_3 = m.Float64Constant(-3.0);
+  Node* ten = m.Float64Constant(10.0);
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* phi = m.Phi(minus_3, ten);
+  m.Branch(m.Float64LessThan(phi, ten), &body, end);
+
+  m.Bind(&body);
+  phi->ReplaceInput(1, m.Float64Add(phi, m.Float64Constant(0.5)));
+  m.Goto(&header);
+
+  m.Bind(end);
+  m.Return(m.ConvertFloat64ToInt32(phi));
+
+  CHECK_EQ(10, m.Call());
+}
+
+
+TEST(RunLoadInt32) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  int32_t p1 = 0;  // loads directly from this location.
+  m.Return(m.LoadFromPointer(&p1, kMachineWord32));
+
+  FOR_INT32_INPUTS(i) {
+    p1 = *i;
+    CHECK_EQ(p1, m.Call());
+  }
+}
+
+
+TEST(RunLoadInt32Offset) {
+  int32_t p1 = 0;  // loads directly from this location.
+
+  int32_t offsets[] = {-2000000, -100, -101, 1,          3,
+                       7,        120,  2000, 2000000000, 0xff};
+
+  for (size_t i = 0; i < ARRAY_SIZE(offsets); i++) {
+    RawMachineAssemblerTester<int32_t> m;
+    int32_t offset = offsets[i];
+    byte* pointer = reinterpret_cast<byte*>(&p1) - offset;
+    // generate load [#base + #index]
+    m.Return(m.LoadFromPointer(pointer, kMachineWord32, offset));
+
+    FOR_INT32_INPUTS(j) {
+      p1 = *j;
+      CHECK_EQ(p1, m.Call());
+    }
+  }
+}
+
+
+TEST(RunLoadStoreFloat64Offset) {
+  double p1 = 0;  // loads directly from this location.
+  double p2 = 0;  // and stores directly into this location.
+
+  FOR_INT32_INPUTS(i) {
+    int32_t magic = 0x2342aabb + *i * 3;
+    RawMachineAssemblerTester<int32_t> m;
+    int32_t offset = *i;
+    byte* from = reinterpret_cast<byte*>(&p1) - offset;
+    byte* to = reinterpret_cast<byte*>(&p2) - offset;
+    // generate load [#base + #index]
+    Node* load = m.Load(kMachineFloat64, m.PointerConstant(from),
+                        m.Int32Constant(offset));
+    m.Store(kMachineFloat64, m.PointerConstant(to), m.Int32Constant(offset),
+            load);
+    m.Return(m.Int32Constant(magic));
+
+    FOR_FLOAT64_INPUTS(j) {
+      p1 = *j;
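+      // Seed p2 differently from p1 so a missing store is detected.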
+      p2 = *j - 5;
+      CHECK_EQ(magic, m.Call());
+      CHECK_EQ(p1, p2);
+    }
+  }
+}
+
+
+TEST(RunInt32AddP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+
+  bt.AddReturn(m.Int32Add(bt.param0, bt.param1));
+
+  FOR_INT32_INPUTS(i) {
+    FOR_INT32_INPUTS(j) {
+      // Use uint32_t because signed overflow is UB in C.
+      int expected = static_cast<int32_t>(static_cast<uint32_t>(*i) +
+                                          static_cast<uint32_t>(*j));
+      CHECK_EQ(expected, bt.call(*i, *j));
+    }
+  }
+}
+
+
+TEST(RunInt32AddAndWord32SarP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Add(m.Parameter(0),
+                        m.Word32Sar(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
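+          // Word32 shifts use only the low five bits of the count, so the
+          // reference computation masks the count the same way. Signed >>
+          // is assumed to compile to an arithmetic shift.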
+          uint32_t shift = *k & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = *i + (*j >> shift);
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Add(m.Word32Sar(m.Parameter(0), m.Parameter(1)),
+                        m.Parameter(2)));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *j & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = (*i >> shift) + *k;
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32AddAndWord32ShlP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Add(m.Parameter(0),
+                        m.Word32Shl(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *k & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = *i + (*j << shift);
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Add(m.Word32Shl(m.Parameter(0), m.Parameter(1)),
+                        m.Parameter(2)));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *j & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = (*i << shift) + *k;
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32AddAndWord32ShrP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Add(m.Parameter(0),
+                        m.Word32Shr(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *k & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = *i + (*j >> shift);
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Add(m.Word32Shr(m.Parameter(0), m.Parameter(1)),
+                        m.Parameter(2)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *j & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = (*i >> shift) + *k;
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32AddInBranch) {
+  static const int32_t constant = 987654321;
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32Equal(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i + *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32NotEqual(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i + *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Int32Add(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i + *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32NotEqual(m.Int32Add(m.Int32Constant(*i), m.Parameter(0)),
+                                m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i + *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
+                         m.machine()->Word32Shr()};
+    for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                           kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Int32Add(m.Parameter(0),
+                                        m.NewNode(shops[n], m.Parameter(1),
+                                                  m.Parameter(2))),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_INPUTS(k) {
+            uint32_t shift = *k & 0x1F;
+            int32_t right;
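+            // Assumes the host compiler wraps signed << rather than
+            // treating the overflow as UB, matching Word32Shl.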
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = *j << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = ((*i + right) == 0) ? constant : 0 - constant;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32AddInComparison) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i + *j) == 0;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Constant(0), m.Int32Add(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i + *j) == 0;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Equal(m.Int32Add(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i + *j) == 0;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Equal(m.Int32Add(m.Parameter(0), m.Int32Constant(*i)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*j + *i) == 0;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
+                         m.machine()->Word32Shr()};
+    for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                           kMachineWord32);
+      m.Return(m.Word32Equal(
+          m.Int32Add(m.Parameter(0),
+                     m.NewNode(shops[n], m.Parameter(1), m.Parameter(2))),
+          m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_INPUTS(k) {
+            uint32_t shift = *k & 0x1F;
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = *j << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = (*i + right) == 0;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32SubP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+
+  m.Return(m.Int32Sub(bt.param0, bt.param1));
+
+  FOR_UINT32_INPUTS(i) {
+    FOR_UINT32_INPUTS(j) {
+      // Use uint32_t because signed overflow is UB in C.
+      int expected = static_cast<int32_t>(*i - *j);
+      CHECK_EQ(expected, bt.call(*i, *j));
+    }
+  }
+}
+
+
+TEST(RunInt32SubImm) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)));
+      FOR_UINT32_INPUTS(j) {
+        // Use uint32_t because signed overflow is UB in C.
+        int32_t expected = static_cast<int32_t>(*i - *j);
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(*i)));
+      FOR_UINT32_INPUTS(j) {
+        // Use uint32_t because signed overflow is UB in C.
+        int32_t expected = static_cast<int32_t>(*j - *i);
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32SubAndWord32SarP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Sub(m.Parameter(0),
+                        m.Word32Sar(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *k & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = *i - (*j >> shift);
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Sub(m.Word32Sar(m.Parameter(0), m.Parameter(1)),
+                        m.Parameter(2)));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *j & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = (*i >> shift) - *k;
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32SubAndWord32ShlP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Sub(m.Parameter(0),
+                        m.Word32Shl(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *k & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = *i - (*j << shift);
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Sub(m.Word32Shl(m.Parameter(0), m.Parameter(1)),
+                        m.Parameter(2)));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *j & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = (*i << shift) - *k;
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32SubAndWord32ShrP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Sub(m.Parameter(0),
+                        m.Word32Shr(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *k & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = *i - (*j >> shift);
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Sub(m.Word32Shr(m.Parameter(0), m.Parameter(1)),
+                        m.Parameter(2)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *j & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = (*i >> shift) - *k;
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32SubInBranch) {
+  static const int constant = 987654321;
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32Equal(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i - *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32NotEqual(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i - *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i - *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32NotEqual(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)),
+                                m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i - *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
+                         m.machine()->Word32Shr()};
+    for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                           kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Int32Sub(m.Parameter(0),
+                                        m.NewNode(shops[n], m.Parameter(1),
+                                                  m.Parameter(2))),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_INPUTS(k) {
+            uint32_t shift = *k & 0x1F;
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = *j << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = ((*i - right) == 0) ? constant : 0 - constant;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32SubInComparison) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i - *j) == 0;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Constant(0), m.Int32Sub(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i - *j) == 0;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Equal(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i - *j) == 0;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Equal(m.Int32Sub(m.Parameter(0), m.Int32Constant(*i)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*j - *i) == 0;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
+                         m.machine()->Word32Shr()};
+    for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                           kMachineWord32);
+      m.Return(m.Word32Equal(
+          m.Int32Sub(m.Parameter(0),
+                     m.NewNode(shops[n], m.Parameter(1), m.Parameter(2))),
+          m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_INPUTS(k) {
+            uint32_t shift = *k & 0x1F;
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = *j << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = (*i - right) == 0;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32MulP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Mul(bt.param0, bt.param1));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        // Use uint32_t because signed overflow is UB in C.
+        int expected = static_cast<int32_t>(static_cast<uint32_t>(*i) *
+                                            static_cast<uint32_t>(*j));
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Mul(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int expected = static_cast<int32_t>(*i * *j);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32MulImm) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Int32Mul(m.Int32Constant(*i), m.Parameter(0)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = static_cast<int32_t>(*i * *j);
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant(*i)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = static_cast<int32_t>(*j * *i);
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32MulAndInt32AddP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(
+        m.Int32Add(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_INT32_INPUTS(k) {
+          int32_t p0 = *i;
+          int32_t p1 = *j;
+          int32_t p2 = *k;
+          // Use uint32_t because signed overflow is UB in C.
+          int expected = static_cast<int32_t>(
+              static_cast<uint32_t>(p0) +
+              static_cast<uint32_t>(p1) * static_cast<uint32_t>(p2));
+          CHECK_EQ(expected, m.Call(p0, p1, p2));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(
+        m.Int32Add(m.Int32Mul(m.Parameter(0), m.Parameter(1)), m.Parameter(2)));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_INT32_INPUTS(k) {
+          int32_t p0 = *i;
+          int32_t p1 = *j;
+          int32_t p2 = *k;
+          // Use uint32_t because signed overflow is UB in C.
+          int expected = static_cast<int32_t>(
+              static_cast<uint32_t>(p0) * static_cast<uint32_t>(p1) +
+              static_cast<uint32_t>(p2));
+          CHECK_EQ(expected, m.Call(p0, p1, p2));
+        }
+      }
+    }
+  }
+  {
+    FOR_INT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m;
+      Int32BinopTester bt(&m);
+      bt.AddReturn(
+          m.Int32Add(m.Int32Constant(*i), m.Int32Mul(bt.param0, bt.param1)));
+      FOR_INT32_INPUTS(j) {
+        FOR_INT32_INPUTS(k) {
+          int32_t p0 = *j;
+          int32_t p1 = *k;
+          // Use uint32_t because signed overflow is UB in C.
+          int expected = static_cast<int32_t>(
+              static_cast<uint32_t>(*i) +
+              static_cast<uint32_t>(p0) * static_cast<uint32_t>(p1));
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32MulAndInt32SubP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(
+        m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_INT32_INPUTS(k) {
+          uint32_t p0 = *i;
+          int32_t p1 = *j;
+          int32_t p2 = *k;
+          // Use uint32_t because signed overflow is UB in C.
+          int expected = static_cast<int32_t>(
+              p0 - static_cast<uint32_t>(p1) * static_cast<uint32_t>(p2));
+          CHECK_EQ(expected, m.Call(p0, p1, p2));
+        }
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m;
+      Int32BinopTester bt(&m);
+      bt.AddReturn(
+          m.Int32Sub(m.Int32Constant(*i), m.Int32Mul(bt.param0, bt.param1)));
+      FOR_INT32_INPUTS(j) {
+        FOR_INT32_INPUTS(k) {
+          int32_t p0 = *j;
+          int32_t p1 = *k;
+          // Use uint32_t because signed overflow is UB in C.
+          int expected = static_cast<int32_t>(
+              *i - static_cast<uint32_t>(p0) * static_cast<uint32_t>(p1));
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32DivP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Div(bt.param0, bt.param1));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int p0 = *i;
+        int p1 = *j;
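+        // Skip the two divisions that are UB in C: by zero and
+        // INT32_MIN / -1.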
+        if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
+          int expected = static_cast<int32_t>(p0 / p1);
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Add(bt.param0, m.Int32Div(bt.param0, bt.param1)));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int p0 = *i;
+        int p1 = *j;
+        if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
+          int expected = static_cast<int32_t>(p0 + (p0 / p1));
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32UDivP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32UDiv(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t p0 = *i;
+        uint32_t p1 = *j;
+        if (p1 != 0) {
+          uint32_t expected = static_cast<uint32_t>(p0 / p1);
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Add(bt.param0, m.Int32UDiv(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t p0 = *i;
+        uint32_t p1 = *j;
+        if (p1 != 0) {
+          uint32_t expected = static_cast<uint32_t>(p0 + (p0 / p1));
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32ModP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Mod(bt.param0, bt.param1));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int p0 = *i;
+        int p1 = *j;
+        if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
+          int expected = static_cast<int32_t>(p0 % p1);
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Add(bt.param0, m.Int32Mod(bt.param0, bt.param1)));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int p0 = *i;
+        int p1 = *j;
+        if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
+          int expected = static_cast<int32_t>(p0 + (p0 % p1));
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32UModP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32UMod(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t p0 = *i;
+        uint32_t p1 = *j;
+        if (p1 != 0) {
+          uint32_t expected = static_cast<uint32_t>(p0 % p1);
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Add(bt.param0, m.Int32UMod(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t p0 = *i;
+        uint32_t p1 = *j;
+        if (p1 != 0) {
+          uint32_t expected = static_cast<uint32_t>(p0 + (p0 % p1));
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32And(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i & *j;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32And(bt.param0, m.Word32Not(bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i & ~(*j);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32And(m.Word32Not(bt.param0), bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = ~(*i) & *j;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndAndWord32ShlP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Shl(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1f))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i << (*j & 0x1f);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Shl(bt.param0, m.Word32And(m.Int32Constant(0x1f), bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i << (0x1f & *j);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndAndWord32ShrP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Shr(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1f))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i >> (*j & 0x1f);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Shr(bt.param0, m.Word32And(m.Int32Constant(0x1f), bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i >> (0x1f & *j);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndAndWord32SarP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Sar(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1f))));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
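+        // Signed >> is assumed to compile to an arithmetic shift, matching
+        // Word32Sar.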
+        uint32_t expected = *i >> (*j & 0x1f);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Sar(bt.param0, m.Word32And(m.Int32Constant(0x1f), bt.param1)));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i >> (0x1f & *j);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndImm) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32And(m.Int32Constant(*i), m.Parameter(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i & *j;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32And(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i & ~(*j);
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndInBranch) {
+  static const int constant = 987654321;
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32Equal(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i & *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32NotEqual(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i & *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Word32And(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i & *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(
+          m.Word32NotEqual(m.Word32And(m.Int32Constant(*i), m.Parameter(0)),
+                           m.Int32Constant(0)),
+          &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i & *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
+                         m.machine()->Word32Shr()};
+    for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                           kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Word32And(m.Parameter(0),
+                                         m.NewNode(shops[n], m.Parameter(1),
+                                                   m.Parameter(2))),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_INPUTS(k) {
+            uint32_t shift = *k & 0x1F;
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = *j << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = ((*i & right) == 0) ? constant : 0 - constant;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndInComparison) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i & *j) == 0;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Constant(0), m.Word32And(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i & *j) == 0;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Equal(m.Word32And(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i & *j) == 0;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Equal(m.Word32And(m.Parameter(0), m.Int32Constant(*i)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*j & *i) == 0;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32OrP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Or(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i | *j;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Or(bt.param0, m.Word32Not(bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i | ~(*j);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Or(m.Word32Not(bt.param0), bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = ~(*i) | *j;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32OrImm) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i | *j;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Or(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i | ~(*j);
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32OrInBranch) {
+  static const int constant = 987654321;
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32Equal(m.Word32Or(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i | *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32NotEqual(m.Word32Or(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i | *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i | *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32NotEqual(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)),
+                                m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i | *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
+                         m.machine()->Word32Shr()};
+    for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                           kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Word32Or(m.Parameter(0),
+                                        m.NewNode(shops[n], m.Parameter(1),
+                                                  m.Parameter(2))),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_INPUTS(k) {
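+            // Only the low five bits of the shift count are significant for
+            // 32-bit shifts, so mask before computing the expected value.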
+            uint32_t shift = *k & 0x1F;
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = *j << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = ((*i | right) == 0) ? constant : 0 - constant;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32OrInComparison) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Word32Or(bt.param0, bt.param1), m.Int32Constant(0)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i | *j) == 0;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Constant(0), m.Word32Or(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i | *j) == 0;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Equal(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i | *j) == 0;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Equal(m.Word32Or(m.Parameter(0), m.Int32Constant(*i)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*j | *i) == 0;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32XorP) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i ^ *j;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Xor(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i ^ *j;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Xor(bt.param0, m.Word32Not(bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i ^ ~(*j);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Xor(m.Word32Not(bt.param0), bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = ~(*i) ^ *j;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Xor(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i ^ ~(*j);
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32XorInBranch) {
+  static const int constant = 987654321;
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32Equal(m.Word32Xor(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i ^ *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32NotEqual(m.Word32Xor(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i ^ *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i ^ *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(
+          m.Word32NotEqual(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)),
+                           m.Int32Constant(0)),
+          &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i ^ *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
+                         m.machine()->Word32Shr()};
+    for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                           kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Word32Xor(m.Parameter(0),
+                                         m.NewNode(shops[n], m.Parameter(1),
+                                                   m.Parameter(2))),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_INPUTS(k) {
+            uint32_t shift = *k & 0x1F;
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = *j << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = ((*i ^ right) == 0) ? constant : 0 - constant;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32ShlP) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      uint32_t shift = *i & 0x1F;
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Shl(m.Parameter(0), m.Int32Constant(shift)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *j << shift;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Shl(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t shift = *j & 0x1F;
+        uint32_t expected = *i << shift;
+        CHECK_EQ(expected, bt.call(*i, shift));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32ShrP) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      uint32_t shift = *i & 0x1F;
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *j >> shift;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Shr(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t shift = *j & 0x1F;
+        uint32_t expected = *i >> shift;
+        CHECK_EQ(expected, bt.call(*i, shift));
+      }
+    }
+    CHECK_EQ(0x00010000, bt.call(0x80000000, 15));
+  }
+}
+
+
+TEST(RunWord32SarP) {
+  {
+    FOR_INT32_INPUTS(i) {
+      int32_t shift = *i & 0x1F;
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Sar(m.Parameter(0), m.Int32Constant(shift)));
+      FOR_INT32_INPUTS(j) {
+        int32_t expected = *j >> shift;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Sar(bt.param0, bt.param1));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int32_t shift = *j & 0x1F;
+        int32_t expected = *i >> shift;
+        CHECK_EQ(expected, bt.call(*i, shift));
+      }
+    }
+    CHECK_EQ(0xFFFF0000, bt.call(0x80000000, 15));
+  }
+}
+
+
+TEST(RunWord32NotP) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  m.Return(m.Word32Not(m.Parameter(0)));
+  FOR_UINT32_INPUTS(i) {
+    int expected = ~(*i);
+    CHECK_EQ(expected, m.Call(*i));
+  }
+}
+
+
+TEST(RunInt32NegP) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  m.Return(m.Int32Neg(m.Parameter(0)));
+  FOR_INT32_INPUTS(i) {
+    int expected = -*i;
+    CHECK_EQ(expected, m.Call(*i));
+  }
+}
+
+
+TEST(RunWord32EqualAndWord32SarP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Word32Equal(m.Parameter(0),
+                           m.Word32Sar(m.Parameter(1), m.Parameter(2))));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *k & 0x1F;
+          int32_t expected = (*i == (*j >> shift));
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Word32Equal(m.Word32Sar(m.Parameter(0), m.Parameter(1)),
+                           m.Parameter(2)));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_INT32_INPUTS(k) {
+          uint32_t shift = *j & 0x1F;
+          int32_t expected = ((*i >> shift) == *k);
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32EqualAndWord32ShlP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Word32Equal(m.Parameter(0),
+                           m.Word32Shl(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *k & 0x1F;
+          int32_t expected = (*i == (*j << shift));
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Word32Equal(m.Word32Shl(m.Parameter(0), m.Parameter(1)),
+                           m.Parameter(2)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *j & 0x1F;
+          int32_t expected = ((*i << shift) == *k);
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32EqualAndWord32ShrP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Word32Equal(m.Parameter(0),
+                           m.Word32Shr(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *k & 0x1F;
+          int32_t expected = (*i == (*j >> shift));
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Word32Equal(m.Word32Shr(m.Parameter(0), m.Parameter(1)),
+                           m.Parameter(2)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *j & 0x1F;
+          int32_t expected = ((*i >> shift) == *k);
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunDeadNodes) {
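+  // Each case builds one node whose value is never used; the function must
+  // still return the chosen constant despite the dead node.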
+  for (int i = 0; true; i++) {
+    RawMachineAssemblerTester<int32_t> m(i == 5 ? kMachineWord32
+                                                : kMachineLast);
+    int constant = 0x55 + i;
+    switch (i) {
+      case 0:
+        m.Int32Constant(44);
+        break;
+      case 1:
+        m.StringConstant("unused");
+        break;
+      case 2:
+        m.NumberConstant(11.1);
+        break;
+      case 3:
+        m.PointerConstant(&constant);
+        break;
+      case 4:
+        m.LoadFromPointer(&constant, kMachineWord32);
+        break;
+      case 5:
+        m.Parameter(0);
+        break;
+      default:
+        return;
+    }
+    m.Return(m.Int32Constant(constant));
+    if (i != 5) {
+      CHECK_EQ(constant, m.Call());
+    } else {
+      CHECK_EQ(constant, m.Call(0));
+    }
+  }
+}
+
+
+TEST(RunDeadInt32Binops) {
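+  // A binop whose result is unused must not perturb the returned constant.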
+  RawMachineAssemblerTester<int32_t> m;
+
+  Operator* ops[] = {
+      m.machine()->Word32And(),      m.machine()->Word32Or(),
+      m.machine()->Word32Xor(),      m.machine()->Word32Shl(),
+      m.machine()->Word32Shr(),      m.machine()->Word32Sar(),
+      m.machine()->Word32Equal(),    m.machine()->Int32Add(),
+      m.machine()->Int32Sub(),       m.machine()->Int32Mul(),
+      m.machine()->Int32Div(),       m.machine()->Int32UDiv(),
+      m.machine()->Int32Mod(),       m.machine()->Int32UMod(),
+      m.machine()->Int32LessThan(),  m.machine()->Int32LessThanOrEqual(),
+      m.machine()->Uint32LessThan(), m.machine()->Uint32LessThanOrEqual(),
+      NULL};
+
+  for (int i = 0; ops[i] != NULL; i++) {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+    int constant = 0x55555 + i;
+    m.NewNode(ops[i], m.Parameter(0), m.Parameter(1));
+    m.Return(m.Int32Constant(constant));
+
+    CHECK_EQ(constant, m.Call(1, 1));
+  }
+}
+
+
+template <typename CType>
+static void RunLoadImmIndex(MachineRepresentation rep) {
+  const int kNumElems = 3;
+  CType buffer[kNumElems];
+
+  // Initialize the buffer with raw data.

+  byte* raw = reinterpret_cast<byte*>(buffer);
+  for (size_t i = 0; i < sizeof(buffer); i++) {
+    raw[i] = (i + sizeof(buffer)) ^ 0xAA;
+  }
+
+  // Test with various large and small offsets.
+  for (int offset = -1; offset <= 200000; offset *= -5) {
+    for (int i = 0; i < kNumElems; i++) {
+      RawMachineAssemblerTester<CType> m;
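+      // base and index are chosen so that base + index == &buffer[i]:
+      // (buffer - offset) in elements plus (offset + i) elements in bytes.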
+      Node* base = m.PointerConstant(buffer - offset);
+      Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
+      m.Return(m.Load(rep, base, index));
+
+      CHECK_EQ(buffer[i], m.Call());
+    }
+  }
+}
+
+
+TEST(RunLoadImmIndex) {
+  RunLoadImmIndex<int8_t>(kMachineWord8);
+  RunLoadImmIndex<int16_t>(kMachineWord16);
+  RunLoadImmIndex<int32_t>(kMachineWord32);
+  RunLoadImmIndex<int32_t*>(kMachineTagged);
+
+  // TODO(titzer): test kMachineFloat64 loads
+  // TODO(titzer): test various indexing modes.
+}
+
+
+template <typename CType>
+static void RunLoadStore(MachineRepresentation rep) {
+  const int kNumElems = 4;
+  CType buffer[kNumElems];
+
+  for (int32_t x = 0; x < kNumElems; x++) {
+    int32_t y = kNumElems - x - 1;
+    // Initialize the buffer with raw data.
+    byte* raw = reinterpret_cast<byte*>(buffer);
+    for (size_t i = 0; i < sizeof(buffer); i++) {
+      raw[i] = (i + sizeof(buffer)) ^ 0xAA;
+    }
+
+    RawMachineAssemblerTester<int32_t> m;
+    int32_t OK = 0x29000 + x;
+    Node* base = m.PointerConstant(buffer);
+    Node* index0 = m.Int32Constant(x * sizeof(buffer[0]));
+    Node* load = m.Load(rep, base, index0);
+    Node* index1 = m.Int32Constant(y * sizeof(buffer[0]));
+    m.Store(rep, base, index1, load);
+    m.Return(m.Int32Constant(OK));
+
+    CHECK_NE(buffer[x], buffer[y]);
+    CHECK_EQ(OK, m.Call());
+    CHECK_EQ(buffer[x], buffer[y]);
+  }
+}
+
+
+TEST(RunLoadStore) {
+  RunLoadStore<int8_t>(kMachineWord8);
+  RunLoadStore<int16_t>(kMachineWord16);
+  RunLoadStore<int32_t>(kMachineWord32);
+  RunLoadStore<void*>(kMachineTagged);
+  RunLoadStore<double>(kMachineFloat64);
+}
+
+
+TEST(RunFloat64Binop) {
+  RawMachineAssemblerTester<int32_t> m;
+  double result;
+
+  Operator* ops[] = {m.machine()->Float64Add(), m.machine()->Float64Sub(),
+                     m.machine()->Float64Mul(), m.machine()->Float64Div(),
+                     m.machine()->Float64Mod(), NULL};
+
+  double inf = V8_INFINITY;
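+  // inputs[] lists (left, right) operand pairs; each double result is
+  // written to memory because the tester's return type is int32.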
+  Operator* inputs[] = {
+      m.common()->Float64Constant(0),     m.common()->Float64Constant(1),
+      m.common()->Float64Constant(1),     m.common()->Float64Constant(0),
+      m.common()->Float64Constant(0),     m.common()->Float64Constant(-1),
+      m.common()->Float64Constant(-1),    m.common()->Float64Constant(0),
+      m.common()->Float64Constant(0.22),  m.common()->Float64Constant(-1.22),
+      m.common()->Float64Constant(-1.22), m.common()->Float64Constant(0.22),
+      m.common()->Float64Constant(inf),   m.common()->Float64Constant(0.22),
+      m.common()->Float64Constant(inf),   m.common()->Float64Constant(-inf),
+      NULL};
+
+  for (int i = 0; ops[i] != NULL; i++) {
+    for (int j = 0; inputs[j] != NULL; j += 2) {
+      RawMachineAssemblerTester<int32_t> m;
+      Node* a = m.NewNode(inputs[j]);
+      Node* b = m.NewNode(inputs[j + 1]);
+      Node* binop = m.NewNode(ops[i], a, b);
+      Node* base = m.PointerConstant(&result);
+      Node* zero = m.Int32Constant(0);
+      m.Store(kMachineFloat64, base, zero, binop);
+      m.Return(m.Int32Constant(i + j));
+      CHECK_EQ(i + j, m.Call());
+    }
+  }
+}
+
+
+TEST(RunDeadFloat64Binops) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  Operator* ops[] = {m.machine()->Float64Add(), m.machine()->Float64Sub(),
+                     m.machine()->Float64Mul(), m.machine()->Float64Div(),
+                     m.machine()->Float64Mod(), NULL};
+
+  for (int i = 0; ops[i] != NULL; i++) {
+    RawMachineAssemblerTester<int32_t> m;
+    int constant = 0x53355 + i;
+    m.NewNode(ops[i], m.Float64Constant(0.1), m.Float64Constant(1.11));
+    m.Return(m.Int32Constant(constant));
+    CHECK_EQ(constant, m.Call());
+  }
+}
+
+
+TEST(RunFloat64AddP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Float64BinopTester bt(&m);
+
+  bt.AddReturn(m.Float64Add(bt.param0, bt.param1));
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double expected = *pl + *pr;
+      CHECK_EQ(expected, bt.call(*pl, *pr));
+    }
+  }
+}
+
+
+TEST(RunFloat64SubP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Float64BinopTester bt(&m);
+
+  bt.AddReturn(m.Float64Sub(bt.param0, bt.param1));
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double expected = *pl - *pr;
+      CHECK_EQ(expected, bt.call(*pl, *pr));
+    }
+  }
+}
+
+
+TEST(RunFloat64SubImm1) {
+  double input = 0.0;
+  double output = 0.0;
+
+  FOR_FLOAT64_INPUTS(i) {
+    RawMachineAssemblerTester<int32_t> m;
+    Node* t0 = m.LoadFromPointer(&input, kMachineFloat64);
+    Node* t1 = m.Float64Sub(m.Float64Constant(*i), t0);
+    m.StoreToPointer(&output, kMachineFloat64, t1);
+    m.Return(m.Int32Constant(0));
+    FOR_FLOAT64_INPUTS(j) {
+      input = *j;
+      double expected = *i - input;
+      CHECK_EQ(0, m.Call());
+      CHECK_EQ(expected, output);
+    }
+  }
+}
+
+
+TEST(RunFloat64SubImm2) {
+  double input = 0.0;
+  double output = 0.0;
+
+  FOR_FLOAT64_INPUTS(i) {
+    RawMachineAssemblerTester<int32_t> m;
+    Node* t0 = m.LoadFromPointer(&input, kMachineFloat64);
+    Node* t1 = m.Float64Sub(t0, m.Float64Constant(*i));
+    m.StoreToPointer(&output, kMachineFloat64, t1);
+    m.Return(m.Int32Constant(0));
+    FOR_FLOAT64_INPUTS(j) {
+      input = *j;
+      double expected = input - *i;
+      CHECK_EQ(0, m.Call());
+      CHECK_EQ(expected, output);
+    }
+  }
+}
+
+
+TEST(RunFloat64MulP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Float64BinopTester bt(&m);
+
+  bt.AddReturn(m.Float64Mul(bt.param0, bt.param1));
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double expected = *pl * *pr;
+      CHECK_EQ(expected, bt.call(*pl, *pr));
+    }
+  }
+}
+
+
+TEST(RunFloat64MulAndFloat64AddP) {
+  double input_a = 0.0;
+  double input_b = 0.0;
+  double input_c = 0.0;
+  double output = 0.0;
+
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Node* a = m.LoadFromPointer(&input_a, kMachineFloat64);
+    Node* b = m.LoadFromPointer(&input_b, kMachineFloat64);
+    Node* c = m.LoadFromPointer(&input_c, kMachineFloat64);
+    m.StoreToPointer(&output, kMachineFloat64,
+                     m.Float64Add(m.Float64Mul(a, b), c));
+    m.Return(m.Int32Constant(0));
+    FOR_FLOAT64_INPUTS(i) {
+      FOR_FLOAT64_INPUTS(j) {
+        FOR_FLOAT64_INPUTS(k) {
+          input_a = *i;
+          input_b = *j;
+          input_c = *k;
+          volatile double temp = input_a * input_b;
+          volatile double expected = temp + input_c;
+          CHECK_EQ(0, m.Call());
+          CHECK_EQ(expected, output);
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Node* a = m.LoadFromPointer(&input_a, kMachineFloat64);
+    Node* b = m.LoadFromPointer(&input_b, kMachineFloat64);
+    Node* c = m.LoadFromPointer(&input_c, kMachineFloat64);
+    m.StoreToPointer(&output, kMachineFloat64,
+                     m.Float64Add(a, m.Float64Mul(b, c)));
+    m.Return(m.Int32Constant(0));
+    FOR_FLOAT64_INPUTS(i) {
+      FOR_FLOAT64_INPUTS(j) {
+        FOR_FLOAT64_INPUTS(k) {
+          input_a = *i;
+          input_b = *j;
+          input_c = *k;
+          volatile double temp = input_b * input_c;
+          volatile double expected = input_a + temp;
+          CHECK_EQ(0, m.Call());
+          CHECK_EQ(expected, output);
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunFloat64MulAndFloat64SubP) {
+  double input_a = 0.0;
+  double input_b = 0.0;
+  double input_c = 0.0;
+  double output = 0.0;
+
+  RawMachineAssemblerTester<int32_t> m;
+  Node* a = m.LoadFromPointer(&input_a, kMachineFloat64);
+  Node* b = m.LoadFromPointer(&input_b, kMachineFloat64);
+  Node* c = m.LoadFromPointer(&input_c, kMachineFloat64);
+  m.StoreToPointer(&output, kMachineFloat64,
+                   m.Float64Sub(a, m.Float64Mul(b, c)));
+  m.Return(m.Int32Constant(0));
+
+  FOR_FLOAT64_INPUTS(i) {
+    FOR_FLOAT64_INPUTS(j) {
+      FOR_FLOAT64_INPUTS(k) {
+        input_a = *i;
+        input_b = *j;
+        input_c = *k;
+        volatile double temp = input_b * input_c;
+        volatile double expected = input_a - temp;
+        CHECK_EQ(0, m.Call());
+        CHECK_EQ(expected, output);
+      }
+    }
+  }
+}
+
+
+TEST(RunFloat64MulImm) {
+  double input = 0.0;
+  double output = 0.0;
+
+  {
+    FOR_FLOAT64_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m;
+      Node* t0 = m.LoadFromPointer(&input, kMachineFloat64);
+      Node* t1 = m.Float64Mul(m.Float64Constant(*i), t0);
+      m.StoreToPointer(&output, kMachineFloat64, t1);
+      m.Return(m.Int32Constant(0));
+      FOR_FLOAT64_INPUTS(j) {
+        input = *j;
+        double expected = *i * input;
+        CHECK_EQ(0, m.Call());
+        CHECK_EQ(expected, output);
+      }
+    }
+  }
+  {
+    FOR_FLOAT64_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m;
+      Node* t0 = m.LoadFromPointer(&input, kMachineFloat64);
+      Node* t1 = m.Float64Mul(t0, m.Float64Constant(*i));
+      m.StoreToPointer(&output, kMachineFloat64, t1);
+      m.Return(m.Int32Constant(0));
+      FOR_FLOAT64_INPUTS(j) {
+        input = *j;
+        double expected = input * *i;
+        CHECK_EQ(0, m.Call());
+        CHECK_EQ(expected, output);
+      }
+    }
+  }
+}
+
+
+TEST(RunFloat64DivP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Float64BinopTester bt(&m);
+
+  bt.AddReturn(m.Float64Div(bt.param0, bt.param1));
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double expected = *pl / *pr;
+      CHECK_EQ(expected, bt.call(*pl, *pr));
+    }
+  }
+}
+
+
+TEST(RunFloat64ModP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Float64BinopTester bt(&m);
+
+  bt.AddReturn(m.Float64Mod(bt.param0, bt.param1));
+
+  FOR_FLOAT64_INPUTS(i) {
+    FOR_FLOAT64_INPUTS(j) {
+      double expected = modulo(*i, *j);
+      double found = bt.call(*i, *j);
+      CHECK_EQ(expected, found);
+    }
+  }
+}
+
+
+TEST(RunConvertInt32ToFloat64_A) {
+  RawMachineAssemblerTester<int32_t> m;
+  int32_t magic = 0x986234;
+  double result = 0;
+
+  Node* convert = m.ConvertInt32ToFloat64(m.Int32Constant(magic));
+  m.Store(kMachineFloat64, m.PointerConstant(&result), m.Int32Constant(0),
+          convert);
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+  CHECK_EQ(static_cast<double>(magic), result);
+}
+
+
+TEST(RunConvertInt32ToFloat64_B) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  double output = 0;
+
+  Node* convert = m.ConvertInt32ToFloat64(m.Parameter(0));
+  m.Store(kMachineFloat64, m.PointerConstant(&output), m.Int32Constant(0),
+          convert);
+  m.Return(m.Parameter(0));
+
+  FOR_INT32_INPUTS(i) {
+    int32_t expect = *i;
+    CHECK_EQ(expect, m.Call(expect));
+    CHECK_EQ(static_cast<double>(expect), output);
+  }
+}
+
+
+// TODO(titzer): Test ConvertUint32ToFloat64
+
+
+TEST(RunConvertFloat64ToInt32_A) {
+  RawMachineAssemblerTester<int32_t> m;
+  int32_t magic = 0x786234;
+  double input = 11.1;
+  int32_t result = 0;
+
+  m.Store(kMachineWord32, m.PointerConstant(&result), m.Int32Constant(0),
+          m.ConvertFloat64ToInt32(m.Float64Constant(input)));
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+  CHECK_EQ(static_cast<int32_t>(input), result);
+}
+
+
+TEST(RunConvertFloat64ToInt32_B) {
+  RawMachineAssemblerTester<int32_t> m;
+  double input = 0;
+  int32_t output = 0;
+
+  Node* load =
+      m.Load(kMachineFloat64, m.PointerConstant(&input), m.Int32Constant(0));
+  Node* convert = m.ConvertFloat64ToInt32(load);
+  m.Store(kMachineWord32, m.PointerConstant(&output), m.Int32Constant(0),
+          convert);
+  m.Return(convert);
+
+  {
+    FOR_INT32_INPUTS(i) {
+      input = *i;
+      int expect = *i;
+      CHECK_EQ(expect, m.Call());
+      CHECK_EQ(expect, output);
+    }
+  }
+
+  {
+    FOR_FLOAT64_INPUTS(i) {
+      input = *i;
+      // TODO(titzer): float64 -> int32 outside of the int32 range; the machine
+      // backends are all wrong in different ways, and they certainly don't
+      // implement the JavaScript conversions correctly.
+      if (std::isnan(input) || input > INT_MAX || input < INT_MIN) {
+        continue;
+      }
+      int32_t expect = static_cast<int32_t>(input);
+      CHECK_EQ(expect, m.Call());
+      CHECK_EQ(expect, output);
+    }
+  }
+}
+
+
+// TODO(titzer): test ConvertFloat64ToUint32
+
+
+TEST(RunConvertFloat64ToInt32_truncation) {
+  RawMachineAssemblerTester<int32_t> m;
+  int32_t magic = 0x786234;
+  double input = 3.9;
+  int32_t result = 0;
+
+  Node* input_node =
+      m.Load(kMachineFloat64, m.PointerConstant(&input), m.Int32Constant(0));
+  m.Store(kMachineWord32, m.PointerConstant(&result), m.Int32Constant(0),
+          m.ConvertFloat64ToInt32(input_node));
+  m.Return(m.Int32Constant(magic));
+
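+  // The conversion truncates toward zero, so both i - 0.9 (for negative i)
+  // and i + 0.9 (for non-negative i) must convert back to i.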
+  for (int i = -200; i < 200; i++) {
+    input = i + (i < 0 ? -0.9 : 0.9);
+    CHECK_EQ(magic, m.Call());
+    CHECK_EQ(i, result);
+  }
+}
+
+
+TEST(RunConvertFloat64ToInt32_spilled) {
+  RawMachineAssemblerTester<int32_t> m;
+  const int kNumInputs = 32;
+  int32_t magic = 0x786234;
+  double input[kNumInputs];
+  int32_t result[kNumInputs];
+  Node* input_node[kNumInputs];
+
+  for (int i = 0; i < kNumInputs; i++) {
+    input_node[i] = m.Load(kMachineFloat64, m.PointerConstant(&input),
+                           m.Int32Constant(i * 8));
+  }
+
+  for (int i = 0; i < kNumInputs; i++) {
+    m.Store(kMachineWord32, m.PointerConstant(&result), m.Int32Constant(i * 4),
+            m.ConvertFloat64ToInt32(input_node[i]));
+  }
+
+  m.Return(m.Int32Constant(magic));
+
+  for (int i = 0; i < kNumInputs; i++) {
+    input[i] = 100.9 + i;
+  }
+
+  CHECK_EQ(magic, m.Call());
+
+  for (int i = 0; i < kNumInputs; i++) {
+    CHECK_EQ(result[i], 100 + i);
+  }
+}
+
+
+TEST(RunDeadConvertFloat64ToInt32) {
+  RawMachineAssemblerTester<int32_t> m;
+  const int magic = 0x88abcda4;
+  m.ConvertFloat64ToInt32(m.Float64Constant(999.78));
+  m.Return(m.Int32Constant(magic));
+  CHECK_EQ(magic, m.Call());
+}
+
+
+TEST(RunDeadConvertInt32ToFloat64) {
+  RawMachineAssemblerTester<int32_t> m;
+  const int magic = 0x8834abcd;
+  m.ConvertInt32ToFloat64(m.Int32Constant(magic - 6888));
+  m.Return(m.Int32Constant(magic));
+  CHECK_EQ(magic, m.Call());
+}
+
+
+TEST(RunLoopPhiInduction2) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  int false_val = 0x10777;
+
+  // x = false_val; while(false) { x++; } return x;
+  MLabel header, body, end;
+  Node* false_node = m.Int32Constant(false_val);
+  m.Goto(&header);
+  m.Bind(&header);
+  Node* phi = m.Phi(false_node, false_node);
+  m.Branch(m.Int32Constant(0), &body, &end);
+  m.Bind(&body);
+  Node* add = m.Int32Add(phi, m.Int32Constant(1));
+  phi->ReplaceInput(1, add);
+  m.Goto(&header);
+  m.Bind(&end);
+  m.Return(phi);
+
+  CHECK_EQ(false_val, m.Call());
+}
+
+
+TEST(RunDoubleDiamond) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  const int magic = 99645;
+  double buffer = 0.1;
+  double constant = 99.99;
+
+  MLabel blocka, blockb, end;
+  Node* k1 = m.Float64Constant(constant);
+  Node* k2 = m.Float64Constant(0 - constant);
+  m.Branch(m.Int32Constant(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Goto(&end);
+  m.Bind(&blockb);
+  m.Goto(&end);
+  m.Bind(&end);
+  Node* phi = m.Phi(k2, k1);
+  m.Store(kMachineFloat64, m.PointerConstant(&buffer), m.Int32Constant(0), phi);
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+  CHECK_EQ(constant, buffer);
+}
+
+
+TEST(RunRefDiamond) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  const int magic = 99644;
+  Handle<String> rexpected =
+      CcTest::i_isolate()->factory()->InternalizeUtf8String("A");
+  String* buffer;
+
+  MLabel blocka, blockb, end;
+  Node* k1 = m.StringConstant("A");
+  Node* k2 = m.StringConstant("B");
+  m.Branch(m.Int32Constant(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Goto(&end);
+  m.Bind(&blockb);
+  m.Goto(&end);
+  m.Bind(&end);
+  Node* phi = m.Phi(k2, k1);
+  m.Store(kMachineTagged, m.PointerConstant(&buffer), m.Int32Constant(0), phi);
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+  CHECK(rexpected->SameValue(buffer));
+}
+
+
+TEST(RunDoubleRefDiamond) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  const int magic = 99648;
+  double dbuffer = 0.1;
+  double dconstant = 99.99;
+  Handle<String> rexpected =
+      CcTest::i_isolate()->factory()->InternalizeUtf8String("AX");
+  String* rbuffer;
+
+  MLabel blocka, blockb, end;
+  Node* d1 = m.Float64Constant(dconstant);
+  Node* d2 = m.Float64Constant(0 - dconstant);
+  Node* r1 = m.StringConstant("AX");
+  Node* r2 = m.StringConstant("BX");
+  m.Branch(m.Int32Constant(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Goto(&end);
+  m.Bind(&blockb);
+  m.Goto(&end);
+  m.Bind(&end);
+  Node* dphi = m.Phi(d2, d1);
+  Node* rphi = m.Phi(r2, r1);
+  m.Store(kMachineFloat64, m.PointerConstant(&dbuffer), m.Int32Constant(0),
+          dphi);
+  m.Store(kMachineTagged, m.PointerConstant(&rbuffer), m.Int32Constant(0),
+          rphi);
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+  CHECK_EQ(dconstant, dbuffer);
+  CHECK(rexpected->SameValue(rbuffer));
+}
+
+
+TEST(RunDoubleRefDoubleDiamond) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  const int magic = 99649;
+  double dbuffer = 0.1;
+  double dconstant = 99.997;
+  Handle<String> rexpected =
+      CcTest::i_isolate()->factory()->InternalizeUtf8String("AD");
+  String* rbuffer;
+
+  MLabel blocka, blockb, mid, blockd, blocke, end;
+  Node* d1 = m.Float64Constant(dconstant);
+  Node* d2 = m.Float64Constant(0 - dconstant);
+  Node* r1 = m.StringConstant("AD");
+  Node* r2 = m.StringConstant("BD");
+  m.Branch(m.Int32Constant(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Goto(&mid);
+  m.Bind(&blockb);
+  m.Goto(&mid);
+  m.Bind(&mid);
+  Node* dphi1 = m.Phi(d2, d1);
+  Node* rphi1 = m.Phi(r2, r1);
+  m.Branch(m.Int32Constant(0), &blockd, &blocke);
+
+  m.Bind(&blockd);
+  m.Goto(&end);
+  m.Bind(&blocke);
+  m.Goto(&end);
+  m.Bind(&end);
+  Node* dphi2 = m.Phi(d1, dphi1);
+  Node* rphi2 = m.Phi(r1, rphi1);
+
+  m.Store(kMachineFloat64, m.PointerConstant(&dbuffer), m.Int32Constant(0),
+          dphi2);
+  m.Store(kMachineTagged, m.PointerConstant(&rbuffer), m.Int32Constant(0),
+          rphi2);
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+  CHECK_EQ(dconstant, dbuffer);
+  CHECK(rexpected->SameValue(rbuffer));
+}
+
+
+TEST(RunDoubleLoopPhi) {
+  RawMachineAssemblerTester<int32_t> m;
+  MLabel header, body, end;
+
+  int magic = 99773;
+  double buffer = 0.99;
+  double dconstant = 777.1;
+
+  Node* zero = m.Int32Constant(0);
+  Node* dk = m.Float64Constant(dconstant);
+
+  m.Goto(&header);
+  m.Bind(&header);
+  Node* phi = m.Phi(dk, dk);
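+  // Route the phi back into itself so its value is invariant around the loop.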
+  phi->ReplaceInput(1, phi);
+  m.Branch(zero, &body, &end);
+  m.Bind(&body);
+  m.Goto(&header);
+  m.Bind(&end);
+  m.Store(kMachineFloat64, m.PointerConstant(&buffer), m.Int32Constant(0), phi);
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+}
+
+
+TEST(RunCountToTenAccRaw) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  Node* zero = m.Int32Constant(0);
+  Node* ten = m.Int32Constant(10);
+  Node* one = m.Int32Constant(1);
+
+  MLabel header, body, body_cont, end;
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* i = m.Phi(zero, zero);
+  Node* j = m.Phi(zero, zero);
+  m.Goto(&body);
+
+  m.Bind(&body);
+  Node* next_i = m.Int32Add(i, one);
+  Node* next_j = m.Int32Add(j, one);
+  m.Branch(m.Word32Equal(next_i, ten), &end, &body_cont);
+
+  m.Bind(&body_cont);
+  i->ReplaceInput(1, next_i);
+  j->ReplaceInput(1, next_j);
+  m.Goto(&header);
+
+  m.Bind(&end);
+  m.Return(ten);
+
+  CHECK_EQ(10, m.Call());
+}
+
+
+TEST(RunCountToTenAccRaw2) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  Node* zero = m.Int32Constant(0);
+  Node* ten = m.Int32Constant(10);
+  Node* one = m.Int32Constant(1);
+
+  MLabel header, body, body_cont, end;
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* i = m.Phi(zero, zero);
+  Node* j = m.Phi(zero, zero);
+  Node* k = m.Phi(zero, zero);
+  m.Goto(&body);
+
+  m.Bind(&body);
+  Node* next_i = m.Int32Add(i, one);
+  Node* next_j = m.Int32Add(j, one);
+  Node* next_k = m.Int32Add(k, one);
+  m.Branch(m.Word32Equal(next_i, ten), &end, &body_cont);
+
+  m.Bind(&body_cont);
+  i->ReplaceInput(1, next_i);
+  j->ReplaceInput(1, next_j);
+  k->ReplaceInput(1, next_k);
+  m.Goto(&header);
+
+  m.Bind(&end);
+  m.Return(ten);
+
+  CHECK_EQ(10, m.Call());
+}
+
+
+TEST(RunAddTree) {
+  RawMachineAssemblerTester<int32_t> m;
+  int32_t inputs[] = {11, 12, 13, 14, 15, 16, 17, 18};
+
+  Node* base = m.PointerConstant(inputs);
+  Node* n0 = m.Load(kMachineWord32, base, m.Int32Constant(0 * sizeof(int32_t)));
+  Node* n1 = m.Load(kMachineWord32, base, m.Int32Constant(1 * sizeof(int32_t)));
+  Node* n2 = m.Load(kMachineWord32, base, m.Int32Constant(2 * sizeof(int32_t)));
+  Node* n3 = m.Load(kMachineWord32, base, m.Int32Constant(3 * sizeof(int32_t)));
+  Node* n4 = m.Load(kMachineWord32, base, m.Int32Constant(4 * sizeof(int32_t)));
+  Node* n5 = m.Load(kMachineWord32, base, m.Int32Constant(5 * sizeof(int32_t)));
+  Node* n6 = m.Load(kMachineWord32, base, m.Int32Constant(6 * sizeof(int32_t)));
+  Node* n7 = m.Load(kMachineWord32, base, m.Int32Constant(7 * sizeof(int32_t)));
+
+  Node* i1 = m.Int32Add(n0, n1);
+  Node* i2 = m.Int32Add(n2, n3);
+  Node* i3 = m.Int32Add(n4, n5);
+  Node* i4 = m.Int32Add(n6, n7);
+
+  Node* i5 = m.Int32Add(i1, i2);
+  Node* i6 = m.Int32Add(i3, i4);
+
+  Node* i7 = m.Int32Add(i5, i6);
+
+  m.Return(i7);
+
+  CHECK_EQ(116, m.Call());
+}
+
+
+#if MACHINE_ASSEMBLER_SUPPORTS_CALL_C
+
+static int Seven() { return 7; }
+static int UnaryMinus(int a) { return -a; }
+static int APlusTwoB(int a, int b) { return a + 2 * b; }
+
+
+TEST(RunCallSeven) {
+  for (int i = 0; i < 2; i++) {
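+    // Iteration 0 calls through a constant function address; iteration 1
+    // loads the address from memory first.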
+    bool call_direct = i == 0;
+    void* function_address =
+        reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&Seven));
+
+    RawMachineAssemblerTester<int32_t> m;
+    Node** args = NULL;
+    MachineRepresentation* arg_types = NULL;
+    Node* function =
+        call_direct ? m.PointerConstant(function_address)
+                    : m.LoadFromPointer(&function_address,
+                                        MachineOperatorBuilder::pointer_rep());
+    m.Return(m.CallC(function, kMachineWord32, arg_types, args, 0));
+
+    CHECK_EQ(7, m.Call());
+  }
+}
+
+
+TEST(RunCallUnaryMinus) {
+  for (int i = 0; i < 2; i++) {
+    bool call_direct = i == 0;
+    void* function_address =
+        reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&UnaryMinus));
+
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+    Node* args[] = {m.Parameter(0)};
+    MachineRepresentation arg_types[] = {kMachineWord32};
+    Node* function =
+        call_direct ? m.PointerConstant(function_address)
+                    : m.LoadFromPointer(&function_address,
+                                        MachineOperatorBuilder::pointer_rep());
+    m.Return(m.CallC(function, kMachineWord32, arg_types, args, 1));
+
+    FOR_INT32_INPUTS(i) {
+      int a = *i;
+      CHECK_EQ(-a, m.Call(a));
+    }
+  }
+}
+
+
+TEST(RunCallAPlusTwoB) {
+  for (int i = 0; i < 2; i++) {
+    bool call_direct = i == 0;
+    void* function_address =
+        reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&APlusTwoB));
+
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+    Node* args[] = {m.Parameter(0), m.Parameter(1)};
+    MachineRepresentation arg_types[] = {kMachineWord32, kMachineWord32};
+    Node* function =
+        call_direct ? m.PointerConstant(function_address)
+                    : m.LoadFromPointer(&function_address,
+                                        MachineOperatorBuilder::pointer_rep());
+    m.Return(m.CallC(function, kMachineWord32, arg_types, args, 2));
+
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int a = *i;
+        int b = *j;
+        int result = m.Call(a, b);
+        CHECK_EQ(a + 2 * b, result);
+      }
+    }
+  }
+}
+
+#endif  // MACHINE_ASSEMBLER_SUPPORTS_CALL_C
+
+
+static const int kFloat64CompareHelperTestCases = 15;
+static const int kFloat64CompareHelperNodeType = 4;
+
+static int Float64CompareHelper(RawMachineAssemblerTester<int32_t>* m,
+                                int test_case, int node_type, double x,
+                                double y) {
+  static double buffer[2];
+  buffer[0] = x;
+  buffer[1] = y;
+  CHECK(0 <= test_case && test_case < kFloat64CompareHelperTestCases);
+  CHECK(0 <= node_type && node_type < kFloat64CompareHelperNodeType);
+  CHECK(x < y);
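+  // node_type is a two-bit code: bit 1 selects a loaded (rather than
+  // constant) left operand, bit 0 a loaded right operand.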
+  bool load_a = node_type / 2 == 1;
+  bool load_b = node_type % 2 == 1;
+  Node* a = load_a ? m->Load(kMachineFloat64, m->PointerConstant(&buffer[0]))
+                   : m->Float64Constant(x);
+  Node* b = load_b ? m->Load(kMachineFloat64, m->PointerConstant(&buffer[1]))
+                   : m->Float64Constant(y);
+  Node* cmp = NULL;
+  bool expected = false;
+  switch (test_case) {
+    // Equal tests.
+    case 0:
+      cmp = m->Float64Equal(a, b);
+      expected = false;
+      break;
+    case 1:
+      cmp = m->Float64Equal(a, a);
+      expected = true;
+      break;
+    // LessThan tests.
+    case 2:
+      cmp = m->Float64LessThan(a, b);
+      expected = true;
+      break;
+    case 3:
+      cmp = m->Float64LessThan(b, a);
+      expected = false;
+      break;
+    case 4:
+      cmp = m->Float64LessThan(a, a);
+      expected = false;
+      break;
+    // LessThanOrEqual tests.
+    case 5:
+      cmp = m->Float64LessThanOrEqual(a, b);
+      expected = true;
+      break;
+    case 6:
+      cmp = m->Float64LessThanOrEqual(b, a);
+      expected = false;
+      break;
+    case 7:
+      cmp = m->Float64LessThanOrEqual(a, a);
+      expected = true;
+      break;
+    // NotEqual tests.
+    case 8:
+      cmp = m->Float64NotEqual(a, b);
+      expected = true;
+      break;
+    case 9:
+      cmp = m->Float64NotEqual(b, a);
+      expected = true;
+      break;
+    case 10:
+      cmp = m->Float64NotEqual(a, a);
+      expected = false;
+      break;
+    // GreaterThan tests.
+    case 11:
+      cmp = m->Float64GreaterThan(a, a);
+      expected = false;
+      break;
+    case 12:
+      cmp = m->Float64GreaterThan(a, b);
+      expected = false;
+      break;
+    // GreaterThanOrEqual tests.
+    case 13:
+      cmp = m->Float64GreaterThanOrEqual(a, a);
+      expected = true;
+      break;
+    case 14:
+      cmp = m->Float64GreaterThanOrEqual(b, a);
+      expected = true;
+      break;
+    default:
+      UNREACHABLE();
+  }
+  m->Return(cmp);
+  return expected;
+}
+
+
+TEST(RunFloat64Compare) {
+  double inf = V8_INFINITY;
+  // All pairs (a1, a2) are of the form a1 < a2.
+  double inputs[] = {0.0,  1.0,  -1.0, 0.22, -1.22, 0.22,
+                     -inf, 0.22, 0.22, inf,  -inf,  inf};
+
+  for (int test = 0; test < kFloat64CompareHelperTestCases; test++) {
+    for (int node_type = 0; node_type < kFloat64CompareHelperNodeType;
+         node_type++) {
+      for (size_t input = 0; input < ARRAY_SIZE(inputs); input += 2) {
+        RawMachineAssemblerTester<int32_t> m;
+        int expected = Float64CompareHelper(&m, test, node_type, inputs[input],
+                                            inputs[input + 1]);
+        CHECK_EQ(expected, m.Call());
+      }
+    }
+  }
+}
+
+
+TEST(RunFloat64UnorderedCompare) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  Operator* operators[] = {m.machine()->Float64Equal(),
+                           m.machine()->Float64LessThan(),
+                           m.machine()->Float64LessThanOrEqual()};
+
+  double nan = v8::base::OS::nan_value();
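+  // Every ordered comparison involving NaN is false, so each variant below
+  // must return 0.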
+
+  FOR_FLOAT64_INPUTS(i) {
+    for (size_t o = 0; o < ARRAY_SIZE(operators); ++o) {
+      for (int j = 0; j < 2; j++) {
+        RawMachineAssemblerTester<int32_t> m;
+        Node* a = m.Float64Constant(*i);
+        Node* b = m.Float64Constant(nan);
+        if (j == 1) std::swap(a, b);
+        m.Return(m.NewNode(operators[o], a, b));
+        CHECK_EQ(0, m.Call());
+      }
+    }
+  }
+}
+
+
+TEST(RunFloat64Equal) {
+  double input_a = 0.0;
+  double input_b = 0.0;
+
+  RawMachineAssemblerTester<int32_t> m;
+  Node* a = m.LoadFromPointer(&input_a, kMachineFloat64);
+  Node* b = m.LoadFromPointer(&input_b, kMachineFloat64);
+  m.Return(m.Float64Equal(a, b));
+
+  CompareWrapper cmp(IrOpcode::kFloat64Equal);
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      input_a = *pl;
+      input_b = *pr;
+      int32_t expected = cmp.Float64Compare(input_a, input_b) ? 1 : 0;
+      CHECK_EQ(expected, m.Call());
+    }
+  }
+}
+
+
+TEST(RunFloat64LessThan) {
+  double input_a = 0.0;
+  double input_b = 0.0;
+
+  RawMachineAssemblerTester<int32_t> m;
+  Node* a = m.LoadFromPointer(&input_a, kMachineFloat64);
+  Node* b = m.LoadFromPointer(&input_b, kMachineFloat64);
+  m.Return(m.Float64LessThan(a, b));
+
+  CompareWrapper cmp(IrOpcode::kFloat64LessThan);
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      input_a = *pl;
+      input_b = *pr;
+      int32_t expected = cmp.Float64Compare(input_a, input_b) ? 1 : 0;
+      CHECK_EQ(expected, m.Call());
+    }
+  }
+}
+
+
+template <typename IntType, MachineRepresentation kRepresentation>
+static void LoadStoreTruncation() {
+  IntType input;
+
+  RawMachineAssemblerTester<int32_t> m;
+  Node* a = m.LoadFromPointer(&input, kRepresentation);
+  Node* ap1 = m.Int32Add(a, m.Int32Constant(1));
+  m.StoreToPointer(&input, kRepresentation, ap1);
+  m.Return(ap1);
+
+  const IntType max = std::numeric_limits<IntType>::max();
+  const IntType min = std::numeric_limits<IntType>::min();
+
+  // Test upper bound.
+  input = max;
+  CHECK_EQ(max + 1, m.Call());
+  CHECK_EQ(min, input);
+
+  // Test lower bound.
+  input = min;
+  CHECK_EQ(max + 2, m.Call());
+  CHECK_EQ(min + 1, input);
+
+  // Test all one-byte values that are not the one-byte bounds.
+  for (int i = -127; i < 127; i++) {
+    input = i;
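+    // The expected values below imply that sub-word loads zero-extend: a
+    // negative i reads back as i - min + (max + 1).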
+    int expected = i >= 0 ? i + 1 : max + (i - min) + 2;
+    CHECK_EQ(expected, m.Call());
+    CHECK_EQ(i + 1, input);
+  }
+}
+
+
+TEST(RunLoadStoreTruncation) {
+  LoadStoreTruncation<int8_t, kMachineWord8>();
+  LoadStoreTruncation<int16_t, kMachineWord16>();
+}
+
+
+static void IntPtrCompare(intptr_t left, intptr_t right) {
+  for (int test = 0; test < 7; test++) {
+    RawMachineAssemblerTester<bool> m(MachineOperatorBuilder::pointer_rep(),
+                                      MachineOperatorBuilder::pointer_rep());
+    Node* p0 = m.Parameter(0);
+    Node* p1 = m.Parameter(1);
+    Node* res = NULL;
+    bool expected = false;
+    switch (test) {
+      case 0:
+        res = m.IntPtrLessThan(p0, p1);
+        expected = true;
+        break;
+      case 1:
+        res = m.IntPtrLessThanOrEqual(p0, p1);
+        expected = true;
+        break;
+      case 2:
+        res = m.IntPtrEqual(p0, p1);
+        expected = false;
+        break;
+      case 3:
+        res = m.IntPtrGreaterThanOrEqual(p0, p1);
+        expected = false;
+        break;
+      case 4:
+        res = m.IntPtrGreaterThan(p0, p1);
+        expected = false;
+        break;
+      case 5:
+        res = m.IntPtrEqual(p0, p0);
+        expected = true;
+        break;
+      case 6:
+        res = m.IntPtrNotEqual(p0, p1);
+        expected = true;
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+    m.Return(res);
+    CHECK_EQ(expected, m.Call(reinterpret_cast<int32_t*>(left),
+                              reinterpret_cast<int32_t*>(right)));
+  }
+}
+
+
+TEST(RunIntPtrCompare) {
+  intptr_t min = std::numeric_limits<intptr_t>::min();
+  intptr_t max = std::numeric_limits<intptr_t>::max();
+  // An ascending chain of intptr_t values.
+  intptr_t inputs[] = {min, min / 2, -1, 0, 1, max / 2, max};
+  for (size_t i = 0; i < ARRAY_SIZE(inputs) - 1; i++) {
+    IntPtrCompare(inputs[i], inputs[i + 1]);
+  }
+}
+
+
+TEST(RunTestIntPtrArithmetic) {
+  static const int kInputSize = 10;
+  int32_t inputs[kInputSize];
+  int32_t outputs[kInputSize];
+  for (int i = 0; i < kInputSize; i++) {
+    inputs[i] = i;
+    outputs[i] = -1;
+  }
+  RawMachineAssemblerTester<int32_t*> m;
+  Node* input = m.PointerConstant(&inputs[0]);
+  Node* output = m.PointerConstant(&outputs[kInputSize - 1]);
+  Node* elem_size = m.ConvertInt32ToIntPtr(m.Int32Constant(sizeof(inputs[0])));
+  for (int i = 0; i < kInputSize; i++) {
+    m.Store(kMachineWord32, output, m.Load(kMachineWord32, input));
+    input = m.IntPtrAdd(input, elem_size);
+    output = m.IntPtrSub(output, elem_size);
+  }
+  m.Return(input);
+  CHECK_EQ(&inputs[kInputSize], m.Call());
+  for (int i = 0; i < kInputSize; i++) {
+    CHECK_EQ(i, inputs[i]);
+    CHECK_EQ(kInputSize - i - 1, outputs[i]);
+  }
+}
+
+
+TEST(RunSpillLotsOfThings) {
+  static const int kInputSize = 1000;
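+  // Keep all 1000 partial sums live until the stores below, which should
+  // force the register allocator to spill most of them.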
+  RawMachineAssemblerTester<void> m;
+  Node* accs[kInputSize];
+  int32_t outputs[kInputSize];
+  Node* one = m.Int32Constant(1);
+  Node* acc = one;
+  for (int i = 0; i < kInputSize; i++) {
+    acc = m.Int32Add(acc, one);
+    accs[i] = acc;
+  }
+  for (int i = 0; i < kInputSize; i++) {
+    m.StoreToPointer(&outputs[i], kMachineWord32, accs[i]);
+  }
+  m.Return(one);
+  m.Call();
+  for (int i = 0; i < kInputSize; i++) {
+    CHECK_EQ(outputs[i], i + 2);
+  }
+}
+
+
+TEST(RunSpillConstantsAndParameters) {
+  static const size_t kInputSize = 1000;
+  static const int32_t kBase = 987;
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+  int32_t outputs[kInputSize];
+  Node* csts[kInputSize];
+  Node* accs[kInputSize];
+  Node* acc = m.Int32Constant(0);
+  for (size_t i = 0; i < kInputSize; i++) {
+    csts[i] = m.Int32Constant(static_cast<int32_t>(kBase + i));
+  }
+  for (size_t i = 0; i < kInputSize; i++) {
+    acc = m.Int32Add(acc, csts[i]);
+    accs[i] = acc;
+  }
+  for (size_t i = 0; i < kInputSize; i++) {
+    m.StoreToPointer(&outputs[i], kMachineWord32, accs[i]);
+  }
+  m.Return(m.Int32Add(acc, m.Int32Add(m.Parameter(0), m.Parameter(1))));
+  FOR_INT32_INPUTS(i) {
+    FOR_INT32_INPUTS(j) {
+      int32_t expected = *i + *j;
+      for (size_t k = 0; k < kInputSize; k++) {
+        expected += kBase + k;
+      }
+      CHECK_EQ(expected, m.Call(*i, *j));
+      expected = 0;
+      for (size_t k = 0; k < kInputSize; k++) {
+        expected += kBase + k;
+        CHECK_EQ(expected, outputs[k]);
+      }
+    }
+  }
+}
+
+
+TEST(RunNewSpaceConstantsInPhi) {
+  RawMachineAssemblerTester<Object*> m(kMachineWord32);
+
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapNumber> true_val = isolate->factory()->NewHeapNumber(11.2);
+  Handle<HeapNumber> false_val = isolate->factory()->NewHeapNumber(11.3);
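+  // Both heap numbers are freshly allocated in new space; the phi must
+  // return the identical objects, not copies.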
+  Node* true_node = m.HeapConstant(true_val);
+  Node* false_node = m.HeapConstant(false_val);
+
+  MLabel blocka, blockb, end;
+  m.Branch(m.Parameter(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Goto(&end);
+  m.Bind(&blockb);
+  m.Goto(&end);
+
+  m.Bind(&end);
+  Node* phi = m.Phi(true_node, false_node);
+  m.Return(phi);
+
+  CHECK_EQ(*false_val, m.Call(0));
+  CHECK_EQ(*true_val, m.Call(1));
+}
+
+
+#if MACHINE_ASSEMBLER_SUPPORTS_CALL_C
+
+TEST(RunSpillLotsOfThingsWithCall) {
+  static const int kInputSize = 1000;
+  RawMachineAssemblerTester<void> m;
+  Node* accs[kInputSize];
+  int32_t outputs[kInputSize];
+  Node* one = m.Int32Constant(1);
+  Node* acc = one;
+  for (int i = 0; i < kInputSize; i++) {
+    acc = m.Int32Add(acc, one);
+    accs[i] = acc;
+  }
+  // If the spill slot computation is wrong, it might load from the C frame.
+  {
+    void* func = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&Seven));
+    Node** args = NULL;
+    MachineRepresentation* arg_types = NULL;
+    m.CallC(m.PointerConstant(func), kMachineWord32, arg_types, args, 0);
+  }
+  for (int i = 0; i < kInputSize; i++) {
+    m.StoreToPointer(&outputs[i], kMachineWord32, accs[i]);
+  }
+  m.Return(one);
+  m.Call();
+  for (int i = 0; i < kInputSize; i++) {
+    CHECK_EQ(outputs[i], i + 2);
+  }
+}
+
+#endif  // MACHINE_ASSEMBLER_SUPPORTS_CALL_C
+
+#endif
diff --git a/test/cctest/compiler/compiler/test-run-variables.cc b/test/cctest/compiler/compiler/test-run-variables.cc
new file mode 100644 (file)
index 0000000..bf86e0d
--- /dev/null
@@ -0,0 +1,121 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+static const char* throws = NULL;
+
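+// Each entry in the test tables below is a triple: a source fragment, the
+// expected result for a truthy argument, and the expected result for a falsy
+// argument; "throws" marks an expected exception.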
+static const char* load_tests[] = {
+    "var x = a; r = x",                       "123",       "0",
+    "var x = (r = x)",                        "undefined", "undefined",
+    "var x = (a?1:2); r = x",                 "1",         "2",
+    "const x = a; r = x",                     "123",       "0",
+    "const x = (r = x)",                      "undefined", "undefined",
+    "const x = (a?3:4); r = x",               "3",         "4",
+    "'use strict'; const x = a; r = x",       "123",       "0",
+    "'use strict'; const x = (r = x)",        throws,      throws,
+    "'use strict'; const x = (a?5:6); r = x", "5",         "6",
+    "'use strict'; let x = a; r = x",         "123",       "0",
+    "'use strict'; let x = (r = x)",          throws,      throws,
+    "'use strict'; let x = (a?7:8); r = x",   "7",         "8",
+    NULL};
+
+static const char* store_tests[] = {
+    "var x = 1; x = a; r = x",                     "123",  "0",
+    "var x = (a?(x=4,2):3); r = x",                "2",    "3",
+    "var x = (a?4:5); x = a; r = x",               "123",  "0",
+    "const x = 1; x = a; r = x",                   "1",    "1",
+    "const x = (a?(x=4,2):3); r = x",              "2",    "3",
+    "const x = (a?4:5); x = a; r = x",             "4",    "5",
+    // Assignments to 'const' are SyntaxErrors, handled by the parser,
+    // hence we cannot test them here because they are early errors.
+    "'use strict'; let x = 1; x = a; r = x",       "123",  "0",
+    "'use strict'; let x = (a?(x=4,2):3); r = x",  throws, "3",
+    "'use strict'; let x = (a?4:5); x = a; r = x", "123",  "0",
+    NULL};
+
+static const char* bind_tests[] = {
+    "if (a) { const x = a }; r = x;",            "123", "undefined",
+    "for (; a > 0; a--) { const x = a }; r = x", "123", "undefined",
+    // Re-initialization of variables other than legacy 'const' is not
+    // possible due to sane variable scoping, hence no tests here.
+    NULL};
+
+
+static void RunVariableTests(const char* source, const char* tests[]) {
+  FLAG_harmony_scoping = true;
+  EmbeddedVector<char, 512> buffer;
+
+  for (int i = 0; tests[i] != NULL; i += 3) {
+    SNPrintF(buffer, source, tests[i]);
+    PrintF("#%d: %s\n", i / 3, buffer.start());
+    FunctionTester T(buffer.start());
+
+    // Check the function with a truthy parameter.
+    if (tests[i + 1] != throws) {
+      Handle<Object> r = v8::Utils::OpenHandle(*CompileRun(tests[i + 1]));
+      T.CheckCall(r, T.Val(123), T.Val("result"));
+    } else {
+      T.CheckThrows(T.Val(123), T.Val("result"));
+    }
+
+    // Check the function with a falsy parameter.
+    if (tests[i + 2] != throws) {
+      Handle<Object> r = v8::Utils::OpenHandle(*CompileRun(tests[i + 2]));
+      T.CheckCall(r, T.Val(0.0), T.Val("result"));
+    } else {
+      T.CheckThrows(T.Val(0.0), T.Val("result"));
+    }
+  }
+}
+
+
+TEST(StackLoadVariables) {
+  const char* source = "(function(a,r) { %s; return r; })";
+  RunVariableTests(source, load_tests);
+}
+
+
+TEST(ContextLoadVariables) {
+  const char* source = "(function(a,r) { %s; function f() {x} return r; })";
+  RunVariableTests(source, load_tests);
+}
+
+
+TEST(StackStoreVariables) {
+  const char* source = "(function(a,r) { %s; return r; })";
+  RunVariableTests(source, store_tests);
+}
+
+
+TEST(ContextStoreVariables) {
+  const char* source = "(function(a,r) { %s; function f() {x} return r; })";
+  RunVariableTests(source, store_tests);
+}
+
+
+TEST(StackInitializeVariables) {
+  const char* source = "(function(a,r) { %s; return r; })";
+  RunVariableTests(source, bind_tests);
+}
+
+
+TEST(ContextInitializeVariables) {
+  const char* source = "(function(a,r) { %s; function f() {x} return r; })";
+  RunVariableTests(source, bind_tests);
+}
+
+
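+// The binding of a named function expression to its own name must not be
+// shadowed by an outer variable of the same name.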
+TEST(SelfReferenceVariable) {
+  FunctionTester T("(function self() { return self; })");
+
+  T.CheckCall(T.function);
+  CompileRun("var self = 'not a function'");
+  T.CheckCall(T.function);
+}
diff --git a/test/cctest/compiler/compiler/test-schedule.cc b/test/cctest/compiler/compiler/test-schedule.cc
new file mode 100644 (file)
index 0000000..aa7dd99
--- /dev/null
@@ -0,0 +1,159 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/schedule.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+static SimpleOperator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
+                                     0, 0, "dummy");
+
+TEST(TestScheduleAllocation) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+
+  CHECK_NE(NULL, schedule.entry());
+  CHECK_EQ(schedule.entry(), *(schedule.all_blocks().begin()));
+}
+
+
+TEST(TestScheduleAddNode) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator);
+
+  Schedule schedule(scope.main_zone());
+
+  BasicBlock* entry = schedule.entry();
+  schedule.AddNode(entry, n0);
+  schedule.AddNode(entry, n1);
+
+  CHECK_EQ(entry, schedule.block(n0));
+  CHECK_EQ(entry, schedule.block(n1));
+  CHECK(schedule.SameBasicBlock(n0, n1));
+
+  Node* n2 = graph.NewNode(&dummy_operator);
+  CHECK_EQ(NULL, schedule.block(n2));
+}
+
+
+TEST(TestScheduleAddGoto) {
+  HandleAndZoneScope scope;
+
+  Schedule schedule(scope.main_zone());
+  BasicBlock* entry = schedule.entry();
+  BasicBlock* next = schedule.NewBasicBlock();
+
+  schedule.AddGoto(entry, next);
+
+  CHECK_EQ(0, entry->PredecessorCount());
+  CHECK_EQ(1, entry->SuccessorCount());
+  CHECK_EQ(next, entry->SuccessorAt(0));
+
+  CHECK_EQ(1, next->PredecessorCount());
+  CHECK_EQ(entry, next->PredecessorAt(0));
+  CHECK_EQ(0, next->SuccessorCount());
+}
+
+
+TEST(TestScheduleAddBranch) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+
+  BasicBlock* entry = schedule.entry();
+  BasicBlock* tblock = schedule.NewBasicBlock();
+  BasicBlock* fblock = schedule.NewBasicBlock();
+
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common(scope.main_zone());
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* b = graph.NewNode(common.Branch(), n0);
+
+  schedule.AddBranch(entry, b, tblock, fblock);
+
+  CHECK_EQ(0, entry->PredecessorCount());
+  CHECK_EQ(2, entry->SuccessorCount());
+  CHECK_EQ(tblock, entry->SuccessorAt(0));
+  CHECK_EQ(fblock, entry->SuccessorAt(1));
+
+  CHECK_EQ(1, tblock->PredecessorCount());
+  CHECK_EQ(entry, tblock->PredecessorAt(0));
+  CHECK_EQ(0, tblock->SuccessorCount());
+
+  CHECK_EQ(1, fblock->PredecessorCount());
+  CHECK_EQ(entry, fblock->PredecessorAt(0));
+  CHECK_EQ(0, fblock->SuccessorCount());
+}
+
+
+TEST(TestScheduleAddReturn) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Graph graph(scope.main_zone());
+  Node* n0 = graph.NewNode(&dummy_operator);
+  BasicBlock* entry = schedule.entry();
+  schedule.AddReturn(entry, n0);
+
+  CHECK_EQ(0, entry->PredecessorCount());
+  CHECK_EQ(1, entry->SuccessorCount());
+  CHECK_EQ(schedule.exit(), entry->SuccessorAt(0));
+}
+
+
+TEST(TestScheduleAddThrow) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Graph graph(scope.main_zone());
+  Node* n0 = graph.NewNode(&dummy_operator);
+  BasicBlock* entry = schedule.entry();
+  schedule.AddThrow(entry, n0);
+
+  CHECK_EQ(0, entry->PredecessorCount());
+  CHECK_EQ(1, entry->SuccessorCount());
+  CHECK_EQ(schedule.exit(), entry->SuccessorAt(0));
+}
+
+
+TEST(TestScheduleAddDeopt) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Graph graph(scope.main_zone());
+  Node* n0 = graph.NewNode(&dummy_operator);
+  BasicBlock* entry = schedule.entry();
+  schedule.AddDeoptimize(entry, n0);
+
+  CHECK_EQ(0, entry->PredecessorCount());
+  CHECK_EQ(1, entry->SuccessorCount());
+  CHECK_EQ(schedule.exit(), entry->SuccessorAt(0));
+}
+
+
+TEST(BuildMulNodeGraph) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common(scope.main_zone());
+  MachineOperatorBuilder machine(scope.main_zone(), kMachineWord32);
+
+  Node* start = graph.NewNode(common.Start());
+  graph.SetStart(start);
+  Node* param0 = graph.NewNode(common.Parameter(0));
+  Node* param1 = graph.NewNode(common.Parameter(1));
+
+  Node* mul = graph.NewNode(machine.Int32Mul(), param0, param1);
+  Node* ret = graph.NewNode(common.Return(), mul, start);
+
+  USE(ret);
+}
diff --git a/test/cctest/compiler/compiler/test-scheduler.cc b/test/cctest/compiler/compiler/test-scheduler.cc
new file mode 100644 (file)
index 0000000..6b56f10
--- /dev/null
@@ -0,0 +1,1840 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/scheduler.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
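+// A straight chain of basic blocks with a backedge from the last block to
+// the first, as produced by CreateLoop() below.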
+struct TestLoop {
+  int count;
+  BasicBlock** nodes;
+  BasicBlock* header() { return nodes[0]; }
+  BasicBlock* last() { return nodes[count - 1]; }
+  ~TestLoop() { delete[] nodes; }
+};
+
+
+static TestLoop* CreateLoop(Schedule* schedule, int count) {
+  TestLoop* loop = new TestLoop();
+  loop->count = count;
+  loop->nodes = new BasicBlock* [count];
+  for (int i = 0; i < count; i++) {
+    loop->nodes[i] = schedule->NewBasicBlock();
+    if (i > 0) schedule->AddSuccessor(loop->nodes[i - 1], loop->nodes[i]);
+  }
+  schedule->AddSuccessor(loop->nodes[count - 1], loop->nodes[0]);
+  return loop;
+}
+
+
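+// Checks that the special RPO numbers the blocks consecutively from 0 and,
+// if loops are disallowed, that no block has a loop end set.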
+static void CheckRPONumbers(BasicBlockVector* order, int expected,
+                            bool loops_allowed) {
+  CHECK_EQ(expected, static_cast<int>(order->size()));
+  for (int i = 0; i < static_cast<int>(order->size()); i++) {
+    CHECK(order->at(i)->rpo_number_ == i);
+    if (!loops_allowed) CHECK_LT(order->at(i)->loop_end_, 0);
+  }
+}
+
+
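+// Checks that blocks[0] is a loop header whose body spans exactly body_size
+// blocks, all of which are numbered within the loop's RPO range.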
+static void CheckLoopContains(BasicBlock** blocks, int body_size) {
+  BasicBlock* header = blocks[0];
+  CHECK_GT(header->loop_end_, 0);
+  CHECK_EQ(body_size, (header->loop_end_ - header->rpo_number_));
+  for (int i = 0; i < body_size; i++) {
+    int num = blocks[i]->rpo_number_;
+    CHECK(num >= header->rpo_number_ && num < header->loop_end_);
+    CHECK(header->LoopContains(blocks[i]));
+    CHECK(header->IsLoopHeader() || blocks[i]->loop_header_ == header);
+  }
+}
+
+
+TEST(RPODegenerate1) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 1, false);
+  CHECK_EQ(schedule.entry(), order->at(0));
+}
+
+
+TEST(RPODegenerate2) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  schedule.AddGoto(schedule.entry(), schedule.exit());
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 2, false);
+  CHECK_EQ(schedule.entry(), order->at(0));
+  CHECK_EQ(schedule.exit(), order->at(1));
+}
+
+
+TEST(RPOLine) {
+  HandleAndZoneScope scope;
+
+  for (int i = 0; i < 10; i++) {
+    Schedule schedule(scope.main_zone());
+    Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+    BasicBlock* last = schedule.entry();
+    for (int j = 0; j < i; j++) {
+      BasicBlock* block = schedule.NewBasicBlock();
+      schedule.AddGoto(last, block);
+      last = block;
+    }
+    BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+    CheckRPONumbers(order, 1 + i, false);
+
+    Schedule::BasicBlocks blocks(schedule.all_blocks());
+    for (Schedule::BasicBlocks::iterator iter = blocks.begin();
+         iter != blocks.end(); ++iter) {
+      BasicBlock* block = *iter;
+      if (block->rpo_number_ >= 0 && block->SuccessorCount() == 1) {
+        CHECK(block->rpo_number_ + 1 == block->SuccessorAt(0)->rpo_number_);
+      }
+    }
+  }
+}
+
+
+TEST(RPOSelfLoop) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+  schedule.AddSuccessor(schedule.entry(), schedule.entry());
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 1, true);
+  BasicBlock* loop[] = {schedule.entry()};
+  CheckLoopContains(loop, 1);
+}
+
+
+TEST(RPOEntryLoop) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+  schedule.AddSuccessor(schedule.entry(), schedule.exit());
+  schedule.AddSuccessor(schedule.exit(), schedule.entry());
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 2, true);
+  BasicBlock* loop[] = {schedule.entry(), schedule.exit()};
+  CheckLoopContains(loop, 2);
+}
+
+
+TEST(RPOEndLoop) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+  TestLoop* loop1 = CreateLoop(&schedule, 2);
+  schedule.AddSuccessor(schedule.entry(), loop1->header());
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 3, true);
+  CheckLoopContains(loop1->nodes, loop1->count);
+}
+
+
+TEST(RPOEndLoopNested) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+  TestLoop* loop1 = CreateLoop(&schedule, 2);
+  schedule.AddSuccessor(schedule.entry(), loop1->header());
+  schedule.AddSuccessor(loop1->last(), schedule.entry());
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 3, true);
+  CheckLoopContains(loop1->nodes, loop1->count);
+}
+
+
+TEST(RPODiamond) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  BasicBlock* A = schedule.entry();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* D = schedule.exit();
+
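+  // Build the diamond A -> {B, C} -> D.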
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(A, C);
+  schedule.AddSuccessor(B, D);
+  schedule.AddSuccessor(C, D);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 4, false);
+
+  CHECK_EQ(0, A->rpo_number_);
+  CHECK((B->rpo_number_ == 1 && C->rpo_number_ == 2) ||
+        (B->rpo_number_ == 2 && C->rpo_number_ == 1));
+  CHECK_EQ(3, D->rpo_number_);
+}
+
+
+TEST(RPOLoop1) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  BasicBlock* A = schedule.entry();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* D = schedule.exit();
+
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(B, C);
+  schedule.AddSuccessor(C, B);
+  schedule.AddSuccessor(C, D);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 4, true);
+  BasicBlock* loop[] = {B, C};
+  CheckLoopContains(loop, 2);
+}
+
+
+TEST(RPOLoop2) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  BasicBlock* A = schedule.entry();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* D = schedule.exit();
+
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(B, C);
+  schedule.AddSuccessor(C, B);
+  schedule.AddSuccessor(B, D);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 4, true);
+  BasicBlock* loop[] = {B, C};
+  CheckLoopContains(loop, 2);
+}
+
+
+TEST(RPOLoopN) {
+  HandleAndZoneScope scope;
+
+  for (int i = 0; i < 11; i++) {
+    Schedule schedule(scope.main_zone());
+    Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+    BasicBlock* A = schedule.entry();
+    BasicBlock* B = schedule.NewBasicBlock();
+    BasicBlock* C = schedule.NewBasicBlock();
+    BasicBlock* D = schedule.NewBasicBlock();
+    BasicBlock* E = schedule.NewBasicBlock();
+    BasicBlock* F = schedule.NewBasicBlock();
+    BasicBlock* G = schedule.exit();
+
+    schedule.AddSuccessor(A, B);
+    schedule.AddSuccessor(B, C);
+    schedule.AddSuccessor(C, D);
+    schedule.AddSuccessor(D, E);
+    schedule.AddSuccessor(E, F);
+    schedule.AddSuccessor(F, B);
+    schedule.AddSuccessor(B, G);
+
+    // Throw in extra backedges from time to time.
+    if (i == 1) schedule.AddSuccessor(B, B);
+    if (i == 2) schedule.AddSuccessor(C, B);
+    if (i == 3) schedule.AddSuccessor(D, B);
+    if (i == 4) schedule.AddSuccessor(E, B);
+    if (i == 5) schedule.AddSuccessor(F, B);
+
+    // Throw in extra loop exits from time to time.
+    if (i == 6) schedule.AddSuccessor(B, G);
+    if (i == 7) schedule.AddSuccessor(C, G);
+    if (i == 8) schedule.AddSuccessor(D, G);
+    if (i == 9) schedule.AddSuccessor(E, G);
+    if (i == 10) schedule.AddSuccessor(F, G);
+
+    BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+    CheckRPONumbers(order, 7, true);
+    BasicBlock* loop[] = {B, C, D, E, F};
+    CheckLoopContains(loop, 5);
+  }
+}
+
+
+TEST(RPOLoopNest1) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  BasicBlock* A = schedule.entry();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* D = schedule.NewBasicBlock();
+  BasicBlock* E = schedule.NewBasicBlock();
+  BasicBlock* F = schedule.exit();
+
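+  // Outer loop {B, C, D, E} with inner loop {C, D} nested inside.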
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(B, C);
+  schedule.AddSuccessor(C, D);
+  schedule.AddSuccessor(D, C);
+  schedule.AddSuccessor(D, E);
+  schedule.AddSuccessor(E, B);
+  schedule.AddSuccessor(E, F);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 6, true);
+  BasicBlock* loop1[] = {B, C, D, E};
+  CheckLoopContains(loop1, 4);
+
+  BasicBlock* loop2[] = {C, D};
+  CheckLoopContains(loop2, 2);
+}
+
+
+TEST(RPOLoopNest2) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  BasicBlock* A = schedule.entry();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* D = schedule.NewBasicBlock();
+  BasicBlock* E = schedule.NewBasicBlock();
+  BasicBlock* F = schedule.NewBasicBlock();
+  BasicBlock* G = schedule.NewBasicBlock();
+  BasicBlock* H = schedule.exit();
+
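+  // Three nested loops: G->B closes {B..G}, F->C closes {C..F}, and E->D
+  // closes {D, E}.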
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(B, C);
+  schedule.AddSuccessor(C, D);
+  schedule.AddSuccessor(D, E);
+  schedule.AddSuccessor(E, F);
+  schedule.AddSuccessor(F, G);
+  schedule.AddSuccessor(G, H);
+
+  schedule.AddSuccessor(E, D);
+  schedule.AddSuccessor(F, C);
+  schedule.AddSuccessor(G, B);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 8, true);
+  BasicBlock* loop1[] = {B, C, D, E, F, G};
+  CheckLoopContains(loop1, 6);
+
+  BasicBlock* loop2[] = {C, D, E, F};
+  CheckLoopContains(loop2, 4);
+
+  BasicBlock* loop3[] = {D, E};
+  CheckLoopContains(loop3, 2);
+}
+
+
+TEST(RPOLoopFollow1) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  TestLoop* loop1 = CreateLoop(&schedule, 1);
+  TestLoop* loop2 = CreateLoop(&schedule, 1);
+
+  BasicBlock* A = schedule.entry();
+  BasicBlock* E = schedule.exit();
+
+  schedule.AddSuccessor(A, loop1->header());
+  schedule.AddSuccessor(loop1->header(), loop2->header());
+  schedule.AddSuccessor(loop2->last(), E);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+
+  CheckLoopContains(loop1->nodes, loop1->count);
+
+  CHECK_EQ(schedule.BasicBlockCount(), static_cast<int>(order->size()));
+  CheckLoopContains(loop1->nodes, loop1->count);
+  CheckLoopContains(loop2->nodes, loop2->count);
+  delete loop1;
+  delete loop2;
+}
+
+
+TEST(RPOLoopFollow2) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  TestLoop* loop1 = CreateLoop(&schedule, 1);
+  TestLoop* loop2 = CreateLoop(&schedule, 1);
+
+  BasicBlock* A = schedule.entry();
+  BasicBlock* S = schedule.NewBasicBlock();
+  BasicBlock* E = schedule.exit();
+
+  schedule.AddSuccessor(A, loop1->header());
+  schedule.AddSuccessor(loop1->header(), S);
+  schedule.AddSuccessor(S, loop2->header());
+  schedule.AddSuccessor(loop2->last(), E);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+
+  CheckLoopContains(loop1->nodes, loop1->count);
+
+  CHECK_EQ(schedule.BasicBlockCount(), static_cast<int>(order->size()));
+  CheckLoopContains(loop1->nodes, loop1->count);
+  CheckLoopContains(loop2->nodes, loop2->count);
+  delete loop1;
+  delete loop2;
+}
+
+
+TEST(RPOLoopFollowN) {
+  HandleAndZoneScope scope;
+
+  for (int size = 1; size < 5; size++) {
+    for (int exit = 0; exit < size; exit++) {
+      Schedule schedule(scope.main_zone());
+      Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+      TestLoop* loop1 = CreateLoop(&schedule, size);
+      TestLoop* loop2 = CreateLoop(&schedule, size);
+      BasicBlock* A = schedule.entry();
+      BasicBlock* E = schedule.exit();
+
+      schedule.AddSuccessor(A, loop1->header());
+      schedule.AddSuccessor(loop1->nodes[exit], loop2->header());
+      schedule.AddSuccessor(loop2->nodes[exit], E);
+      BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+      CheckLoopContains(loop1->nodes, loop1->count);
+
+      CHECK_EQ(schedule.BasicBlockCount(), static_cast<int>(order->size()));
+      CheckLoopContains(loop1->nodes, loop1->count);
+      CheckLoopContains(loop2->nodes, loop2->count);
+      delete loop1;
+      delete loop2;
+    }
+  }
+}
+
+
+TEST(RPONestedLoopFollow1) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  TestLoop* loop1 = CreateLoop(&schedule, 1);
+  TestLoop* loop2 = CreateLoop(&schedule, 1);
+
+  BasicBlock* A = schedule.entry();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* E = schedule.exit();
+
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(B, loop1->header());
+  schedule.AddSuccessor(loop1->header(), loop2->header());
+  schedule.AddSuccessor(loop2->last(), C);
+  schedule.AddSuccessor(C, E);
+  schedule.AddSuccessor(C, B);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+
+  CheckLoopContains(loop1->nodes, loop1->count);
+
+  CHECK_EQ(schedule.BasicBlockCount(), static_cast<int>(order->size()));
+  CheckLoopContains(loop1->nodes, loop1->count);
+  CheckLoopContains(loop2->nodes, loop2->count);
+
+  BasicBlock* loop3[] = {B, loop1->nodes[0], loop2->nodes[0], C};
+  CheckLoopContains(loop3, 4);
+  delete loop1;
+  delete loop2;
+}
+
+
+TEST(RPOLoopBackedges1) {
+  HandleAndZoneScope scope;
+
+  int size = 8;
+  for (int i = 0; i < size; i++) {
+    for (int j = 0; j < size; j++) {
+      Schedule schedule(scope.main_zone());
+      Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+      BasicBlock* A = schedule.entry();
+      BasicBlock* E = schedule.exit();
+
+      TestLoop* loop1 = CreateLoop(&schedule, size);
+      schedule.AddSuccessor(A, loop1->header());
+      schedule.AddSuccessor(loop1->last(), E);
+
+      schedule.AddSuccessor(loop1->nodes[i], loop1->header());
+      schedule.AddSuccessor(loop1->nodes[j], E);
+
+      BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+      CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+      CheckLoopContains(loop1->nodes, loop1->count);
+      delete loop1;
+    }
+  }
+}
+
+
+TEST(RPOLoopOutedges1) {
+  HandleAndZoneScope scope;
+
+  int size = 8;
+  for (int i = 0; i < size; i++) {
+    for (int j = 0; j < size; j++) {
+      Schedule schedule(scope.main_zone());
+      Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+      BasicBlock* A = schedule.entry();
+      BasicBlock* D = schedule.NewBasicBlock();
+      BasicBlock* E = schedule.exit();
+
+      TestLoop* loop1 = CreateLoop(&schedule, size);
+      schedule.AddSuccessor(A, loop1->header());
+      schedule.AddSuccessor(loop1->last(), E);
+
+      schedule.AddSuccessor(loop1->nodes[i], loop1->header());
+      schedule.AddSuccessor(loop1->nodes[j], D);
+      schedule.AddSuccessor(D, E);
+
+      BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+      CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+      CheckLoopContains(loop1->nodes, loop1->count);
+      delete loop1;
+    }
+  }
+}
+
+
+TEST(RPOLoopOutedges2) {
+  HandleAndZoneScope scope;
+
+  int size = 8;
+  for (int i = 0; i < size; i++) {
+    Schedule schedule(scope.main_zone());
+    Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+    BasicBlock* A = schedule.entry();
+    BasicBlock* E = schedule.exit();
+
+    TestLoop* loop1 = CreateLoop(&schedule, size);
+    schedule.AddSuccessor(A, loop1->header());
+    schedule.AddSuccessor(loop1->last(), E);
+
+    for (int j = 0; j < size; j++) {
+      BasicBlock* O = schedule.NewBasicBlock();
+      schedule.AddSuccessor(loop1->nodes[j], O);
+      schedule.AddSuccessor(O, E);
+    }
+
+    BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+    CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+    CheckLoopContains(loop1->nodes, loop1->count);
+    delete loop1;
+  }
+}
+
+
+TEST(RPOLoopOutloops1) {
+  HandleAndZoneScope scope;
+
+  int size = 8;
+  for (int i = 0; i < size; i++) {
+    Schedule schedule(scope.main_zone());
+    Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+    BasicBlock* A = schedule.entry();
+    BasicBlock* E = schedule.exit();
+    TestLoop* loop1 = CreateLoop(&schedule, size);
+    schedule.AddSuccessor(A, loop1->header());
+    schedule.AddSuccessor(loop1->last(), E);
+
+    TestLoop** loopN = new TestLoop* [size];
+    for (int j = 0; j < size; j++) {
+      loopN[j] = CreateLoop(&schedule, 2);
+      schedule.AddSuccessor(loop1->nodes[j], loopN[j]->header());
+      schedule.AddSuccessor(loopN[j]->last(), E);
+    }
+
+    BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+    CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+    CheckLoopContains(loop1->nodes, loop1->count);
+
+    for (int j = 0; j < size; j++) {
+      CheckLoopContains(loopN[j]->nodes, loopN[j]->count);
+      delete loopN[j];
+    }
+    delete[] loopN;
+    delete loop1;
+  }
+}
+
+
+TEST(RPOLoopMultibackedge) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  BasicBlock* A = schedule.entry();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* D = schedule.exit();
+  BasicBlock* E = schedule.NewBasicBlock();
+
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(B, C);
+  schedule.AddSuccessor(B, D);
+  schedule.AddSuccessor(B, E);
+  schedule.AddSuccessor(C, B);
+  schedule.AddSuccessor(D, B);
+  schedule.AddSuccessor(E, B);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 5, true);
+
+  BasicBlock* loop1[] = {B, C, D, E};
+  CheckLoopContains(loop1, 4);
+}
+
+
+TEST(BuildScheduleEmpty) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder builder(scope.main_zone());
+  graph.SetStart(graph.NewNode(builder.Start()));
+  graph.SetEnd(graph.NewNode(builder.End(), graph.start()));
+
+  Scheduler scheduler(scope.main_zone());
+  USE(scheduler.NewSchedule(&graph));
+}
+
+
+TEST(BuildScheduleOneParameter) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder builder(scope.main_zone());
+  graph.SetStart(graph.NewNode(builder.Start()));
+
+  Node* p1 = graph.NewNode(builder.Parameter(0));
+  Node* ret = graph.NewNode(builder.Return(), p1, graph.start(), graph.start());
+
+  graph.SetEnd(graph.NewNode(builder.End(), ret));
+
+  Scheduler scheduler(scope.main_zone());
+  USE(scheduler.NewSchedule(&graph));
+}
+
+
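+// Counts every node placed in a block of the schedule, plus one node per
+// block that ends in an explicit control instruction.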
+static int GetScheduledNodeCount(Schedule* schedule) {
+  int node_count = 0;
+  for (BasicBlockVectorIter i = schedule->rpo_order()->begin();
+       i != schedule->rpo_order()->end(); ++i) {
+    BasicBlock* block = *i;
+    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+         ++j) {
+      ++node_count;
+    }
+    BasicBlock::Control control = block->control_;
+    if (control != BasicBlock::kNone) {
+      ++node_count;
+    }
+  }
+  return node_count;
+}
+
+
+static void PrintGraph(Graph* graph) {
+  OFStream os(stdout);
+  os << AsDOT(*graph);
+}
+
+
+static void PrintSchedule(Schedule* schedule) {
+  OFStream os(stdout);
+  os << *schedule << endl;
+}
+
+
+TEST(BuildScheduleIfSplit) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+  graph.SetStart(graph.NewNode(builder.Start()));
+
+  Node* p1 = graph.NewNode(builder.Parameter(0));
+  Node* p2 = graph.NewNode(builder.Parameter(1));
+  Node* p3 = graph.NewNode(builder.Parameter(2));
+  Node* p4 = graph.NewNode(builder.Parameter(3));
+  Node* p5 = graph.NewNode(builder.Parameter(4));
+  Node* cmp = graph.NewNode(js_builder.LessThanOrEqual(), p1, p2, p3,
+                            graph.start(), graph.start());
+  Node* branch = graph.NewNode(builder.Branch(), cmp, graph.start());
+  Node* true_branch = graph.NewNode(builder.IfTrue(), branch);
+  Node* false_branch = graph.NewNode(builder.IfFalse(), branch);
+
+  Node* ret1 = graph.NewNode(builder.Return(), p4, graph.start(), true_branch);
+  Node* ret2 = graph.NewNode(builder.Return(), p5, graph.start(), false_branch);
+  Node* merge = graph.NewNode(builder.Merge(2), ret1, ret2);
+  graph.SetEnd(graph.NewNode(builder.End(), merge));
+
+  PrintGraph(&graph);
+
+  Scheduler scheduler(scope.main_zone());
+  Schedule* schedule = scheduler.NewSchedule(&graph);
+
+  PrintSchedule(schedule);
+
+  CHECK_EQ(13, GetScheduledNodeCount(schedule));
+}
+
+
+TEST(BuildScheduleIfSplitWithEffects) {
+  HandleAndZoneScope scope;
+  Isolate* isolate = scope.main_isolate();
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common_builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+  Operator* op;
+
+  Handle<Object> object =
+      Handle<Object>(isolate->heap()->undefined_value(), isolate);
+  PrintableUnique<Object> unique_constant =
+      PrintableUnique<Object>::CreateUninitialized(scope.main_zone(), object);
+
+  // Manually transcribed code for:
+  // function turbo_fan_test(a, b, c, y) {
+  //   if (a < b) {
+  //     return a + b - c * c - a + y;
+  //   } else {
+  //     return c * c - a;
+  //   }
+  // }
+  Node* nil = graph.NewNode(common_builder.Dead());
+  op = common_builder.End();
+  Node* n23 = graph.NewNode(op, nil);
+  USE(n23);
+  op = common_builder.Merge(2);
+  Node* n22 = graph.NewNode(op, nil, nil);
+  USE(n22);
+  op = common_builder.Return();
+  Node* n16 = graph.NewNode(op, nil, nil, nil);
+  USE(n16);
+  op = js_builder.Add();
+  Node* n15 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n15);
+  op = js_builder.Subtract();
+  Node* n14 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n14);
+  op = js_builder.Subtract();
+  Node* n13 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n13);
+  op = js_builder.Add();
+  Node* n11 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n11);
+  op = common_builder.Parameter(0);
+  Node* n2 = graph.NewNode(op);
+  USE(n2);
+  n11->ReplaceInput(0, n2);
+  op = common_builder.Parameter(0);
+  Node* n3 = graph.NewNode(op);
+  USE(n3);
+  n11->ReplaceInput(1, n3);
+  op = common_builder.HeapConstant(unique_constant);
+  Node* n7 = graph.NewNode(op);
+  USE(n7);
+  n11->ReplaceInput(2, n7);
+  op = js_builder.LessThan();
+  Node* n8 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n8);
+  n8->ReplaceInput(0, n2);
+  n8->ReplaceInput(1, n3);
+  n8->ReplaceInput(2, n7);
+  op = common_builder.Start();
+  Node* n0 = graph.NewNode(op);
+  USE(n0);
+  n8->ReplaceInput(3, n0);
+  n8->ReplaceInput(4, n0);
+  n11->ReplaceInput(3, n8);
+  op = common_builder.IfTrue();
+  Node* n10 = graph.NewNode(op, nil);
+  USE(n10);
+  op = common_builder.Branch();
+  Node* n9 = graph.NewNode(op, nil, nil);
+  USE(n9);
+  n9->ReplaceInput(0, n8);
+  n9->ReplaceInput(1, n0);
+  n10->ReplaceInput(0, n9);
+  n11->ReplaceInput(4, n10);
+  n13->ReplaceInput(0, n11);
+  op = js_builder.Multiply();
+  Node* n12 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n12);
+  op = common_builder.Parameter(0);
+  Node* n4 = graph.NewNode(op);
+  USE(n4);
+  n12->ReplaceInput(0, n4);
+  n12->ReplaceInput(1, n4);
+  n12->ReplaceInput(2, n7);
+  n12->ReplaceInput(3, n11);
+  n12->ReplaceInput(4, n10);
+  n13->ReplaceInput(1, n12);
+  n13->ReplaceInput(2, n7);
+  n13->ReplaceInput(3, n12);
+  n13->ReplaceInput(4, n10);
+  n14->ReplaceInput(0, n13);
+  n14->ReplaceInput(1, n2);
+  n14->ReplaceInput(2, n7);
+  n14->ReplaceInput(3, n13);
+  n14->ReplaceInput(4, n10);
+  n15->ReplaceInput(0, n14);
+  op = common_builder.Parameter(0);
+  Node* n5 = graph.NewNode(op);
+  USE(n5);
+  n15->ReplaceInput(1, n5);
+  n15->ReplaceInput(2, n7);
+  n15->ReplaceInput(3, n14);
+  n15->ReplaceInput(4, n10);
+  n16->ReplaceInput(0, n15);
+  n16->ReplaceInput(1, n15);
+  n16->ReplaceInput(2, n10);
+  n22->ReplaceInput(0, n16);
+  op = common_builder.Return();
+  Node* n21 = graph.NewNode(op, nil, nil, nil);
+  USE(n21);
+  op = js_builder.Subtract();
+  Node* n20 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n20);
+  op = js_builder.Multiply();
+  Node* n19 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n19);
+  n19->ReplaceInput(0, n4);
+  n19->ReplaceInput(1, n4);
+  n19->ReplaceInput(2, n7);
+  n19->ReplaceInput(3, n8);
+  op = common_builder.IfFalse();
+  Node* n18 = graph.NewNode(op, nil);
+  USE(n18);
+  n18->ReplaceInput(0, n9);
+  n19->ReplaceInput(4, n18);
+  n20->ReplaceInput(0, n19);
+  n20->ReplaceInput(1, n2);
+  n20->ReplaceInput(2, n7);
+  n20->ReplaceInput(3, n19);
+  n20->ReplaceInput(4, n18);
+  n21->ReplaceInput(0, n20);
+  n21->ReplaceInput(1, n20);
+  n21->ReplaceInput(2, n18);
+  n22->ReplaceInput(1, n21);
+  n23->ReplaceInput(0, n22);
+
+  graph.SetStart(n0);
+  graph.SetEnd(n23);
+
+  PrintGraph(&graph);
+
+  Scheduler scheduler(scope.main_zone());
+  Schedule* schedule = scheduler.NewSchedule(&graph);
+
+  PrintSchedule(schedule);
+
+  CHECK_EQ(20, GetScheduledNodeCount(schedule));
+}
+
+
+TEST(BuildScheduleSimpleLoop) {
+  HandleAndZoneScope scope;
+  Isolate* isolate = scope.main_isolate();
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common_builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+  Operator* op;
+
+  Handle<Object> object =
+      Handle<Object>(isolate->heap()->undefined_value(), isolate);
+  PrintableUnique<Object> unique_constant =
+      PrintableUnique<Object>::CreateUninitialized(scope.main_zone(), object);
+
+  // Manually transcribed code for:
+  // function turbo_fan_test(a, b) {
+  //   while (a < b) {
+  //     a++;
+  //   }
+  //   return a;
+  // }
+  Node* nil = graph.NewNode(common_builder.Dead());
+  op = common_builder.End();
+  Node* n20 = graph.NewNode(op, nil);
+  USE(n20);
+  op = common_builder.Return();
+  Node* n19 = graph.NewNode(op, nil, nil, nil);
+  USE(n19);
+  op = common_builder.Phi(2);
+  Node* n8 = graph.NewNode(op, nil, nil, nil);
+  USE(n8);
+  op = common_builder.Parameter(0);
+  Node* n2 = graph.NewNode(op);
+  USE(n2);
+  n8->ReplaceInput(0, n2);
+  op = js_builder.Add();
+  Node* n18 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n18);
+  op = js_builder.ToNumber();
+  Node* n16 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n16);
+  n16->ReplaceInput(0, n8);
+  op = common_builder.HeapConstant(unique_constant);
+  Node* n5 = graph.NewNode(op);
+  USE(n5);
+  n16->ReplaceInput(1, n5);
+  op = js_builder.LessThan();
+  Node* n12 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n12);
+  n12->ReplaceInput(0, n8);
+  op = common_builder.Phi(2);
+  Node* n9 = graph.NewNode(op, nil, nil, nil);
+  USE(n9);
+  op = common_builder.Parameter(0);
+  Node* n3 = graph.NewNode(op);
+  USE(n3);
+  n9->ReplaceInput(0, n3);
+  n9->ReplaceInput(1, n9);
+  op = common_builder.Loop(2);
+  Node* n6 = graph.NewNode(op, nil, nil);
+  USE(n6);
+  op = common_builder.Start();
+  Node* n0 = graph.NewNode(op);
+  USE(n0);
+  n6->ReplaceInput(0, n0);
+  op = common_builder.IfTrue();
+  Node* n14 = graph.NewNode(op, nil);
+  USE(n14);
+  op = common_builder.Branch();
+  Node* n13 = graph.NewNode(op, nil, nil);
+  USE(n13);
+  n13->ReplaceInput(0, n12);
+  n13->ReplaceInput(1, n6);
+  n14->ReplaceInput(0, n13);
+  n6->ReplaceInput(1, n14);
+  n9->ReplaceInput(2, n6);
+  n12->ReplaceInput(1, n9);
+  n12->ReplaceInput(2, n5);
+  op = common_builder.Phi(2);
+  Node* n10 = graph.NewNode(op, nil, nil, nil);
+  USE(n10);
+  n10->ReplaceInput(0, n0);
+  n10->ReplaceInput(1, n18);
+  n10->ReplaceInput(2, n6);
+  n12->ReplaceInput(3, n10);
+  n12->ReplaceInput(4, n6);
+  n16->ReplaceInput(2, n12);
+  n16->ReplaceInput(3, n14);
+  n18->ReplaceInput(0, n16);
+  op = common_builder.NumberConstant(0);
+  Node* n17 = graph.NewNode(op);
+  USE(n17);
+  n18->ReplaceInput(1, n17);
+  n18->ReplaceInput(2, n5);
+  n18->ReplaceInput(3, n16);
+  n18->ReplaceInput(4, n14);
+  n8->ReplaceInput(1, n18);
+  n8->ReplaceInput(2, n6);
+  n19->ReplaceInput(0, n8);
+  n19->ReplaceInput(1, n12);
+  op = common_builder.IfFalse();
+  Node* n15 = graph.NewNode(op, nil);
+  USE(n15);
+  n15->ReplaceInput(0, n13);
+  n19->ReplaceInput(2, n15);
+  n20->ReplaceInput(0, n19);
+
+  graph.SetStart(n0);
+  graph.SetEnd(n20);
+
+  PrintGraph(&graph);
+
+  Scheduler scheduler(scope.main_zone());
+  Schedule* schedule = scheduler.NewSchedule(&graph);
+
+  PrintSchedule(schedule);
+
+  CHECK_EQ(19, GetScheduledNodeCount(schedule));
+}
+
+
+TEST(BuildScheduleComplexLoops) {
+  HandleAndZoneScope scope;
+  Isolate* isolate = scope.main_isolate();
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common_builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+  Operator* op;
+
+  Handle<Object> object =
+      Handle<Object>(isolate->heap()->undefined_value(), isolate);
+  PrintableUnique<Object> unique_constant =
+      PrintableUnique<Object>::CreateUninitialized(scope.main_zone(), object);
+
+  // Manually transcribed code for:
+  // function turbo_fan_test(a, b, c) {
+  //   while (a < b) {
+  //     a++;
+  //     while (c < b) {
+  //       c++;
+  //     }
+  //   }
+  //   while (a < b) {
+  //     a += 2;
+  //   }
+  //   return a;
+  // }
+  Node* nil = graph.NewNode(common_builder.Dead());
+  op = common_builder.End();
+  Node* n46 = graph.NewNode(op, nil);
+  USE(n46);
+  op = common_builder.Return();
+  Node* n45 = graph.NewNode(op, nil, nil, nil);
+  USE(n45);
+  op = common_builder.Phi(2);
+  Node* n35 = graph.NewNode(op, nil, nil, nil);
+  USE(n35);
+  op = common_builder.Phi(2);
+  Node* n9 = graph.NewNode(op, nil, nil, nil);
+  USE(n9);
+  op = common_builder.Parameter(0);
+  Node* n2 = graph.NewNode(op);
+  USE(n2);
+  n9->ReplaceInput(0, n2);
+  op = common_builder.Phi(2);
+  Node* n23 = graph.NewNode(op, nil, nil, nil);
+  USE(n23);
+  op = js_builder.Add();
+  Node* n20 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n20);
+  op = js_builder.ToNumber();
+  Node* n18 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n18);
+  n18->ReplaceInput(0, n9);
+  op = common_builder.HeapConstant(unique_constant);
+  Node* n6 = graph.NewNode(op);
+  USE(n6);
+  n18->ReplaceInput(1, n6);
+  op = js_builder.LessThan();
+  Node* n14 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n14);
+  n14->ReplaceInput(0, n9);
+  op = common_builder.Phi(2);
+  Node* n10 = graph.NewNode(op, nil, nil, nil);
+  USE(n10);
+  op = common_builder.Parameter(0);
+  Node* n3 = graph.NewNode(op);
+  USE(n3);
+  n10->ReplaceInput(0, n3);
+  op = common_builder.Phi(2);
+  Node* n24 = graph.NewNode(op, nil, nil, nil);
+  USE(n24);
+  n24->ReplaceInput(0, n10);
+  n24->ReplaceInput(1, n24);
+  op = common_builder.Loop(2);
+  Node* n21 = graph.NewNode(op, nil, nil);
+  USE(n21);
+  op = common_builder.IfTrue();
+  Node* n16 = graph.NewNode(op, nil);
+  USE(n16);
+  op = common_builder.Branch();
+  Node* n15 = graph.NewNode(op, nil, nil);
+  USE(n15);
+  n15->ReplaceInput(0, n14);
+  op = common_builder.Loop(2);
+  Node* n7 = graph.NewNode(op, nil, nil);
+  USE(n7);
+  op = common_builder.Start();
+  Node* n0 = graph.NewNode(op);
+  USE(n0);
+  n7->ReplaceInput(0, n0);
+  op = common_builder.IfFalse();
+  Node* n30 = graph.NewNode(op, nil);
+  USE(n30);
+  op = common_builder.Branch();
+  Node* n28 = graph.NewNode(op, nil, nil);
+  USE(n28);
+  op = js_builder.LessThan();
+  Node* n27 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n27);
+  op = common_builder.Phi(2);
+  Node* n25 = graph.NewNode(op, nil, nil, nil);
+  USE(n25);
+  op = common_builder.Phi(2);
+  Node* n11 = graph.NewNode(op, nil, nil, nil);
+  USE(n11);
+  op = common_builder.Parameter(0);
+  Node* n4 = graph.NewNode(op);
+  USE(n4);
+  n11->ReplaceInput(0, n4);
+  n11->ReplaceInput(1, n25);
+  n11->ReplaceInput(2, n7);
+  n25->ReplaceInput(0, n11);
+  op = js_builder.Add();
+  Node* n32 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n32);
+  op = js_builder.ToNumber();
+  Node* n31 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n31);
+  n31->ReplaceInput(0, n25);
+  n31->ReplaceInput(1, n6);
+  n31->ReplaceInput(2, n27);
+  op = common_builder.IfTrue();
+  Node* n29 = graph.NewNode(op, nil);
+  USE(n29);
+  n29->ReplaceInput(0, n28);
+  n31->ReplaceInput(3, n29);
+  n32->ReplaceInput(0, n31);
+  op = common_builder.NumberConstant(0);
+  Node* n19 = graph.NewNode(op);
+  USE(n19);
+  n32->ReplaceInput(1, n19);
+  n32->ReplaceInput(2, n6);
+  n32->ReplaceInput(3, n31);
+  n32->ReplaceInput(4, n29);
+  n25->ReplaceInput(1, n32);
+  n25->ReplaceInput(2, n21);
+  n27->ReplaceInput(0, n25);
+  n27->ReplaceInput(1, n24);
+  n27->ReplaceInput(2, n6);
+  op = common_builder.Phi(2);
+  Node* n26 = graph.NewNode(op, nil, nil, nil);
+  USE(n26);
+  n26->ReplaceInput(0, n20);
+  n26->ReplaceInput(1, n32);
+  n26->ReplaceInput(2, n21);
+  n27->ReplaceInput(3, n26);
+  n27->ReplaceInput(4, n21);
+  n28->ReplaceInput(0, n27);
+  n28->ReplaceInput(1, n21);
+  n30->ReplaceInput(0, n28);
+  n7->ReplaceInput(1, n30);
+  n15->ReplaceInput(1, n7);
+  n16->ReplaceInput(0, n15);
+  n21->ReplaceInput(0, n16);
+  n21->ReplaceInput(1, n29);
+  n24->ReplaceInput(2, n21);
+  n10->ReplaceInput(1, n24);
+  n10->ReplaceInput(2, n7);
+  n14->ReplaceInput(1, n10);
+  n14->ReplaceInput(2, n6);
+  op = common_builder.Phi(2);
+  Node* n12 = graph.NewNode(op, nil, nil, nil);
+  USE(n12);
+  n12->ReplaceInput(0, n0);
+  n12->ReplaceInput(1, n27);
+  n12->ReplaceInput(2, n7);
+  n14->ReplaceInput(3, n12);
+  n14->ReplaceInput(4, n7);
+  n18->ReplaceInput(2, n14);
+  n18->ReplaceInput(3, n16);
+  n20->ReplaceInput(0, n18);
+  n20->ReplaceInput(1, n19);
+  n20->ReplaceInput(2, n6);
+  n20->ReplaceInput(3, n18);
+  n20->ReplaceInput(4, n16);
+  n23->ReplaceInput(0, n20);
+  n23->ReplaceInput(1, n23);
+  n23->ReplaceInput(2, n21);
+  n9->ReplaceInput(1, n23);
+  n9->ReplaceInput(2, n7);
+  n35->ReplaceInput(0, n9);
+  op = js_builder.Add();
+  Node* n44 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n44);
+  n44->ReplaceInput(0, n35);
+  op = common_builder.NumberConstant(0);
+  Node* n43 = graph.NewNode(op);
+  USE(n43);
+  n44->ReplaceInput(1, n43);
+  n44->ReplaceInput(2, n6);
+  op = js_builder.LessThan();
+  Node* n39 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n39);
+  n39->ReplaceInput(0, n35);
+  op = common_builder.Phi(2);
+  Node* n36 = graph.NewNode(op, nil, nil, nil);
+  USE(n36);
+  n36->ReplaceInput(0, n10);
+  n36->ReplaceInput(1, n36);
+  op = common_builder.Loop(2);
+  Node* n33 = graph.NewNode(op, nil, nil);
+  USE(n33);
+  op = common_builder.IfFalse();
+  Node* n17 = graph.NewNode(op, nil);
+  USE(n17);
+  n17->ReplaceInput(0, n15);
+  n33->ReplaceInput(0, n17);
+  op = common_builder.IfTrue();
+  Node* n41 = graph.NewNode(op, nil);
+  USE(n41);
+  op = common_builder.Branch();
+  Node* n40 = graph.NewNode(op, nil, nil);
+  USE(n40);
+  n40->ReplaceInput(0, n39);
+  n40->ReplaceInput(1, n33);
+  n41->ReplaceInput(0, n40);
+  n33->ReplaceInput(1, n41);
+  n36->ReplaceInput(2, n33);
+  n39->ReplaceInput(1, n36);
+  n39->ReplaceInput(2, n6);
+  op = common_builder.Phi(2);
+  Node* n38 = graph.NewNode(op, nil, nil, nil);
+  USE(n38);
+  n38->ReplaceInput(0, n14);
+  n38->ReplaceInput(1, n44);
+  n38->ReplaceInput(2, n33);
+  n39->ReplaceInput(3, n38);
+  n39->ReplaceInput(4, n33);
+  n44->ReplaceInput(3, n39);
+  n44->ReplaceInput(4, n41);
+  n35->ReplaceInput(1, n44);
+  n35->ReplaceInput(2, n33);
+  n45->ReplaceInput(0, n35);
+  n45->ReplaceInput(1, n39);
+  op = common_builder.IfFalse();
+  Node* n42 = graph.NewNode(op, nil);
+  USE(n42);
+  n42->ReplaceInput(0, n40);
+  n45->ReplaceInput(2, n42);
+  n46->ReplaceInput(0, n45);
+
+  graph.SetStart(n0);
+  graph.SetEnd(n46);
+
+  PrintGraph(&graph);
+
+  Scheduler scheduler(scope.main_zone());
+  Schedule* schedule = scheduler.NewSchedule(&graph);
+
+  PrintSchedule(schedule);
+
+  CHECK_EQ(46, GetScheduledNodeCount(schedule));
+}
+
+
+TEST(BuildScheduleBreakAndContinue) {
+  HandleAndZoneScope scope;
+  Isolate* isolate = scope.main_isolate();
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common_builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+  Operator* op;
+
+  Handle<Object> object =
+      Handle<Object>(isolate->heap()->undefined_value(), isolate);
+  PrintableUnique<Object> unique_constant =
+      PrintableUnique<Object>::CreateUninitialized(scope.main_zone(), object);
+
+  // Manually transcribed code for:
+  // function turbo_fan_test(a, b, c) {
+  //   var d = 0;
+  //   while (a < b) {
+  //     a++;
+  //     while (c < b) {
+  //       c++;
+  //       if (d == 0) break;
+  //       a++;
+  //     }
+  //     if (a == 1) continue;
+  //     d++;
+  //   }
+  //   return a + d;
+  // }
+  Node* nil = graph.NewNode(common_builder.Dead());
+  op = common_builder.End();
+  Node* n58 = graph.NewNode(op, nil);
+  USE(n58);
+  op = common_builder.Return();
+  Node* n57 = graph.NewNode(op, nil, nil, nil);
+  USE(n57);
+  op = js_builder.Add();
+  Node* n56 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n56);
+  op = common_builder.Phi(2);
+  Node* n10 = graph.NewNode(op, nil, nil, nil);
+  USE(n10);
+  op = common_builder.Parameter(0);
+  Node* n2 = graph.NewNode(op);
+  USE(n2);
+  n10->ReplaceInput(0, n2);
+  op = common_builder.Phi(2);
+  Node* n25 = graph.NewNode(op, nil, nil, nil);
+  USE(n25);
+  op = js_builder.Add();
+  Node* n22 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n22);
+  op = js_builder.ToNumber();
+  Node* n20 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n20);
+  n20->ReplaceInput(0, n10);
+  op = common_builder.HeapConstant(unique_constant);
+  Node* n6 = graph.NewNode(op);
+  USE(n6);
+  n20->ReplaceInput(1, n6);
+  op = js_builder.LessThan();
+  Node* n16 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n16);
+  n16->ReplaceInput(0, n10);
+  op = common_builder.Phi(2);
+  Node* n11 = graph.NewNode(op, nil, nil, nil);
+  USE(n11);
+  op = common_builder.Parameter(0);
+  Node* n3 = graph.NewNode(op);
+  USE(n3);
+  n11->ReplaceInput(0, n3);
+  op = common_builder.Phi(2);
+  Node* n26 = graph.NewNode(op, nil, nil, nil);
+  USE(n26);
+  n26->ReplaceInput(0, n11);
+  n26->ReplaceInput(1, n26);
+  op = common_builder.Loop(2);
+  Node* n23 = graph.NewNode(op, nil, nil);
+  USE(n23);
+  op = common_builder.IfTrue();
+  Node* n18 = graph.NewNode(op, nil);
+  USE(n18);
+  op = common_builder.Branch();
+  Node* n17 = graph.NewNode(op, nil, nil);
+  USE(n17);
+  n17->ReplaceInput(0, n16);
+  op = common_builder.Loop(2);
+  Node* n8 = graph.NewNode(op, nil, nil);
+  USE(n8);
+  op = common_builder.Start();
+  Node* n0 = graph.NewNode(op);
+  USE(n0);
+  n8->ReplaceInput(0, n0);
+  op = common_builder.Merge(2);
+  Node* n53 = graph.NewNode(op, nil, nil);
+  USE(n53);
+  op = common_builder.IfTrue();
+  Node* n49 = graph.NewNode(op, nil);
+  USE(n49);
+  op = common_builder.Branch();
+  Node* n48 = graph.NewNode(op, nil, nil);
+  USE(n48);
+  op = js_builder.Equal();
+  Node* n47 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n47);
+  n47->ReplaceInput(0, n25);
+  op = common_builder.NumberConstant(0);
+  Node* n46 = graph.NewNode(op);
+  USE(n46);
+  n47->ReplaceInput(1, n46);
+  n47->ReplaceInput(2, n6);
+  op = common_builder.Phi(2);
+  Node* n42 = graph.NewNode(op, nil, nil, nil);
+  USE(n42);
+  op = js_builder.LessThan();
+  Node* n30 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n30);
+  op = common_builder.Phi(2);
+  Node* n27 = graph.NewNode(op, nil, nil, nil);
+  USE(n27);
+  op = common_builder.Phi(2);
+  Node* n12 = graph.NewNode(op, nil, nil, nil);
+  USE(n12);
+  op = common_builder.Parameter(0);
+  Node* n4 = graph.NewNode(op);
+  USE(n4);
+  n12->ReplaceInput(0, n4);
+  op = common_builder.Phi(2);
+  Node* n41 = graph.NewNode(op, nil, nil, nil);
+  USE(n41);
+  n41->ReplaceInput(0, n27);
+  op = js_builder.Add();
+  Node* n35 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n35);
+  op = js_builder.ToNumber();
+  Node* n34 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n34);
+  n34->ReplaceInput(0, n27);
+  n34->ReplaceInput(1, n6);
+  n34->ReplaceInput(2, n30);
+  op = common_builder.IfTrue();
+  Node* n32 = graph.NewNode(op, nil);
+  USE(n32);
+  op = common_builder.Branch();
+  Node* n31 = graph.NewNode(op, nil, nil);
+  USE(n31);
+  n31->ReplaceInput(0, n30);
+  n31->ReplaceInput(1, n23);
+  n32->ReplaceInput(0, n31);
+  n34->ReplaceInput(3, n32);
+  n35->ReplaceInput(0, n34);
+  op = common_builder.NumberConstant(0);
+  Node* n21 = graph.NewNode(op);
+  USE(n21);
+  n35->ReplaceInput(1, n21);
+  n35->ReplaceInput(2, n6);
+  n35->ReplaceInput(3, n34);
+  n35->ReplaceInput(4, n32);
+  n41->ReplaceInput(1, n35);
+  op = common_builder.Merge(2);
+  Node* n40 = graph.NewNode(op, nil, nil);
+  USE(n40);
+  op = common_builder.IfFalse();
+  Node* n33 = graph.NewNode(op, nil);
+  USE(n33);
+  n33->ReplaceInput(0, n31);
+  n40->ReplaceInput(0, n33);
+  op = common_builder.IfTrue();
+  Node* n39 = graph.NewNode(op, nil);
+  USE(n39);
+  op = common_builder.Branch();
+  Node* n38 = graph.NewNode(op, nil, nil);
+  USE(n38);
+  op = js_builder.Equal();
+  Node* n37 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n37);
+  op = common_builder.Phi(2);
+  Node* n28 = graph.NewNode(op, nil, nil, nil);
+  USE(n28);
+  op = common_builder.Phi(2);
+  Node* n13 = graph.NewNode(op, nil, nil, nil);
+  USE(n13);
+  op = common_builder.NumberConstant(0);
+  Node* n7 = graph.NewNode(op);
+  USE(n7);
+  n13->ReplaceInput(0, n7);
+  op = common_builder.Phi(2);
+  Node* n54 = graph.NewNode(op, nil, nil, nil);
+  USE(n54);
+  n54->ReplaceInput(0, n28);
+  op = js_builder.Add();
+  Node* n52 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n52);
+  op = js_builder.ToNumber();
+  Node* n51 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n51);
+  n51->ReplaceInput(0, n28);
+  n51->ReplaceInput(1, n6);
+  n51->ReplaceInput(2, n47);
+  op = common_builder.IfFalse();
+  Node* n50 = graph.NewNode(op, nil);
+  USE(n50);
+  n50->ReplaceInput(0, n48);
+  n51->ReplaceInput(3, n50);
+  n52->ReplaceInput(0, n51);
+  n52->ReplaceInput(1, n21);
+  n52->ReplaceInput(2, n6);
+  n52->ReplaceInput(3, n51);
+  n52->ReplaceInput(4, n50);
+  n54->ReplaceInput(1, n52);
+  n54->ReplaceInput(2, n53);
+  n13->ReplaceInput(1, n54);
+  n13->ReplaceInput(2, n8);
+  n28->ReplaceInput(0, n13);
+  n28->ReplaceInput(1, n28);
+  n28->ReplaceInput(2, n23);
+  n37->ReplaceInput(0, n28);
+  op = common_builder.NumberConstant(0);
+  Node* n36 = graph.NewNode(op);
+  USE(n36);
+  n37->ReplaceInput(1, n36);
+  n37->ReplaceInput(2, n6);
+  n37->ReplaceInput(3, n35);
+  n37->ReplaceInput(4, n32);
+  n38->ReplaceInput(0, n37);
+  n38->ReplaceInput(1, n32);
+  n39->ReplaceInput(0, n38);
+  n40->ReplaceInput(1, n39);
+  n41->ReplaceInput(2, n40);
+  n12->ReplaceInput(1, n41);
+  n12->ReplaceInput(2, n8);
+  n27->ReplaceInput(0, n12);
+  n27->ReplaceInput(1, n35);
+  n27->ReplaceInput(2, n23);
+  n30->ReplaceInput(0, n27);
+  n30->ReplaceInput(1, n26);
+  n30->ReplaceInput(2, n6);
+  op = common_builder.Phi(2);
+  Node* n29 = graph.NewNode(op, nil, nil, nil);
+  USE(n29);
+  n29->ReplaceInput(0, n22);
+  op = js_builder.Add();
+  Node* n45 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n45);
+  op = js_builder.ToNumber();
+  Node* n44 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n44);
+  n44->ReplaceInput(0, n25);
+  n44->ReplaceInput(1, n6);
+  n44->ReplaceInput(2, n37);
+  op = common_builder.IfFalse();
+  Node* n43 = graph.NewNode(op, nil);
+  USE(n43);
+  n43->ReplaceInput(0, n38);
+  n44->ReplaceInput(3, n43);
+  n45->ReplaceInput(0, n44);
+  n45->ReplaceInput(1, n21);
+  n45->ReplaceInput(2, n6);
+  n45->ReplaceInput(3, n44);
+  n45->ReplaceInput(4, n43);
+  n29->ReplaceInput(1, n45);
+  n29->ReplaceInput(2, n23);
+  n30->ReplaceInput(3, n29);
+  n30->ReplaceInput(4, n23);
+  n42->ReplaceInput(0, n30);
+  n42->ReplaceInput(1, n37);
+  n42->ReplaceInput(2, n40);
+  n47->ReplaceInput(3, n42);
+  n47->ReplaceInput(4, n40);
+  n48->ReplaceInput(0, n47);
+  n48->ReplaceInput(1, n40);
+  n49->ReplaceInput(0, n48);
+  n53->ReplaceInput(0, n49);
+  n53->ReplaceInput(1, n50);
+  n8->ReplaceInput(1, n53);
+  n17->ReplaceInput(1, n8);
+  n18->ReplaceInput(0, n17);
+  n23->ReplaceInput(0, n18);
+  n23->ReplaceInput(1, n43);
+  n26->ReplaceInput(2, n23);
+  n11->ReplaceInput(1, n26);
+  n11->ReplaceInput(2, n8);
+  n16->ReplaceInput(1, n11);
+  n16->ReplaceInput(2, n6);
+  op = common_builder.Phi(2);
+  Node* n14 = graph.NewNode(op, nil, nil, nil);
+  USE(n14);
+  n14->ReplaceInput(0, n0);
+  op = common_builder.Phi(2);
+  Node* n55 = graph.NewNode(op, nil, nil, nil);
+  USE(n55);
+  n55->ReplaceInput(0, n47);
+  n55->ReplaceInput(1, n52);
+  n55->ReplaceInput(2, n53);
+  n14->ReplaceInput(1, n55);
+  n14->ReplaceInput(2, n8);
+  n16->ReplaceInput(3, n14);
+  n16->ReplaceInput(4, n8);
+  n20->ReplaceInput(2, n16);
+  n20->ReplaceInput(3, n18);
+  n22->ReplaceInput(0, n20);
+  n22->ReplaceInput(1, n21);
+  n22->ReplaceInput(2, n6);
+  n22->ReplaceInput(3, n20);
+  n22->ReplaceInput(4, n18);
+  n25->ReplaceInput(0, n22);
+  n25->ReplaceInput(1, n45);
+  n25->ReplaceInput(2, n23);
+  n10->ReplaceInput(1, n25);
+  n10->ReplaceInput(2, n8);
+  n56->ReplaceInput(0, n10);
+  n56->ReplaceInput(1, n13);
+  n56->ReplaceInput(2, n6);
+  n56->ReplaceInput(3, n16);
+  op = common_builder.IfFalse();
+  Node* n19 = graph.NewNode(op, nil);
+  USE(n19);
+  n19->ReplaceInput(0, n17);
+  n56->ReplaceInput(4, n19);
+  n57->ReplaceInput(0, n56);
+  n57->ReplaceInput(1, n56);
+  n57->ReplaceInput(2, n19);
+  n58->ReplaceInput(0, n57);
+
+  graph.SetStart(n0);
+  graph.SetEnd(n58);
+
+  PrintGraph(&graph);
+
+  Scheduler scheduler(scope.main_zone());
+  Schedule* schedule = scheduler.NewSchedule(&graph);
+
+  PrintSchedule(schedule);
+
+  CHECK_EQ(62, GetScheduledNodeCount(schedule));
+}
+
+
+TEST(BuildScheduleSimpleLoopWithCodeMotion) {
+  HandleAndZoneScope scope;
+  Isolate* isolate = scope.main_isolate();
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common_builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+  MachineOperatorBuilder machine_builder(scope.main_zone(), kMachineWord32);
+  Operator* op;
+
+  Handle<Object> object =
+      Handle<Object>(isolate->heap()->undefined_value(), isolate);
+  PrintableUnique<Object> unique_constant =
+      PrintableUnique<Object>::CreateUninitialized(scope.main_zone(), object);
+
+  // Manually transcribed code for:
+  // function turbo_fan_test(a, b, c) {
+  //   while (a < b) {
+  //     a += b + c;
+  //   }
+  //   return a;
+  // }
+  Node* nil = graph.NewNode(common_builder.Dead());
+  op = common_builder.End();
+  Node* n22 = graph.NewNode(op, nil);
+  USE(n22);
+  op = common_builder.Return();
+  Node* n21 = graph.NewNode(op, nil, nil, nil);
+  USE(n21);
+  op = common_builder.Phi(2);
+  Node* n9 = graph.NewNode(op, nil, nil, nil);
+  USE(n9);
+  op = common_builder.Parameter(0);
+  Node* n2 = graph.NewNode(op);
+  USE(n2);
+  n9->ReplaceInput(0, n2);
+  op = js_builder.Add();
+  Node* n20 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n20);
+  n20->ReplaceInput(0, n9);
+  op = machine_builder.Int32Add();
+  Node* n19 = graph.NewNode(op, nil, nil);
+  USE(n19);
+  op = common_builder.Phi(2);
+  Node* n10 = graph.NewNode(op, nil, nil, nil);
+  USE(n10);
+  op = common_builder.Parameter(0);
+  Node* n3 = graph.NewNode(op);
+  USE(n3);
+  n10->ReplaceInput(0, n3);
+  n10->ReplaceInput(1, n10);
+  op = common_builder.Loop(2);
+  Node* n7 = graph.NewNode(op, nil, nil);
+  USE(n7);
+  op = common_builder.Start();
+  Node* n0 = graph.NewNode(op);
+  USE(n0);
+  n7->ReplaceInput(0, n0);
+  op = common_builder.IfTrue();
+  Node* n17 = graph.NewNode(op, nil);
+  USE(n17);
+  op = common_builder.Branch();
+  Node* n16 = graph.NewNode(op, nil, nil);
+  USE(n16);
+  op = js_builder.ToBoolean();
+  Node* n15 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n15);
+  op = js_builder.LessThan();
+  Node* n14 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n14);
+  n14->ReplaceInput(0, n9);
+  n14->ReplaceInput(1, n10);
+  op = common_builder.HeapConstant(unique_constant);
+  Node* n6 = graph.NewNode(op);
+  USE(n6);
+  n14->ReplaceInput(2, n6);
+  op = common_builder.Phi(2);
+  Node* n12 = graph.NewNode(op, nil, nil, nil);
+  USE(n12);
+  n12->ReplaceInput(0, n0);
+  n12->ReplaceInput(1, n20);
+  n12->ReplaceInput(2, n7);
+  n14->ReplaceInput(3, n12);
+  n14->ReplaceInput(4, n7);
+  n15->ReplaceInput(0, n14);
+  n15->ReplaceInput(1, n6);
+  n15->ReplaceInput(2, n14);
+  n15->ReplaceInput(3, n7);
+  n16->ReplaceInput(0, n15);
+  n16->ReplaceInput(1, n7);
+  n17->ReplaceInput(0, n16);
+  n7->ReplaceInput(1, n17);
+  n10->ReplaceInput(2, n7);
+  n19->ReplaceInput(0, n2);
+  op = common_builder.Phi(2);
+  Node* n11 = graph.NewNode(op, nil, nil, nil);
+  USE(n11);
+  op = common_builder.Parameter(0);
+  Node* n4 = graph.NewNode(op);
+  USE(n4);
+  n11->ReplaceInput(0, n4);
+  n11->ReplaceInput(1, n11);
+  n11->ReplaceInput(2, n7);
+  n19->ReplaceInput(1, n3);
+  n20->ReplaceInput(1, n19);
+  n20->ReplaceInput(2, n6);
+  n20->ReplaceInput(3, n19);
+  n20->ReplaceInput(4, n17);
+  n9->ReplaceInput(1, n20);
+  n9->ReplaceInput(2, n7);
+  n21->ReplaceInput(0, n9);
+  n21->ReplaceInput(1, n15);
+  op = common_builder.IfFalse();
+  Node* n18 = graph.NewNode(op, nil);
+  USE(n18);
+  n18->ReplaceInput(0, n16);
+  n21->ReplaceInput(2, n18);
+  n22->ReplaceInput(0, n21);
+
+  graph.SetStart(n0);
+  graph.SetEnd(n22);
+
+  PrintGraph(&graph);
+
+  Scheduler scheduler(scope.main_zone());
+  Schedule* schedule = scheduler.NewSchedule(&graph);
+
+  PrintSchedule(schedule);
+
+  CHECK_EQ(19, GetScheduledNodeCount(schedule));
+
+  // Make sure the integer-only add gets hoisted to a different block than
+  // the JSAdd: n19 depends only on the parameters, so code motion may move
+  // it out of the loop, while the JSAdd is tied to the loop body.
+  CHECK(schedule->block(n19) != schedule->block(n20));
+}
+
+
+// So we can get a real JS function.
+static Handle<JSFunction> Compile(const char* source) {
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<String> source_code = isolate->factory()
+                                   ->NewStringFromUtf8(CStrVector(source))
+                                   .ToHandleChecked();
+  Handle<SharedFunctionInfo> shared_function = Compiler::CompileScript(
+      source_code, Handle<String>(), 0, 0, false,
+      Handle<Context>(isolate->native_context()), NULL, NULL,
+      v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE);
+  return isolate->factory()->NewFunctionFromSharedFunctionInfo(
+      shared_function, isolate->native_context());
+}
+
+
+TEST(BuildScheduleTrivialLazyDeoptCall) {
+  HandleAndZoneScope scope;
+  Isolate* isolate = scope.main_isolate();
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common_builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+
+  InitializedHandleScope handles;
+  Handle<JSFunction> function = Compile("m()");
+  CompilationInfoWithZone info(function);
+  Linkage linkage(&info);
+
+  // Manually transcribed code for:
+  // function turbo_fan_test() {
+  //   m();
+  // }
+  // where m can lazy deopt (so it has a deopt block associated with it).
+
+
+  //                  Start                                    //
+  //                    ^                                      //
+  //                    | (EC)                                 //
+  //                    |                                      //
+  //         /------> Call <--------------\                    //
+  //        /        ^    ^                \                   //
+  //       /         |    |                 \        undef     //
+  //      /          /    \                  \         ^       //
+  //  (E) |     (C) /      \  (C)             \ (E)    |       //
+  //      | Continuation  LazyDeoptimization  |        |       //
+  //      \___    ^           ^               /        |       //
+  //          \   |           |        ______/    Framestate   //
+  //    undef  \  | (VC)      | (C)   /            ^           //
+  //         \  \ |           |      /            /            //
+  //          Return    Deoptimization ----------/             //
+  //              ^           ^                                //
+  //               \         /                                 //
+  //            (C) \       / (C)                              //
+  //                 \     /                                   //
+  //                  Merge                                    //
+  //                    ^                                      //
+  //                    |                                      //
+  //                   End                                     //
+
+  Handle<Object> undef_object =
+      Handle<Object>(isolate->heap()->undefined_value(), isolate);
+  PrintableUnique<Object> undef_constant =
+      PrintableUnique<Object>::CreateUninitialized(scope.main_zone(),
+                                                   undef_object);
+
+  Node* undef_node = graph.NewNode(common_builder.HeapConstant(undef_constant));
+
+  Node* start_node = graph.NewNode(common_builder.Start());
+
+  CallDescriptor* descriptor = linkage.GetJSCallDescriptor(0);
+  Node* call_node = graph.NewNode(common_builder.Call(descriptor),
+                                  undef_node,   // function
+                                  undef_node,   // context
+                                  start_node,   // effect
+                                  start_node);  // control
+
+  Node* cont_node = graph.NewNode(common_builder.Continuation(), call_node);
+  Node* lazy_deopt_node =
+      graph.NewNode(common_builder.LazyDeoptimization(), call_node);
+
+  FrameStateDescriptor state_descriptor(BailoutId(1234));
+  Node* state_node =
+      graph.NewNode(common_builder.FrameState(state_descriptor));
+
+  Node* return_node = graph.NewNode(common_builder.Return(),
+                                    undef_node,  // return value
+                                    call_node,   // effect
+                                    cont_node);  // control
+  Node* deoptimization_node = graph.NewNode(common_builder.Deoptimize(),
+                                            state_node,  // deopt environment
+                                            call_node,   // effect
+                                            lazy_deopt_node);  // control
+
+  Node* merge_node =
+      graph.NewNode(common_builder.Merge(2), return_node, deoptimization_node);
+
+  Node* end_node = graph.NewNode(common_builder.End(), merge_node);
+
+  graph.SetStart(start_node);
+  graph.SetEnd(end_node);
+
+  PrintGraph(&graph);
+
+  Scheduler scheduler(scope.main_zone());
+  Schedule* schedule = scheduler.NewSchedule(&graph);
+
+  PrintSchedule(schedule);
+
+  // Tests:
+  // Continuation and deopt have basic blocks.
+  BasicBlock* cont_block = schedule->block(cont_node);
+  BasicBlock* deopt_block = schedule->block(lazy_deopt_node);
+  BasicBlock* call_block = schedule->block(call_node);
+  CHECK_NE(NULL, cont_block);
+  CHECK_NE(NULL, deopt_block);
+  CHECK_NE(NULL, call_block);
+  // The basic blocks are different.
+  CHECK_NE(cont_block, deopt_block);
+  CHECK_NE(cont_block, call_block);
+  CHECK_NE(deopt_block, call_block);
+  // The call node finishes its own basic block.
+  CHECK_EQ(BasicBlock::kCall, call_block->control_);
+  CHECK_EQ(call_node, call_block->control_input_);
+  // The lazy deopt block is deferred.
+  CHECK(deopt_block->deferred_);
+  CHECK(!call_block->deferred_);
+  CHECK(!cont_block->deferred_);
+  // The lazy deopt block contains framestate + bailout (and nothing else).
+  CHECK_EQ(deoptimization_node, deopt_block->control_input_);
+  CHECK_EQ(2, deopt_block->nodes_.size());
+  CHECK_EQ(lazy_deopt_node, deopt_block->nodes_[0]);
+  CHECK_EQ(state_node, deopt_block->nodes_[1]);
+}
diff --git a/test/cctest/compiler/test-simplified-lowering.cc b/test/cctest/compiler/test-simplified-lowering.cc
new file mode 100644 (file)
index 0000000..f1d9570
--- /dev/null
@@ -0,0 +1,614 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/compiler/control-builders.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/simplified-node-factory.h"
+#include "src/compiler/typer.h"
+#include "src/compiler/verifier.h"
+#include "src/execution.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/scopes.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/graph-builder-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+template <typename ReturnType>
+class SimplifiedGraphBuilderTester : public GraphBuilderTester<ReturnType> {
+ public:
+  SimplifiedGraphBuilderTester(MachineRepresentation p0 = kMachineLast,
+                               MachineRepresentation p1 = kMachineLast,
+                               MachineRepresentation p2 = kMachineLast,
+                               MachineRepresentation p3 = kMachineLast,
+                               MachineRepresentation p4 = kMachineLast)
+      : GraphBuilderTester<ReturnType>(p0, p1, p2, p3, p4) {}
+
+  // Close graph and lower one node.
+  void Lower(Node* node) {
+    this->End();
+    Typer typer(this->zone());
+    CommonOperatorBuilder common(this->zone());
+    SourcePositionTable source_positions(this->graph());
+    JSGraph jsgraph(this->graph(), &common, &typer);
+    SimplifiedLowering lowering(&jsgraph, &source_positions);
+    if (node == NULL) {
+      lowering.LowerAllNodes();
+    } else {
+      lowering.Lower(node);
+    }
+  }
+
+  // Close graph and lower all nodes.
+  void LowerAllNodes() { Lower(NULL); }
+
+  void StoreFloat64(Node* node, double* ptr) {
+    Node* ptr_node = this->PointerConstant(ptr);
+    this->Store(kMachineFloat64, ptr_node, node);
+  }
+
+  Node* LoadInt32(int32_t* ptr) {
+    Node* ptr_node = this->PointerConstant(ptr);
+    return this->Load(kMachineWord32, ptr_node);
+  }
+
+  Node* LoadUint32(uint32_t* ptr) {
+    Node* ptr_node = this->PointerConstant(ptr);
+    return this->Load(kMachineWord32, ptr_node);
+  }
+
+  Node* LoadFloat64(double* ptr) {
+    Node* ptr_node = this->PointerConstant(ptr);
+    return this->Load(kMachineFloat64, ptr_node);
+  }
+
+  Factory* factory() { return this->isolate()->factory(); }
+  Heap* heap() { return this->isolate()->heap(); }
+};
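+
+
+// A minimal sketch of how the tester above is typically driven (this mirrors
+// the RunChange* tests below; the node choice is illustrative only):
+//
+//   SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged);
+//   Node* x = t.ChangeTaggedToInt32(t.Parameter(0));
+//   t.Return(x);
+//   t.Lower(x);                           // close the graph, lower only x
+//   int32_t r = t.Call(Smi::FromInt(7));  // run the generated code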
+
+
+class SimplifiedGraphBuilderJSTester
+    : public SimplifiedGraphBuilderTester<Object*> {
+ public:
+  SimplifiedGraphBuilderJSTester()
+      : SimplifiedGraphBuilderTester<Object*>(),
+        f_(v8::Utils::OpenHandle(*v8::Handle<v8::Function>::Cast(CompileRun(
+            "(function() { 'use strict'; return 2.7123; })")))),
+        swapped_(false) {
+    set_current_context(HeapConstant(handle(f_->context())));
+  }
+
+  template <typename T>
+  T* CallJS() {
+    if (!swapped_) {
+      Compile();
+    }
+    Handle<Object>* args = NULL;
+    MaybeHandle<Object> result = Execution::Call(
+        isolate(), f_, factory()->undefined_value(), 0, args, false);
+    return T::cast(*result.ToHandleChecked());
+  }
+
+ private:
+  void Compile() {
+    CompilationInfoWithZone info(f_);
+    CHECK(Parser::Parse(&info));
+    StrictMode strict_mode = info.function()->strict_mode();
+    info.SetStrictMode(strict_mode);
+    info.SetOptimizing(BailoutId::None(), Handle<Code>(f_->code()));
+    CHECK(Rewriter::Rewrite(&info));
+    CHECK(Scope::Analyze(&info));
+    CHECK_NE(NULL, info.scope());
+    Pipeline pipeline(&info);
+    Linkage linkage(&info);
+    Handle<Code> code = pipeline.GenerateCodeForMachineGraph(&linkage, graph());
+    CHECK(!code.is_null());
+    f_->ReplaceCode(*code);
+    swapped_ = true;
+  }
+
+  Handle<JSFunction> f_;
+  bool swapped_;
+};
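+
+
+// Usage sketch for the JS tester (mirrors the Run*ToTagged tests below): the
+// first CallJS<T>() compiles the graph into f_'s code object; every call then
+// goes through Execution::Call like a real JS invocation.
+//
+//   SimplifiedGraphBuilderJSTester t;
+//   int32_t input = 0;
+//   Node* x = t.ChangeInt32ToTagged(t.LoadInt32(&input));
+//   t.Return(x);
+//   t.Lower(x);
+//   HeapNumber* boxed = t.CallJS<HeapNumber>();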
+
+
+TEST(RunChangeTaggedToInt32) {
+  SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged);
+  Node* x = t.ChangeTaggedToInt32(t.Parameter(0));
+  t.Return(x);
+
+  t.Lower(x);
+
+  // TODO(titzer): remove me.
+  return;
+
+  FOR_INT32_INPUTS(i) {
+    int32_t input = *i;
+
+    if (Smi::IsValid(input)) {
+      int32_t result = t.Call(Smi::FromInt(input));
+      CHECK_EQ(input, result);
+    }
+
+    {
+      Handle<Object> number = t.factory()->NewNumber(input);
+      int32_t result = t.Call(*number);
+      CHECK_EQ(input, result);
+    }
+
+    {
+      Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
+      int32_t result = t.Call(*number);
+      CHECK_EQ(input, result);
+    }
+  }
+}
+
+
+TEST(RunChangeTaggedToUint32) {
+  SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged);
+  Node* x = t.ChangeTaggedToUint32(t.Parameter(0));
+  t.Return(x);
+
+  t.Lower(x);
+
+  // TODO(titzer): remove me.
+  return;
+
+  FOR_UINT32_INPUTS(i) {
+    uint32_t input = *i;
+
+    if (Smi::IsValid(input)) {
+      int32_t result = t.Call(Smi::FromInt(input));
+      CHECK_EQ(static_cast<int32_t>(input), result);
+    }
+
+    {
+      Handle<Object> number = t.factory()->NewNumber(input);
+      int32_t result = t.Call(*number);
+      CHECK_EQ(static_cast<int32_t>(input), result);
+    }
+
+    {
+      Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
+      int32_t result = t.Call(*number);
+      CHECK_EQ(static_cast<int32_t>(input), result);
+    }
+  }
+}
+
+
+TEST(RunChangeTaggedToFloat64) {
+  SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged);
+  double result;
+  Node* x = t.ChangeTaggedToFloat64(t.Parameter(0));
+  t.StoreFloat64(x, &result);
+  t.Return(t.Int32Constant(0));
+
+  t.Lower(x);
+
+  // TODO(titzer): remove me.
+  return;
+
+  {
+    FOR_INT32_INPUTS(i) {
+      int32_t input = *i;
+
+      if (Smi::IsValid(input)) {
+        t.Call(Smi::FromInt(input));
+        CHECK_EQ(input, static_cast<int32_t>(result));
+      }
+
+      {
+        Handle<Object> number = t.factory()->NewNumber(input);
+        t.Call(*number);
+        CHECK_EQ(input, static_cast<int32_t>(result));
+      }
+
+      {
+        Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
+        t.Call(*number);
+        CHECK_EQ(input, static_cast<int32_t>(result));
+      }
+    }
+  }
+
+  {
+    FOR_FLOAT64_INPUTS(i) {
+      double input = *i;
+      {
+        Handle<Object> number = t.factory()->NewNumber(input);
+        t.Call(*number);
+        CHECK_EQ(input, result);
+      }
+
+      {
+        Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
+        t.Call(*number);
+        CHECK_EQ(input, result);
+      }
+    }
+  }
+}
+
+
+TEST(RunChangeBoolToBit) {
+  SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged);
+  Node* x = t.ChangeBoolToBit(t.Parameter(0));
+  t.Return(x);
+
+  t.Lower(x);
+
+  if (!Pipeline::SupportedTarget()) return;
+
+  {
+    Object* true_obj = t.heap()->true_value();
+    int32_t result = t.Call(true_obj);
+    CHECK_EQ(1, result);
+  }
+
+  {
+    Object* false_obj = t.heap()->false_value();
+    int32_t result = t.Call(false_obj);
+    CHECK_EQ(0, result);
+  }
+}
+
+
+TEST(RunChangeBitToBool) {
+  SimplifiedGraphBuilderTester<Object*> t(kMachineTagged);
+  Node* x = t.ChangeBitToBool(t.Parameter(0));
+  t.Return(x);
+
+  t.Lower(x);
+
+  // TODO(titzer): remove me.
+  return;
+
+  {
+    Object* result = t.Call(1);
+    Object* true_obj = t.heap()->true_value();
+    CHECK_EQ(true_obj, result);
+  }
+
+  {
+    Object* result = t.Call(0);
+    Object* false_obj = t.heap()->false_value();
+    CHECK_EQ(false_obj, result);
+  }
+}
+
+
+TEST(RunChangeInt32ToTagged) {
+  SimplifiedGraphBuilderJSTester t;
+  int32_t input;
+  Node* load = t.LoadInt32(&input);
+  Node* x = t.ChangeInt32ToTagged(load);
+  t.Return(x);
+
+  t.Lower(x);
+
+  // TODO(titzer): remove me.
+  return;
+
+  {
+    FOR_INT32_INPUTS(i) {
+      input = *i;
+      HeapNumber* result = t.CallJS<HeapNumber>();
+      CHECK_EQ(static_cast<double>(input), result->value());
+    }
+  }
+
+  {
+    FOR_INT32_INPUTS(i) {
+      input = *i;
+      SimulateFullSpace(CcTest::heap()->new_space());
+      HeapNumber* result = t.CallJS<HeapNumber>();
+      CHECK_EQ(static_cast<double>(input), result->value());
+    }
+  }
+}
+
+
+TEST(RunChangeUint32ToTagged) {
+  SimplifiedGraphBuilderJSTester t;
+  uint32_t input;
+  Node* load = t.LoadUint32(&input);
+  Node* x = t.ChangeUint32ToTagged(load);
+  t.Return(x);
+
+  t.Lower(x);
+
+  // TODO(titzer): remove me.
+  return;
+
+  {
+    FOR_UINT32_INPUTS(i) {
+      input = *i;
+      HeapNumber* result = t.CallJS<HeapNumber>();
+      double expected = static_cast<double>(input);
+      CHECK_EQ(expected, result->value());
+    }
+  }
+
+  {
+    FOR_UINT32_INPUTS(i) {
+      input = *i;
+      SimulateFullSpace(CcTest::heap()->new_space());
+      HeapNumber* result = t.CallJS<HeapNumber>();
+      double expected = static_cast<double>(input);
+      CHECK_EQ(expected, result->value());
+    }
+  }
+}
+
+
+TEST(RunChangeFloat64ToTagged) {
+  SimplifiedGraphBuilderJSTester t;
+  double input;
+  Node* load = t.LoadFloat64(&input);
+  Node* x = t.ChangeFloat64ToTagged(load);
+  t.Return(x);
+
+  t.Lower(x);
+
+  // TODO(titzer): remove me.
+  return;
+
+  {
+    FOR_FLOAT64_INPUTS(i) {
+      input = *i;
+      HeapNumber* result = t.CallJS<HeapNumber>();
+      CHECK_EQ(input, result->value());
+    }
+  }
+  {
+    FOR_FLOAT64_INPUTS(i) {
+      input = *i;
+      SimulateFullSpace(CcTest::heap()->new_space());
+      HeapNumber* result = t.CallJS<HeapNumber>();
+      CHECK_EQ(input, result->value());
+    }
+  }
+}
+
+
+// TODO(dcarney): find a home for these functions.
+namespace {
+
+FieldAccess ForJSObjectMap() {
+  FieldAccess access = {JSObject::kMapOffset, Handle<Name>(), Type::Any(),
+                        kMachineTagged};
+  return access;
+}
+
+
+FieldAccess ForJSObjectProperties() {
+  FieldAccess access = {JSObject::kPropertiesOffset, Handle<Name>(),
+                        Type::Any(), kMachineTagged};
+  return access;
+}
+
+
+FieldAccess ForArrayBufferBackingStore() {
+  FieldAccess access = {
+      JSArrayBuffer::kBackingStoreOffset, Handle<Name>(), Type::UntaggedPtr(),
+      MachineOperatorBuilder::pointer_rep(),
+  };
+  return access;
+}
+
+
+ElementAccess ForFixedArrayElement() {
+  ElementAccess access = {FixedArray::kHeaderSize, Type::Any(), kMachineTagged};
+  return access;
+}
+
+
+ElementAccess ForBackingStoreElement(MachineRepresentation rep) {
+  ElementAccess access = {kNonHeapObjectHeaderSize, Type::Any(), rep};
+  return access;
+}
+}
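+
+// The aggregate initializers above follow the FieldAccess layout
+// ({offset, debug name, field type, machine representation}) and the
+// ElementAccess layout ({header size, element type, representation}); the
+// offsets come straight from the corresponding object layouts.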
+
+
+// Create a simple JSObject with a unique map.
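+// (Each call compiles an object literal with a fresh property name, so map
+// transitions give every returned object its own map; RunStoreMap and
+// RunLoadStoreMap below rely on the maps being distinct.)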
+static Handle<JSObject> TestObject() {
+  static int index = 0;
+  char buffer[50];
+  v8::base::OS::SNPrintF(buffer, 50, "({'a_%d':1})", index++);
+  return Handle<JSObject>::cast(v8::Utils::OpenHandle(*CompileRun(buffer)));
+}
+
+
+TEST(RunLoadMap) {
+  SimplifiedGraphBuilderTester<Object*> t(kMachineTagged);
+  FieldAccess access = ForJSObjectMap();
+  Node* load = t.LoadField(access, t.Parameter(0));
+  t.Return(load);
+
+  t.LowerAllNodes();
+
+  if (!Pipeline::SupportedTarget()) return;
+
+  Handle<JSObject> src = TestObject();
+  Handle<Map> src_map(src->map());
+  Object* result = t.Call(*src);
+  CHECK_EQ(*src_map, result);
+}
+
+
+TEST(RunStoreMap) {
+  SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged, kMachineTagged);
+  FieldAccess access = ForJSObjectMap();
+  t.StoreField(access, t.Parameter(1), t.Parameter(0));
+  t.Return(t.Int32Constant(0));
+
+  t.LowerAllNodes();
+
+  if (!Pipeline::SupportedTarget()) return;
+
+  Handle<JSObject> src = TestObject();
+  Handle<Map> src_map(src->map());
+  Handle<JSObject> dst = TestObject();
+  CHECK(src->map() != dst->map());
+  t.Call(*src_map, *dst);
+  CHECK(*src_map == dst->map());
+}
+
+
+TEST(RunLoadProperties) {
+  SimplifiedGraphBuilderTester<Object*> t(kMachineTagged);
+  FieldAccess access = ForJSObjectProperties();
+  Node* load = t.LoadField(access, t.Parameter(0));
+  t.Return(load);
+
+  t.LowerAllNodes();
+
+  if (!Pipeline::SupportedTarget()) return;
+
+  Handle<JSObject> src = TestObject();
+  Handle<FixedArray> src_props(src->properties());
+  Object* result = t.Call(*src);
+  CHECK_EQ(*src_props, result);
+}
+
+
+TEST(RunLoadStoreMap) {
+  SimplifiedGraphBuilderTester<Object*> t(kMachineTagged, kMachineTagged);
+  FieldAccess access = ForJSObjectMap();
+  Node* load = t.LoadField(access, t.Parameter(0));
+  t.StoreField(access, t.Parameter(1), load);
+  t.Return(load);
+
+  t.LowerAllNodes();
+
+  if (!Pipeline::SupportedTarget()) return;
+
+  Handle<JSObject> src = TestObject();
+  Handle<Map> src_map(src->map());
+  Handle<JSObject> dst = TestObject();
+  CHECK(src->map() != dst->map());
+  Object* result = t.Call(*src, *dst);
+  CHECK(result->IsMap());
+  CHECK_EQ(*src_map, result);
+  CHECK(*src_map == dst->map());
+}
+
+
+TEST(RunLoadStoreFixedArrayIndex) {
+  SimplifiedGraphBuilderTester<Object*> t(kMachineTagged);
+  ElementAccess access = ForFixedArrayElement();
+  Node* load = t.LoadElement(access, t.Parameter(0), t.Int32Constant(0));
+  t.StoreElement(access, t.Parameter(0), t.Int32Constant(1), load);
+  t.Return(load);
+
+  t.LowerAllNodes();
+
+  if (!Pipeline::SupportedTarget()) return;
+
+  Handle<FixedArray> array = t.factory()->NewFixedArray(2);
+  Handle<JSObject> src = TestObject();
+  Handle<JSObject> dst = TestObject();
+  array->set(0, *src);
+  array->set(1, *dst);
+  Object* result = t.Call(*array);
+  CHECK_EQ(*src, result);
+  CHECK_EQ(*src, array->get(0));
+  CHECK_EQ(*src, array->get(1));
+}
+
+
+TEST(RunLoadStoreArrayBuffer) {
+  SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged);
+  const int index = 12;
+  FieldAccess access = ForArrayBufferBackingStore();
+  Node* backing_store = t.LoadField(access, t.Parameter(0));
+  ElementAccess buffer_access = ForBackingStoreElement(kMachineWord8);
+  Node* load =
+      t.LoadElement(buffer_access, backing_store, t.Int32Constant(index));
+  t.StoreElement(buffer_access, backing_store, t.Int32Constant(index + 1),
+                 load);
+  t.Return(load);
+
+  t.LowerAllNodes();
+
+  if (!Pipeline::SupportedTarget()) return;
+
+  Handle<JSArrayBuffer> array = t.factory()->NewJSArrayBuffer();
+  const int array_length = 2 * index;
+  Runtime::SetupArrayBufferAllocatingData(t.isolate(), array, array_length);
+  uint8_t* data = reinterpret_cast<uint8_t*>(array->backing_store());
+  for (int i = 0; i < array_length; i++) {
+    data[i] = i;
+  }
+  int32_t result = t.Call(*array);
+  CHECK_EQ(index, result);
+  for (int i = 0; i < array_length; i++) {
+    uint8_t expected = i;
+    if (i == (index + 1)) expected = result;
+    CHECK_EQ(data[i], expected);
+  }
+}
+
+
+TEST(RunCopyFixedArray) {
+  SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged, kMachineTagged);
+
+  const int kArraySize = 15;
+  Node* one = t.Int32Constant(1);
+  Node* index = t.Int32Constant(0);
+  Node* limit = t.Int32Constant(kArraySize);
+  t.environment()->Push(index);
+  {
+    LoopBuilder loop(&t);
+    loop.BeginLoop();
+    // Loop exit condition.
+    index = t.environment()->Top();
+    Node* condition = t.Int32LessThan(index, limit);
+    loop.BreakUnless(condition);
+    // dst[index] = src[index].
+    index = t.environment()->Pop();
+    ElementAccess access = ForFixedArrayElement();
+    Node* src = t.Parameter(0);
+    Node* load = t.LoadElement(access, src, index);
+    Node* dst = t.Parameter(1);
+    t.StoreElement(access, dst, index, load);
+    // index++
+    index = t.Int32Add(index, one);
+    t.environment()->Push(index);
+    // continue.
+    loop.EndBody();
+    loop.EndLoop();
+  }
+  index = t.environment()->Pop();
+  t.Return(index);
+
+  t.LowerAllNodes();
+
+  if (!Pipeline::SupportedTarget()) return;
+
+  Handle<FixedArray> src = t.factory()->NewFixedArray(kArraySize);
+  Handle<FixedArray> src_copy = t.factory()->NewFixedArray(kArraySize);
+  Handle<FixedArray> dst = t.factory()->NewFixedArray(kArraySize);
+  for (int i = 0; i < kArraySize; i++) {
+    src->set(i, *TestObject());
+    src_copy->set(i, src->get(i));
+    dst->set(i, *TestObject());
+    CHECK_NE(src_copy->get(i), dst->get(i));
+  }
+  CHECK_EQ(kArraySize, t.Call(*src, *dst));
+  for (int i = 0; i < kArraySize; i++) {
+    CHECK_EQ(src_copy->get(i), dst->get(i));
+  }
+}
diff --git a/test/cctest/compiler/test-structured-ifbuilder-fuzzer.cc b/test/cctest/compiler/test-structured-ifbuilder-fuzzer.cc
new file mode 100644 (file)
index 0000000..156ab8d
--- /dev/null
@@ -0,0 +1,666 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/base/utils/random-number-generator.h"
+#include "test/cctest/compiler/codegen-tester.h"
+
+#if V8_TURBOFAN_TARGET
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+typedef StructuredMachineAssembler::IfBuilder IfBuilder;
+typedef StructuredMachineAssembler::LoopBuilder Loop;
+
+static const int32_t kUninitializedVariableOffset = -1;
+static const int32_t kUninitializedOutput = -1;
+static const int32_t kVerifiedOutput = -2;
+
+static const int32_t kInitalVar = 1013;
+static const int32_t kConjunctionInc = 1069;
+static const int32_t kDisjunctionInc = 1151;
+static const int32_t kThenInc = 1223;
+static const int32_t kElseInc = 1291;
+static const int32_t kIfInc = 1373;
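+
+// Each structural event adds its own increment to a running variable, so the
+// final value encodes exactly which path the generated code took. A worked
+// example, taken from RunExpressionString below, for "((v|v)|v)":
+//   inputs "ttt": kInitalVar + 1 * kIfInc + kThenInc
+//   inputs "fft": kInitalVar + 3 * kIfInc + 2 * kDisjunctionInc + kThenInc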
+
+class IfBuilderModel {
+ public:
+  explicit IfBuilderModel(Zone* zone)
+      : zone_(zone),
+        variable_offset_(0),
+        root_(new (zone_) Node(NULL)),
+        current_node_(root_),
+        current_expression_(NULL) {}
+
+  void If() {
+    if (current_node_->else_node != NULL) {
+      current_node_ = current_node_->else_node;
+    } else if (current_node_->then_node != NULL) {
+      current_node_ = current_node_->then_node;
+    }
+    ASSERT(current_expression_ == NULL);
+    current_expression_ = new (zone_) Expression(zone_, NULL);
+    current_node_->condition = current_expression_;
+  }
+  void IfNode() { LastChild()->variable_offset = variable_offset_++; }
+
+  void OpenParen() { current_expression_ = LastChild(); }
+  void CloseParen() { current_expression_ = current_expression_->parent; }
+
+  void And() { NewChild()->conjunction = true; }
+  void Or() { NewChild()->disjunction = true; }
+
+  void Then() {
+    ASSERT(current_expression_ == NULL || current_expression_->parent == NULL);
+    current_expression_ = NULL;
+    ASSERT(current_node_->then_node == NULL);
+    current_node_->then_node = new (zone_) Node(current_node_);
+  }
+  void Else() {
+    ASSERT(current_expression_ == NULL || current_expression_->parent == NULL);
+    current_expression_ = NULL;
+    ASSERT(current_node_->else_node == NULL);
+    current_node_->else_node = new (zone_) Node(current_node_);
+  }
+  void Return() {
+    if (current_node_->else_node != NULL) {
+      current_node_->else_node->returns = true;
+    } else if (current_node_->then_node != NULL) {
+      current_node_->then_node->returns = true;
+    } else {
+      CHECK(false);
+    }
+  }
+  void End() {}
+
+  void Print(std::vector<char>* v) { PrintRecursive(v, root_); }
+
+  struct VerificationState {
+    int32_t* inputs;
+    int32_t* outputs;
+    int32_t var;
+  };
+
+  int32_t Verify(int length, int32_t* inputs, int32_t* outputs) {
+    CHECK_EQ(variable_offset_, length);
+    // Input/Output verification.
+    for (int i = 0; i < length; ++i) {
+      CHECK(inputs[i] == 0 || inputs[i] == 1);
+      CHECK(outputs[i] == kUninitializedOutput || outputs[i] >= 0);
+    }
+    // Do verification.
+    VerificationState state;
+    state.inputs = inputs;
+    state.outputs = outputs;
+    state.var = kInitalVar;
+    VerifyRecursive(root_, &state);
+    // Verify all outputs marked.
+    for (int i = 0; i < length; ++i) {
+      CHECK(outputs[i] == kUninitializedOutput ||
+            outputs[i] == kVerifiedOutput);
+    }
+    return state.var;
+  }
+
+ private:
+  struct Expression;
+  typedef std::vector<Expression*, zone_allocator<Expression*> > Expressions;
+
+  struct Expression : public ZoneObject {
+    Expression(Zone* zone, Expression* p)
+        : variable_offset(kUninitializedVariableOffset),
+          disjunction(false),
+          conjunction(false),
+          parent(p),
+          children(Expressions::allocator_type(zone)) {}
+    int variable_offset;
+    bool disjunction;
+    bool conjunction;
+    Expression* parent;
+    Expressions children;
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(Expression);
+  };
+
+  struct Node : public ZoneObject {
+    explicit Node(Node* p)
+        : parent(p),
+          condition(NULL),
+          then_node(NULL),
+          else_node(NULL),
+          returns(false) {}
+    Node* parent;
+    Expression* condition;
+    Node* then_node;
+    Node* else_node;
+    bool returns;
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(Node);
+  };
+
+  Expression* LastChild() {
+    if (current_expression_->children.empty()) {
+      current_expression_->children.push_back(
+          new (zone_) Expression(zone_, current_expression_));
+    }
+    return current_expression_->children.back();
+  }
+
+  Expression* NewChild() {
+    Expression* child = new (zone_) Expression(zone_, current_expression_);
+    current_expression_->children.push_back(child);
+    return child;
+  }
+
+  static void PrintRecursive(std::vector<char>* v, Expression* expression) {
+    CHECK(expression != NULL);
+    if (expression->conjunction) {
+      ASSERT(!expression->disjunction);
+      v->push_back('&');
+    } else if (expression->disjunction) {
+      v->push_back('|');
+    }
+    if (expression->variable_offset != kUninitializedVariableOffset) {
+      v->push_back('v');
+    }
+    Expressions& children = expression->children;
+    if (children.empty()) return;
+    v->push_back('(');
+    for (Expressions::iterator i = children.begin(); i != children.end(); ++i) {
+      PrintRecursive(v, *i);
+    }
+    v->push_back(')');
+  }
+
+  static void PrintRecursive(std::vector<char>* v, Node* node) {
+    // Termination condition.
+    if (node->condition == NULL) {
+      CHECK(node->then_node == NULL && node->else_node == NULL);
+      if (node->returns) v->push_back('r');
+      return;
+    }
+    CHECK(!node->returns);
+    v->push_back('i');
+    PrintRecursive(v, node->condition);
+    if (node->then_node != NULL) {
+      v->push_back('t');
+      PrintRecursive(v, node->then_node);
+    }
+    if (node->else_node != NULL) {
+      v->push_back('e');
+      PrintRecursive(v, node->else_node);
+    }
+  }
+
+  static bool VerifyRecursive(Expression* expression,
+                              VerificationState* state) {
+    bool result = false;
+    bool first_iteration = true;
+    Expressions& children = expression->children;
+    CHECK(!children.empty());
+    for (Expressions::iterator i = children.begin(); i != children.end(); ++i) {
+      Expression* child = *i;
+      // Short-circuit evaluation; note that mixes of &&s and ||s have
+      // weird semantics.
+      if ((child->conjunction && !result) || (child->disjunction && result)) {
+        continue;
+      }
+      if (child->conjunction) state->var += kConjunctionInc;
+      if (child->disjunction) state->var += kDisjunctionInc;
+      bool child_result;
+      if (child->variable_offset != kUninitializedVariableOffset) {
+        // Verify output
+        CHECK_EQ(state->var, state->outputs[child->variable_offset]);
+        state->outputs[child->variable_offset] = kVerifiedOutput;  // Mark seen.
+        child_result = state->inputs[child->variable_offset];
+        CHECK(child->children.empty());
+        state->var += kIfInc;
+      } else {
+        child_result = VerifyRecursive(child, state);
+      }
+      if (child->conjunction) {
+        result &= child_result;
+      } else if (child->disjunction) {
+        result |= child_result;
+      } else {
+        CHECK(first_iteration);
+        result = child_result;
+      }
+      first_iteration = false;
+    }
+    return result;
+  }
+
+  static void VerifyRecursive(Node* node, VerificationState* state) {
+    if (node->condition == NULL) return;
+    bool result = VerifyRecursive(node->condition, state);
+    if (result) {
+      if (node->then_node) {
+        state->var += kThenInc;
+        return VerifyRecursive(node->then_node, state);
+      }
+    } else {
+      if (node->else_node) {
+        state->var += kElseInc;
+        return VerifyRecursive(node->else_node, state);
+      }
+    }
+  }
+
+  Zone* zone_;
+  int variable_offset_;
+  Node* root_;
+  Node* current_node_;
+  Expression* current_expression_;
+  DISALLOW_COPY_AND_ASSIGN(IfBuilderModel);
+};
+
+
+class IfBuilderGenerator : public StructuredMachineAssemblerTester<int32_t> {
+ public:
+  IfBuilderGenerator()
+      : StructuredMachineAssemblerTester(MachineOperatorBuilder::pointer_rep(),
+                                         MachineOperatorBuilder::pointer_rep()),
+        var_(NewVariable(Int32Constant(kInitalVar))),
+        c_(this),
+        m_(this->zone()),
+        one_(Int32Constant(1)),
+        offset_(0) {}
+
+  static void GenerateExpression(v8::base::RandomNumberGenerator* rng,
+                                 std::vector<char>* v, int n_vars) {
+    int depth = 1;
+    v->push_back('(');
+    bool need_if = true;
+    bool populated = false;
+    while (n_vars != 0) {
+      if (need_if) {
+        // can nest a paren or do a variable
+        if (rng->NextBool()) {
+          v->push_back('v');
+          n_vars--;
+          need_if = false;
+          populated = true;
+        } else {
+          v->push_back('(');
+          depth++;
+          populated = false;
+        }
+      } else {
+        // can pop, do && or do ||
+        int options = 3;
+        if (depth == 1 || !populated) {
+          options--;
+        }
+        switch (rng->NextInt(options)) {
+          case 0:
+            v->push_back('&');
+            need_if = true;
+            break;
+          case 1:
+            v->push_back('|');
+            need_if = true;
+            break;
+          case 2:
+            v->push_back(')');
+            depth--;
+            break;
+        }
+      }
+    }
+    CHECK(!need_if);
+    while (depth != 0) {
+      v->push_back(')');
+      depth--;
+    }
+  }
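+
+  // Illustrative only: with n_vars == 3 one possible walk above emits
+  // "(v&(v|v))"; each 'v' consumes a variable, '&'/'|' chain conditions,
+  // and any still-open parens are closed once the variables run out.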
+
+  static void GenerateIfThenElse(v8::base::RandomNumberGenerator* rng,
+                                 std::vector<char>* v, int n_ifs,
+                                 int max_exp_length) {
+    CHECK_GT(n_ifs, 0);
+    CHECK_GT(max_exp_length, 0);
+    bool have_env = true;
+    bool then_done = false;
+    bool else_done = false;
+    bool first_iteration = true;
+    while (n_ifs != 0) {
+      if (have_env) {
+        int options = 3;
+        if (else_done || first_iteration) {  // Don't do else or return
+          options -= 2;
+          first_iteration = false;
+        }
+        switch (rng->NextInt(options)) {
+          case 0:
+            v->push_back('i');
+            n_ifs--;
+            have_env = false;
+            GenerateExpression(rng, v, rng->NextInt(max_exp_length) + 1);
+            break;
+          case 1:
+            v->push_back('r');
+            have_env = false;
+            break;
+          case 2:
+            v->push_back('e');
+            else_done = true;
+            then_done = false;
+            break;
+          default:
+            CHECK(false);
+        }
+      } else {  // Can only do then or else
+        int options = 2;
+        if (then_done) options--;
+        switch (rng->NextInt(options)) {
+          case 0:
+            v->push_back('e');
+            else_done = true;
+            then_done = false;
+            break;
+          case 1:
+            v->push_back('t');
+            then_done = true;
+            else_done = false;
+            break;
+          default:
+            CHECK(false);
+        }
+        have_env = true;
+      }
+    }
+    // Last instruction must have been an if; it can be completed in several
+    // ways, all of which are legal after a fresh if.
+    switch (rng->NextInt(3)) {
+      case 0:
+        // Do nothing.
+        break;
+      case 1:
+        v->push_back('t');
+        switch (rng->NextInt(3)) {
+          case 0:
+            v->push_back('r');
+            break;
+          case 1:
+            v->push_back('e');
+            break;
+          case 2:
+            v->push_back('e');
+            v->push_back('r');
+            break;
+          default:
+            CHECK(false);
+        }
+        break;
+      case 2:
+        v->push_back('e');
+        if (rng->NextBool()) v->push_back('r');
+        break;
+      default:
+        CHECK(false);
+    }
+  }
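+
+  // The generated strings use the alphabet ParseIfThenElse below consumes:
+  // 'i' starts an if with a generated expression, 't'/'e' open then/else
+  // branches and 'r' returns; "i(v)te" and "i(v)ter" (see
+  // RunSimpleIfElseTester) are typical shapes.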
+
+  std::string::const_iterator ParseExpression(std::string::const_iterator it,
+                                              std::string::const_iterator end) {
+    // Prepare for expression.
+    m_.If();
+    c_.If();
+    int depth = 0;
+    for (; it != end; ++it) {
+      switch (*it) {
+        case 'v':
+          m_.IfNode();
+          {
+            Node* offset = Int32Constant(offset_ * 4);
+            Store(kMachineWord32, Parameter(1), offset, var_.Get());
+            var_.Set(Int32Add(var_.Get(), Int32Constant(kIfInc)));
+            c_.If(Load(kMachineWord32, Parameter(0), offset));
+            offset_++;
+          }
+          break;
+        case '&':
+          m_.And();
+          c_.And();
+          var_.Set(Int32Add(var_.Get(), Int32Constant(kConjunctionInc)));
+          break;
+        case '|':
+          m_.Or();
+          c_.Or();
+          var_.Set(Int32Add(var_.Get(), Int32Constant(kDisjunctionInc)));
+          break;
+        case '(':
+          if (depth != 0) {
+            m_.OpenParen();
+            c_.OpenParen();
+          }
+          depth++;
+          break;
+        case ')':
+          depth--;
+          if (depth == 0) return it;
+          m_.CloseParen();
+          c_.CloseParen();
+          break;
+        default:
+          CHECK(false);
+      }
+    }
+    CHECK(false);
+    return it;
+  }
+
+  void ParseIfThenElse(const std::string& str) {
+    int n_vars = 0;
+    for (std::string::const_iterator it = str.begin(); it != str.end(); ++it) {
+      if (*it == 'v') n_vars++;
+    }
+    InitializeConstants(n_vars);
+    for (std::string::const_iterator it = str.begin(); it != str.end(); ++it) {
+      switch (*it) {
+        case 'i': {
+          it++;
+          CHECK(it != str.end());
+          CHECK_EQ('(', *it);
+          it = ParseExpression(it, str.end());
+          CHECK_EQ(')', *it);
+          break;
+        }
+        case 't':
+          m_.Then();
+          c_.Then();
+          var_.Set(Int32Add(var_.Get(), Int32Constant(kThenInc)));
+          break;
+        case 'e':
+          m_.Else();
+          c_.Else();
+          var_.Set(Int32Add(var_.Get(), Int32Constant(kElseInc)));
+          break;
+        case 'r':
+          m_.Return();
+          Return(var_.Get());
+          break;
+        default:
+          CHECK(false);
+      }
+    }
+    m_.End();
+    c_.End();
+    Return(var_.Get());
+    // Compare generated model to parsed version.
+    {
+      std::vector<char> v;
+      m_.Print(&v);
+      std::string m_str(v.begin(), v.end());
+      CHECK(m_str == str);
+    }
+  }
+
+  void ParseExpression(const std::string& str) {
+    CHECK(inputs_.is_empty());
+    std::string wrapped = "i(" + str + ")te";
+    ParseIfThenElse(wrapped);
+  }
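+
+  // e.g. ParseExpression("v|v") builds and verifies the model for
+  // "i(v|v)te".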
+
+  void ParseRandomIfThenElse(v8::base::RandomNumberGenerator* rng, int n_ifs,
+                             int n_vars) {
+    std::vector<char> v;
+    GenerateIfThenElse(rng, &v, n_ifs, n_vars);
+    std::string str(v.begin(), v.end());
+    ParseIfThenElse(str);
+  }
+
+  void RunRandom(v8::base::RandomNumberGenerator* rng) {
+    // TODO(dcarney): permute inputs via model.
+    // TODO(dcarney): compute test_cases from n_ifs and n_vars.
+    int test_cases = 100;
+    for (int test = 0; test < test_cases; test++) {
+      Initialize();
+      for (int i = 0; i < offset_; i++) {
+        inputs_[i] = rng->NextBool();
+      }
+      DoCall();
+    }
+  }
+
+  void Run(const std::string& str, int32_t expected) {
+    Initialize();
+    int offset = 0;
+    for (std::string::const_iterator it = str.begin(); it != str.end(); ++it) {
+      switch (*it) {
+        case 't':
+          inputs_[offset++] = 1;
+          break;
+        case 'f':
+          inputs_[offset++] = 0;
+          break;
+        default:
+          CHECK(false);
+      }
+    }
+    CHECK_EQ(offset_, offset);
+    // Call.
+    int32_t result = DoCall();
+    CHECK_EQ(result, expected);
+  }
+
+ private:
+  typedef std::vector<int32_t, zone_allocator<int32_t> > IOVector;
+
+  void InitializeConstants(int n_vars) {
+    CHECK(inputs_.is_empty());
+    inputs_.Reset(new int32_t[n_vars]);
+    outputs_.Reset(new int32_t[n_vars]);
+  }
+
+  void Initialize() {
+    for (int i = 0; i < offset_; i++) {
+      inputs_[i] = 0;
+      outputs_[i] = kUninitializedOutput;
+    }
+  }
+
+  int32_t DoCall() {
+    int32_t result = Call(inputs_.get(), outputs_.get());
+    int32_t expected = m_.Verify(offset_, inputs_.get(), outputs_.get());
+    CHECK_EQ(result, expected);
+    return result;
+  }
+
+  const v8::internal::compiler::Variable var_;
+  IfBuilder c_;
+  IfBuilderModel m_;
+  Node* one_;
+  int32_t offset_;
+  SmartArrayPointer<int32_t> inputs_;
+  SmartArrayPointer<int32_t> outputs_;
+};
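+
+
+// Typical driving pattern, exactly what the tests below do:
+//
+//   IfBuilderGenerator m;
+//   m.ParseExpression("((v|v)|v)");                 // assembler + model
+//   m.Run("ttt", kInitalVar + kIfInc + kThenInc);   // fixed inputs
+//   v8::base::RandomNumberGenerator rng;
+//   m.RunRandom(&rng);                              // fuzz against the model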
+
+
+TEST(RunExpressionString) {
+  IfBuilderGenerator m;
+  m.ParseExpression("((v|v)|v)");
+  m.Run("ttt", kInitalVar + 1 * kIfInc + kThenInc);
+  m.Run("ftt", kInitalVar + 2 * kIfInc + kDisjunctionInc + kThenInc);
+  m.Run("fft", kInitalVar + 3 * kIfInc + 2 * kDisjunctionInc + kThenInc);
+  m.Run("fff", kInitalVar + 3 * kIfInc + 2 * kDisjunctionInc + kElseInc);
+}
+
+
+TEST(RunExpressionStrings) {
+  const char* strings[] = {
+      "v",       "(v)",     "((v))",     "v|v",
+      "(v|v)",   "((v|v))", "v&v",       "(v&v)",
+      "((v&v))", "v&(v)",   "v&(v|v)",   "v&(v|v)&v",
+      "v|(v)",   "v|(v&v)", "v|(v&v)|v", "v|(((v)|(v&v)|(v)|v)&(v))|v",
+  };
+  v8::base::RandomNumberGenerator rng;
+  for (size_t i = 0; i < ARRAY_SIZE(strings); i++) {
+    IfBuilderGenerator m;
+    m.ParseExpression(strings[i]);
+    m.RunRandom(&rng);
+  }
+}
+
+
+TEST(RunSimpleIfElseTester) {
+  const char* tests[] = {
+      "i(v)",   "i(v)t",   "i(v)te",
+      "i(v)er", "i(v)ter", "i(v)ti(v)trei(v)ei(v)ei(v)ei(v)ei(v)ei(v)ei(v)e"};
+  v8::base::RandomNumberGenerator rng;
+  for (size_t i = 0; i < ARRAY_SIZE(tests); ++i) {
+    IfBuilderGenerator m;
+    m.ParseIfThenElse(tests[i]);
+    m.RunRandom(&rng);
+  }
+}
+
+
+TEST(RunRandomExpressions) {
+  v8::base::RandomNumberGenerator rng;
+  for (int n_vars = 1; n_vars < 12; n_vars++) {
+    for (int i = 0; i < n_vars * n_vars + 10; i++) {
+      IfBuilderGenerator m;
+      m.ParseRandomIfThenElse(&rng, 1, n_vars);
+      m.RunRandom(&rng);
+    }
+  }
+}
+
+
+TEST(RunRandomIfElse) {
+  v8::base::RandomNumberGenerator rng;
+  for (int n_ifs = 1; n_ifs < 12; n_ifs++) {
+    for (int i = 0; i < n_ifs * n_ifs + 10; i++) {
+      IfBuilderGenerator m;
+      m.ParseRandomIfThenElse(&rng, n_ifs, 1);
+      m.RunRandom(&rng);
+    }
+  }
+}
+
+
+TEST(RunRandomIfElseExpressions) {
+  v8::base::RandomNumberGenerator rng;
+  for (int n_vars = 2; n_vars < 6; n_vars++) {
+    for (int n_ifs = 2; n_ifs < 7; n_ifs++) {
+      for (int i = 0; i < n_ifs * n_vars + 10; i++) {
+        IfBuilderGenerator m;
+        m.ParseRandomIfThenElse(&rng, n_ifs, n_vars);
+        m.RunRandom(&rng);
+      }
+    }
+  }
+}
+
+#endif
diff --git a/test/cctest/compiler/test-structured-machine-assembler.cc b/test/cctest/compiler/test-structured-machine-assembler.cc
new file mode 100644 (file)
index 0000000..ab06348
--- /dev/null
@@ -0,0 +1,1055 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/compiler/structured-machine-assembler.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+#if V8_TURBOFAN_TARGET
+
+using namespace v8::internal::compiler;
+
+typedef StructuredMachineAssembler::IfBuilder IfBuilder;
+typedef StructuredMachineAssembler::LoopBuilder Loop;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class StructuredMachineAssemblerFriend {
+ public:
+  static bool VariableAlive(StructuredMachineAssembler* m,
+                            const Variable& var) {
+    CHECK(m->current_environment_ != NULL);
+    int offset = var.offset_;
+    return offset < static_cast<int>(m->CurrentVars()->size()) &&
+           m->CurrentVars()->at(offset) != NULL;
+  }
+};
+}
+}
+}  // namespace v8::internal::compiler
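+
+
+// Test-only backdoor: VariableAlive lets tests assert whether a Variable is
+// still tracked in the assembler's current environment.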
+
+
+TEST(RunVariable) {
+  StructuredMachineAssemblerTester<int32_t> m;
+
+  int32_t constant = 0x86c2bb16;
+
+  Variable v1 = m.NewVariable(m.Int32Constant(constant));
+  Variable v2 = m.NewVariable(v1.Get());
+  m.Return(v2.Get());
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunSimpleIf) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0xc4a3e3a6;
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Parameter(0)).Then();
+    m.Return(m.Int32Constant(constant));
+  }
+  m.Return(m.Word32Not(m.Int32Constant(constant)));
+
+  CHECK_EQ(~constant, m.Call(0));
+  CHECK_EQ(constant, m.Call(1));
+}
+
+
+TEST(RunSimpleIfVariable) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0xdb6f20c2;
+  Variable var = m.NewVariable(m.Int32Constant(constant));
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Parameter(0)).Then();
+    var.Set(m.Word32Not(var.Get()));
+  }
+  m.Return(var.Get());
+
+  CHECK_EQ(constant, m.Call(0));
+  CHECK_EQ(~constant, m.Call(1));
+}
+
+
+TEST(RunSimpleElse) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0xfc5eadf4;
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Parameter(0)).Else();
+    m.Return(m.Int32Constant(constant));
+  }
+  m.Return(m.Word32Not(m.Int32Constant(constant)));
+
+  CHECK_EQ(constant, m.Call(0));
+  CHECK_EQ(~constant, m.Call(1));
+}
+
+
+TEST(RunSimpleIfElse) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0xaa9c8cd3;
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Parameter(0)).Then();
+    m.Return(m.Int32Constant(constant));
+    cond.Else();
+    m.Return(m.Word32Not(m.Int32Constant(constant)));
+  }
+
+  CHECK_EQ(~constant, m.Call(0));
+  CHECK_EQ(constant, m.Call(1));
+}
+
+
+TEST(RunSimpleIfElseVariable) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0x67b6f39c;
+  Variable var = m.NewVariable(m.Int32Constant(constant));
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Parameter(0)).Then();
+    var.Set(m.Word32Not(m.Word32Not(var.Get())));
+    cond.Else();
+    var.Set(m.Word32Not(var.Get()));
+  }
+  m.Return(var.Get());
+
+  CHECK_EQ(~constant, m.Call(0));
+  CHECK_EQ(constant, m.Call(1));
+}
+
+
+TEST(RunSimpleIfNoThenElse) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0xd5e550ed;
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Parameter(0));
+  }
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call(0));
+  CHECK_EQ(constant, m.Call(1));
+}
+
+
+TEST(RunSimpleConjunctionVariable) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0xf8fb9ec6;
+  Variable var = m.NewVariable(m.Int32Constant(constant));
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Int32Constant(1)).And();
+    var.Set(m.Word32Not(var.Get()));
+    cond.If(m.Parameter(0)).Then();
+    var.Set(m.Word32Not(m.Word32Not(var.Get())));
+    cond.Else();
+    var.Set(m.Word32Not(var.Get()));
+  }
+  m.Return(var.Get());
+
+  CHECK_EQ(constant, m.Call(0));
+  CHECK_EQ(~constant, m.Call(1));
+}
+
+
+TEST(RunSimpleDisjunctionVariable) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0x118f6ffc;
+  Variable var = m.NewVariable(m.Int32Constant(constant));
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Int32Constant(0)).Or();
+    var.Set(m.Word32Not(var.Get()));
+    cond.If(m.Parameter(0)).Then();
+    var.Set(m.Word32Not(m.Word32Not(var.Get())));
+    cond.Else();
+    var.Set(m.Word32Not(var.Get()));
+  }
+  m.Return(var.Get());
+
+  CHECK_EQ(constant, m.Call(0));
+  CHECK_EQ(~constant, m.Call(1));
+}
+
+
+TEST(RunIfElse) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  {
+    IfBuilder cond(&m);
+    bool first = true;
+    FOR_INT32_INPUTS(i) {
+      Node* c = m.Int32Constant(*i);
+      if (first) {
+        cond.If(m.Word32Equal(m.Parameter(0), c)).Then();
+        m.Return(c);
+        first = false;
+      } else {
+        cond.Else();
+        cond.If(m.Word32Equal(m.Parameter(0), c)).Then();
+        m.Return(c);
+      }
+    }
+  }
+  m.Return(m.Int32Constant(333));
+
+  FOR_INT32_INPUTS(i) { CHECK_EQ(*i, m.Call(*i)); }
+}
+
+
+enum IfBuilderBranchType { kSkipBranch, kBranchFallsThrough, kBranchReturns };
+
+
+static IfBuilderBranchType all_branch_types[] = {
+    kSkipBranch, kBranchFallsThrough, kBranchReturns};
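+
+
+// The RunIfBuilder* drivers below cross every then/else combination of these
+// branch types, so IfBuilder is exercised with skipped, falling-through and
+// returning branches on both sides.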
+
+
+static void RunIfBuilderDisjunction(size_t max, IfBuilderBranchType then_type,
+                                    IfBuilderBranchType else_type) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  std::vector<int32_t> inputs = ValueHelper::int32_vector();
+  std::vector<int32_t>::const_iterator i = inputs.begin();
+  int32_t hit = 0x8c723c9a;
+  int32_t miss = 0x88a6b9f3;
+  {
+    Node* p0 = m.Parameter(0);
+    IfBuilder cond(&m);
+    for (size_t j = 0; j < max; j++, ++i) {
+      CHECK(i != inputs.end());  // Thank you STL.
+      if (j > 0) cond.Or();
+      cond.If(m.Word32Equal(p0, m.Int32Constant(*i)));
+    }
+    switch (then_type) {
+      case kSkipBranch:
+        break;
+      case kBranchFallsThrough:
+        cond.Then();
+        break;
+      case kBranchReturns:
+        cond.Then();
+        m.Return(m.Int32Constant(hit));
+        break;
+    }
+    switch (else_type) {
+      case kSkipBranch:
+        break;
+      case kBranchFallsThrough:
+        cond.Else();
+        break;
+      case kBranchReturns:
+        cond.Else();
+        m.Return(m.Int32Constant(miss));
+        break;
+    }
+  }
+  if (then_type != kBranchReturns || else_type != kBranchReturns) {
+    m.Return(m.Int32Constant(miss));
+  }
+
+  if (then_type != kBranchReturns) hit = miss;
+
+  i = inputs.begin();
+  for (size_t j = 0; i != inputs.end(); j++, ++i) {
+    int32_t result = m.Call(*i);
+    CHECK_EQ(j < max ? hit : miss, result);
+  }
+}
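+
+// Note on the helper above: when the then branch does not return, every input
+// falls through to the shared miss return, which is why hit is collapsed to
+// miss before the checks.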
+
+
+TEST(RunIfBuilderDisjunction) {
+  size_t len = ValueHelper::int32_vector().size() - 1;
+  size_t max = len > 10 ? 10 : len - 1;
+  for (size_t i = 0; i < ARRAY_SIZE(all_branch_types); i++) {
+    for (size_t j = 0; j < ARRAY_SIZE(all_branch_types); j++) {
+      for (size_t size = 1; size < max; size++) {
+        RunIfBuilderDisjunction(size, all_branch_types[i], all_branch_types[j]);
+      }
+      RunIfBuilderDisjunction(len, all_branch_types[i], all_branch_types[j]);
+    }
+  }
+}
+
+
+static void RunIfBuilderConjunction(size_t max, IfBuilderBranchType then_type,
+                                    IfBuilderBranchType else_type) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  std::vector<int32_t> inputs = ValueHelper::int32_vector();
+  std::vector<int32_t>::const_iterator i = inputs.begin();
+  int32_t hit = 0xa0ceb9ca;
+  int32_t miss = 0x226cafaa;
+  {
+    IfBuilder cond(&m);
+    Node* p0 = m.Parameter(0);
+    for (size_t j = 0; j < max; j++, ++i) {
+      if (j > 0) cond.And();
+      cond.If(m.Word32NotEqual(p0, m.Int32Constant(*i)));
+    }
+    switch (then_type) {
+      case kSkipBranch:
+        break;
+      case kBranchFallsThrough:
+        cond.Then();
+        break;
+      case kBranchReturns:
+        cond.Then();
+        m.Return(m.Int32Constant(hit));
+        break;
+    }
+    switch (else_type) {
+      case kSkipBranch:
+        break;
+      case kBranchFallsThrough:
+        cond.Else();
+        break;
+      case kBranchReturns:
+        cond.Else();
+        m.Return(m.Int32Constant(miss));
+        break;
+    }
+  }
+  if (then_type != kBranchReturns || else_type != kBranchReturns) {
+    m.Return(m.Int32Constant(miss));
+  }
+
+  if (then_type != kBranchReturns) hit = miss;
+
+  i = inputs.begin();
+  for (size_t j = 0; i != inputs.end(); j++, ++i) {
+    int32_t result = m.Call(*i);
+    CHECK_EQ(j >= max ? hit : miss, result);
+  }
+}
+
+
+TEST(RunIfBuilderConjunction) {
+  size_t len = ValueHelper::int32_vector().size() - 1;
+  size_t max = len > 10 ? 10 : len - 1;
+  for (size_t i = 0; i < ARRAY_SIZE(all_branch_types); i++) {
+    for (size_t j = 0; j < ARRAY_SIZE(all_branch_types); j++) {
+      for (size_t size = 1; size < max; size++) {
+        RunIfBuilderConjunction(size, all_branch_types[i], all_branch_types[j]);
+      }
+      RunIfBuilderConjunction(len, all_branch_types[i], all_branch_types[j]);
+    }
+  }
+}
+
+
+static void RunDisjunctionVariables(int disjunctions, bool explicit_then,
+                                    bool explicit_else) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0x65a09535;
+
+  Node* cmp_val = m.Int32Constant(constant);
+  Node* one = m.Int32Constant(1);
+  Variable var = m.NewVariable(m.Parameter(0));
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Word32Equal(var.Get(), cmp_val));
+    for (int i = 0; i < disjunctions; i++) {
+      cond.Or();
+      var.Set(m.Int32Add(var.Get(), one));
+      cond.If(m.Word32Equal(var.Get(), cmp_val));
+    }
+    if (explicit_then) {
+      cond.Then();
+    }
+    if (explicit_else) {
+      cond.Else();
+      var.Set(m.Int32Add(var.Get(), one));
+    }
+  }
+  m.Return(var.Get());
+
+  int adds = disjunctions + (explicit_else ? 1 : 0);
+  int32_t input = constant - 2 * adds;
+  for (int i = 0; i < adds; i++) {
+    CHECK_EQ(input + adds, m.Call(input));
+    input++;
+  }
+  for (int i = 0; i < adds + 1; i++) {
+    CHECK_EQ(constant, m.Call(input));
+    input++;
+  }
+  for (int i = 0; i < adds; i++) {
+    CHECK_EQ(input + adds, m.Call(input));
+    input++;
+  }
+}
+
+
+TEST(RunDisjunctionVariables) {
+  for (int disjunctions = 0; disjunctions < 10; disjunctions++) {
+    RunDisjunctionVariables(disjunctions, false, false);
+    RunDisjunctionVariables(disjunctions, false, true);
+    RunDisjunctionVariables(disjunctions, true, false);
+    RunDisjunctionVariables(disjunctions, true, true);
+  }
+}
+
+
+static void RunConjunctionVariables(int conjunctions, bool explicit_then,
+                                    bool explicit_else) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0x2c7f4b45;
+  Node* cmp_val = m.Int32Constant(constant);
+  Node* one = m.Int32Constant(1);
+  Variable var = m.NewVariable(m.Parameter(0));
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Word32NotEqual(var.Get(), cmp_val));
+    for (int i = 0; i < conjunctions; i++) {
+      cond.And();
+      var.Set(m.Int32Add(var.Get(), one));
+      cond.If(m.Word32NotEqual(var.Get(), cmp_val));
+    }
+    if (explicit_then) {
+      cond.Then();
+      var.Set(m.Int32Add(var.Get(), one));
+    }
+    if (explicit_else) {
+      cond.Else();
+    }
+  }
+  m.Return(var.Get());
+
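+  // Mirror image of the disjunction test: inputs below the window pass every
+  // conjunct and collect all 'adds' increments, the next adds + 1 inputs end
+  // up exactly on the constant, and inputs above miss everywhere again.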
+  int adds = conjunctions + (explicit_then ? 1 : 0);
+  int32_t input = constant - 2 * adds;
+  for (int i = 0; i < adds; i++) {
+    CHECK_EQ(input + adds, m.Call(input));
+    input++;
+  }
+  for (int i = 0; i < adds + 1; i++) {
+    CHECK_EQ(constant, m.Call(input));
+    input++;
+  }
+  for (int i = 0; i < adds; i++) {
+    CHECK_EQ(input + adds, m.Call(input));
+    input++;
+  }
+}
+
+
+TEST(RunConjunctionVariables) {
+  for (int conjunctions = 0; conjunctions < 10; conjunctions++) {
+    RunConjunctionVariables(conjunctions, false, false);
+    RunConjunctionVariables(conjunctions, false, true);
+    RunConjunctionVariables(conjunctions, true, false);
+    RunConjunctionVariables(conjunctions, true, true);
+  }
+}
+
+
+TEST(RunSimpleNestedIf) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+  const size_t NUM_VALUES = 7;
+  std::vector<int32_t> inputs = ValueHelper::int32_vector();
+  CHECK(inputs.size() >= NUM_VALUES);
+  Node* values[NUM_VALUES];
+  for (size_t j = 0; j < NUM_VALUES; j++) {
+    values[j] = m.Int32Constant(inputs[j]);
+  }
+  {
+    IfBuilder if_0(&m);
+    if_0.If(m.Word32Equal(m.Parameter(0), values[0])).Then();
+    {
+      IfBuilder if_1(&m);
+      if_1.If(m.Word32Equal(m.Parameter(1), values[1])).Then();
+      { m.Return(values[3]); }
+      if_1.Else();
+      { m.Return(values[4]); }
+    }
+    if_0.Else();
+    {
+      IfBuilder if_1(&m);
+      if_1.If(m.Word32Equal(m.Parameter(1), values[2])).Then();
+      { m.Return(values[5]); }
+      if_1.Else();
+      { m.Return(values[6]); }
+    }
+  }
+
+  int32_t result = m.Call(inputs[0], inputs[1]);
+  CHECK_EQ(inputs[3], result);
+
+  result = m.Call(inputs[0], inputs[1] + 1);
+  CHECK_EQ(inputs[4], result);
+
+  result = m.Call(inputs[0] + 1, inputs[2]);
+  CHECK_EQ(inputs[5], result);
+
+  result = m.Call(inputs[0] + 1, inputs[2] + 1);
+  CHECK_EQ(inputs[6], result);
+}
+
+
+TEST(RunUnreachableBlockAfterIf) {
+  StructuredMachineAssemblerTester<int32_t> m;
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Int32Constant(0)).Then();
+    m.Return(m.Int32Constant(1));
+    cond.Else();
+    m.Return(m.Int32Constant(2));
+  }
+  // This is unreachable.
+  m.Return(m.Int32Constant(3));
+  CHECK_EQ(2, m.Call());
+}
+
+
+TEST(RunUnreachableBlockAfterLoop) {
+  StructuredMachineAssemblerTester<int32_t> m;
+  {
+    Loop loop(&m);
+    m.Return(m.Int32Constant(1));
+  }
+  // This is unreachable.
+  m.Return(m.Int32Constant(3));
+  CHECK_EQ(1, m.Call());
+}
+
+
+TEST(RunSimpleLoop) {
+  StructuredMachineAssemblerTester<int32_t> m;
+  int32_t constant = 0x120c1f85;
+  {
+    Loop loop(&m);
+    m.Return(m.Int32Constant(constant));
+  }
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunSimpleLoopBreak) {
+  StructuredMachineAssemblerTester<int32_t> m;
+  int32_t constant = 0x10ddb0a6;
+  {
+    Loop loop(&m);
+    loop.Break();
+  }
+  m.Return(m.Int32Constant(constant));
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunCountToTen) {
+  StructuredMachineAssemblerTester<int32_t> m;
+  Variable i = m.NewVariable(m.Int32Constant(0));
+  Node* ten = m.Int32Constant(10);
+  Node* one = m.Int32Constant(1);
+  {
+    Loop loop(&m);
+    {
+      IfBuilder cond(&m);
+      cond.If(m.Word32Equal(i.Get(), ten)).Then();
+      loop.Break();
+    }
+    i.Set(m.Int32Add(i.Get(), one));
+  }
+  m.Return(i.Get());
+  CHECK_EQ(10, m.Call());
+}
+
+
+TEST(RunCountToTenAcc) {
+  StructuredMachineAssemblerTester<int32_t> m;
+  int32_t constant = 0xf27aed64;
+  Variable i = m.NewVariable(m.Int32Constant(0));
+  Variable var = m.NewVariable(m.Int32Constant(constant));
+  Node* ten = m.Int32Constant(10);
+  Node* one = m.Int32Constant(1);
+  {
+    Loop loop(&m);
+    {
+      IfBuilder cond(&m);
+      cond.If(m.Word32Equal(i.Get(), ten)).Then();
+      loop.Break();
+    }
+    i.Set(m.Int32Add(i.Get(), one));
+    var.Set(m.Int32Add(var.Get(), i.Get()));
+  }
+  m.Return(var.Get());
+
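+  // The loop body runs ten times, accumulating 1 + 2 + ... + 10 = 55,
+  // written below as 10 + 9 * 5.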
+  CHECK_EQ(constant + 10 + 9 * 5, m.Call());
+}
+
+
+TEST(RunSimpleNestedLoop) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  Node* zero = m.Int32Constant(0);
+  Node* one = m.Int32Constant(1);
+  Node* two = m.Int32Constant(2);
+  Node* three = m.Int32Constant(3);
+  {
+    Loop l1(&m);
+    {
+      Loop l2(&m);
+      {
+        IfBuilder cond(&m);
+        cond.If(m.Word32Equal(m.Parameter(0), one)).Then();
+        l1.Break();
+      }
+      {
+        Loop l3(&m);
+        {
+          IfBuilder cond(&m);
+          cond.If(m.Word32Equal(m.Parameter(0), two)).Then();
+          l2.Break();
+          cond.Else();
+          cond.If(m.Word32Equal(m.Parameter(0), three)).Then();
+          l3.Break();
+        }
+        m.Return(three);
+      }
+      m.Return(two);
+    }
+    m.Return(one);
+  }
+  m.Return(zero);
+
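+  // The parameter picks which exit fires: 1 breaks out of l1 (falls through
+  // to return 0), 2 breaks l2 (return 1), 3 breaks l3 (return 2), and any
+  // other value reaches the innermost return (return 3).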
+  CHECK_EQ(0, m.Call(1));
+  CHECK_EQ(1, m.Call(2));
+  CHECK_EQ(2, m.Call(3));
+  CHECK_EQ(3, m.Call(4));
+}
+
+
+TEST(RunFib) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  // Constants.
+  Node* zero = m.Int32Constant(0);
+  Node* one = m.Int32Constant(1);
+  Node* two = m.Int32Constant(2);
+  // Variables.
+  // cnt = input
+  Variable cnt = m.NewVariable(m.Parameter(0));
+  // if (cnt < 2) return i
+  {
+    IfBuilder lt2(&m);
+    lt2.If(m.Int32LessThan(cnt.Get(), two)).Then();
+    m.Return(cnt.Get());
+  }
+  // cnt -= 2
+  cnt.Set(m.Int32Sub(cnt.Get(), two));
+  // res = 1
+  Variable res = m.NewVariable(one);
+  {
+    // prv_0 = 1
+    // prv_1 = 1
+    Variable prv_0 = m.NewVariable(one);
+    Variable prv_1 = m.NewVariable(one);
+    // while (cnt != 0) {
+    Loop main(&m);
+    {
+      IfBuilder nz(&m);
+      nz.If(m.Word32Equal(cnt.Get(), zero)).Then();
+      main.Break();
+    }
+    // res = prv_0 + prv_1
+    // prv_0 = prv_1
+    // prv_1 = res
+    res.Set(m.Int32Add(prv_0.Get(), prv_1.Get()));
+    prv_0.Set(prv_1.Get());
+    prv_1.Set(res.Get());
+    // cnt--
+    cnt.Set(m.Int32Sub(cnt.Get(), one));
+  }
+  m.Return(res.Get());
+
+  int32_t values[] = {0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144};
+  for (size_t i = 0; i < ARRAY_SIZE(values); i++) {
+    CHECK_EQ(values[i], m.Call(static_cast<int32_t>(i)));
+  }
+}
+
+
+static int VariableIntroduction() {
+  while (true) {
+    int ret = 0;
+    for (int i = 0; i < 10; i++) {
+      for (int j = i; j < 10; j++) {
+        for (int k = j; k < 10; k++) {
+          ret++;
+        }
+        ret++;
+      }
+      ret++;
+    }
+    return ret;
+  }
+}
+
+
+TEST(RunVariableIntroduction) {
+  StructuredMachineAssemblerTester<int32_t> m;
+  Node* zero = m.Int32Constant(0);
+  Node* one = m.Int32Constant(1);
+  // Use an IfBuilder to get out of the start block.
+  {
+    IfBuilder i0(&m);
+    i0.If(zero).Then();
+    m.Return(one);
+  }
+  Node* ten = m.Int32Constant(10);
+  Variable v0 =
+      m.NewVariable(zero);  // Introduce variable outside of the start block.
+  {
+    Loop l0(&m);
+    Variable ret = m.NewVariable(zero);  // Introduce loop variable.
+    {
+      Loop l1(&m);
+      {
+        IfBuilder i1(&m);
+        i1.If(m.Word32Equal(v0.Get(), ten)).Then();
+        l1.Break();
+      }
+      Variable v1 = m.NewVariable(v0.Get());  // Introduce loop variable.
+      {
+        Loop l2(&m);
+        {
+          IfBuilder i2(&m);
+          i2.If(m.Word32Equal(v1.Get(), ten)).Then();
+          l2.Break();
+        }
+        Variable v2 = m.NewVariable(v1.Get());  // Introduce loop variable.
+        {
+          Loop l3(&m);
+          {
+            IfBuilder i3(&m);
+            i3.If(m.Word32Equal(v2.Get(), ten)).Then();
+            l3.Break();
+          }
+          ret.Set(m.Int32Add(ret.Get(), one));
+          v2.Set(m.Int32Add(v2.Get(), one));
+        }
+        ret.Set(m.Int32Add(ret.Get(), one));
+        v1.Set(m.Int32Add(v1.Get(), one));
+      }
+      ret.Set(m.Int32Add(ret.Get(), one));
+      v0.Set(m.Int32Add(v0.Get(), one));
+    }
+    m.Return(ret.Get());  // Return loop variable.
+  }
+  CHECK_EQ(VariableIntroduction(), m.Call());
+}
+
+
+TEST(RunIfBuilderVariableLiveness) {
+  StructuredMachineAssemblerTester<int32_t> m;
+  typedef i::compiler::StructuredMachineAssemblerFriend F;
+  Node* zero = m.Int32Constant(0);
+  Variable v_outer = m.NewVariable(zero);
+  IfBuilder cond(&m);
+  cond.If(zero).Then();
+  Variable v_then = m.NewVariable(zero);
+  CHECK(F::VariableAlive(&m, v_outer));
+  CHECK(F::VariableAlive(&m, v_then));
+  cond.Else();
+  Variable v_else = m.NewVariable(zero);
+  CHECK(F::VariableAlive(&m, v_outer));
+  CHECK(F::VariableAlive(&m, v_else));
+  CHECK(!F::VariableAlive(&m, v_then));
+  cond.End();
+  CHECK(F::VariableAlive(&m, v_outer));
+  CHECK(!F::VariableAlive(&m, v_then));
+  CHECK(!F::VariableAlive(&m, v_else));
+}
+
+
+TEST(RunSimpleExpression1) {
+  StructuredMachineAssemblerTester<int32_t> m;
+
+  int32_t constant = 0x0c2974ef;
+  Node* zero = m.Int32Constant(0);
+  Node* one = m.Int32Constant(1);
+  {
+    // if (((1 && 1) && 1) && 1) return constant; return 0;
+    IfBuilder cond(&m);
+    cond.OpenParen();
+    cond.OpenParen().If(one).And();
+    cond.If(one).CloseParen().And();
+    cond.If(one).CloseParen().And();
+    cond.If(one).Then();
+    m.Return(m.Int32Constant(constant));
+  }
+  m.Return(zero);
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunSimpleExpression2) {
+  StructuredMachineAssemblerTester<int32_t> m;
+
+  int32_t constant = 0x2eddc11b;
+  Node* zero = m.Int32Constant(0);
+  Node* one = m.Int32Constant(1);
+  {
+    // if (((0 || 1) && 1) && 1) return constant; return 0;
+    IfBuilder cond(&m);
+    cond.OpenParen();
+    cond.OpenParen().If(zero).Or();
+    cond.If(one).CloseParen().And();
+    cond.If(one).CloseParen().And();
+    cond.If(one).Then();
+    m.Return(m.Int32Constant(constant));
+  }
+  m.Return(zero);
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunSimpleExpression3) {
+  StructuredMachineAssemblerTester<int32_t> m;
+
+  int32_t constant = 0x9ed5e9ef;
+  Node* zero = m.Int32Constant(0);
+  Node* one = m.Int32Constant(1);
+  {
+    // if (1 && ((0 || 1) && 1) && 1) return constant; return 0;
+    IfBuilder cond(&m);
+    cond.If(one).And();
+    cond.OpenParen();
+    cond.OpenParen().If(zero).Or();
+    cond.If(one).CloseParen().And();
+    cond.If(one).CloseParen().And();
+    cond.If(one).Then();
+    m.Return(m.Int32Constant(constant));
+  }
+  m.Return(zero);
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunSimpleExpressionVariable1) {
+  StructuredMachineAssemblerTester<int32_t> m;
+
+  int32_t constant = 0x4b40a986;
+  Node* one = m.Int32Constant(1);
+  Variable var = m.NewVariable(m.Int32Constant(constant));
+  {
+    // if (var.Get() && ((!var || var) && var) && var) {} return var;
+    // incrementing var in each environment.
+    IfBuilder cond(&m);
+    cond.If(var.Get()).And();
+    var.Set(m.Int32Add(var.Get(), one));
+    cond.OpenParen().OpenParen().If(m.Word32BinaryNot(var.Get())).Or();
+    var.Set(m.Int32Add(var.Get(), one));
+    cond.If(var.Get()).CloseParen().And();
+    var.Set(m.Int32Add(var.Get(), one));
+    cond.If(var.Get()).CloseParen().And();
+    var.Set(m.Int32Add(var.Get(), one));
+    cond.If(var.Get());
+  }
+  m.Return(var.Get());
+
+  CHECK_EQ(constant + 4, m.Call());
+}
+
+
+class QuicksortHelper : public StructuredMachineAssemblerTester<int32_t> {
+ public:
+  QuicksortHelper()
+      : StructuredMachineAssemblerTester(
+            MachineOperatorBuilder::pointer_rep(), kMachineWord32,
+            MachineOperatorBuilder::pointer_rep(), kMachineWord32),
+        input_(NULL),
+        stack_limit_(NULL),
+        one_(Int32Constant(1)),
+        stack_frame_size_(Int32Constant(kFrameVariables * 4)),
+        left_offset_(Int32Constant(0 * 4)),
+        right_offset_(Int32Constant(1 * 4)) {
+    Build();
+  }
+
+  int32_t DoCall(int32_t* input, int32_t input_length) {
+    int32_t stack_space[20];
+    // Do call.
+    int32_t return_val = Call(input, input_length, stack_space,
+                              static_cast<int32_t>(ARRAY_SIZE(stack_space)));
+    // Ran out of stack space.
+    if (return_val != 0) return return_val;
+    // Check sorted.
+    int32_t last = input[0];
+    for (int32_t i = 0; i < input_length; i++) {
+      CHECK(last <= input[i]);
+      last = input[i];
+    }
+    return return_val;
+  }
+
+ private:
+  void Inc32(const Variable& var) { var.Set(Int32Add(var.Get(), one_)); }
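+  // Scales an element index to a byte offset (4 bytes per int32 element).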
+  Node* Index(Node* index) { return Word32Shl(index, Int32Constant(2)); }
+  Node* ArrayLoad(Node* index) {
+    return Load(kMachineWord32, input_, Index(index));
+  }
+  void Swap(Node* a_index, Node* b_index) {
+    Node* a = ArrayLoad(a_index);
+    Node* b = ArrayLoad(b_index);
+    Store(kMachineWord32, input_, Index(a_index), b);
+    Store(kMachineWord32, input_, Index(b_index), a);
+  }
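+  // Pushes a (left, right) work item onto the explicit call stack, returning
+  // -1 from the generated code when the caller-provided space is exhausted.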
+  void AddToCallStack(const Variable& fp, Node* left, Node* right) {
+    {
+      // Stack limit check.
+      IfBuilder cond(this);
+      cond.If(IntPtrLessThanOrEqual(fp.Get(), stack_limit_)).Then();
+      Return(Int32Constant(-1));
+    }
+    Store(kMachineWord32, fp.Get(), left_offset_, left);
+    Store(kMachineWord32, fp.Get(), right_offset_, right);
+    fp.Set(IntPtrAdd(fp.Get(), ConvertInt32ToIntPtr(stack_frame_size_)));
+  }
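+  // Builds an iterative quicksort: instead of recursing, the left half of
+  // each partition is pushed as a (left, right) frame onto the explicit
+  // stack while the right half is handled by continuing the outermost loop.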
+  void Build() {
+    Variable left = NewVariable(Int32Constant(0));
+    Variable right =
+        NewVariable(Int32Sub(Parameter(kInputLengthParameter), one_));
+    input_ = Parameter(kInputParameter);
+    Node* top_of_stack = Parameter(kStackParameter);
+    stack_limit_ = IntPtrSub(
+        top_of_stack, ConvertInt32ToIntPtr(Parameter(kStackLengthParameter)));
+    Variable fp = NewVariable(top_of_stack);
+    {
+      Loop outermost(this);
+      // Edge case - 2-element array.
+      {
+        IfBuilder cond(this);
+        cond.If(Word32Equal(left.Get(), Int32Sub(right.Get(), one_))).And();
+        cond.If(Int32LessThanOrEqual(ArrayLoad(right.Get()),
+                                     ArrayLoad(left.Get()))).Then();
+        Swap(left.Get(), right.Get());
+      }
+      {
+        IfBuilder cond(this);
+        // Algorithm complete condition.
+        cond.If(WordEqual(top_of_stack, fp.Get())).And();
+        cond.If(Int32LessThanOrEqual(Int32Sub(right.Get(), one_), left.Get()))
+            .Then();
+        outermost.Break();
+        // 'Recursion' exit condition. Pop frame and continue.
+        cond.Else();
+        cond.If(Int32LessThanOrEqual(Int32Sub(right.Get(), one_), left.Get()))
+            .Then();
+        fp.Set(IntPtrSub(fp.Get(), ConvertInt32ToIntPtr(stack_frame_size_)));
+        left.Set(Load(kMachineWord32, fp.Get(), left_offset_));
+        right.Set(Load(kMachineWord32, fp.Get(), right_offset_));
+        outermost.Continue();
+      }
+      // Partition.
+      Variable store_index = NewVariable(left.Get());
+      {
+        Node* pivot_index =
+            Int32Div(Int32Add(left.Get(), right.Get()), Int32Constant(2));
+        Node* pivot = ArrayLoad(pivot_index);
+        Swap(pivot_index, right.Get());
+        Variable i = NewVariable(left.Get());
+        {
+          Loop partition(this);
+          {
+            IfBuilder cond(this);
+            // Partition complete.
+            cond.If(Word32Equal(i.Get(), right.Get())).Then();
+            partition.Break();
+            // Need swap.
+            cond.Else();
+            cond.If(Int32LessThanOrEqual(ArrayLoad(i.Get()), pivot)).Then();
+            Swap(i.Get(), store_index.Get());
+            Inc32(store_index);
+          }
+          Inc32(i);
+        }  // End partition loop.
+        Swap(store_index.Get(), right.Get());
+      }
+      // 'Recurse' left and right halves of partition.
+      // Tail recurse second one.
+      AddToCallStack(fp, left.Get(), Int32Sub(store_index.Get(), one_));
+      left.Set(Int32Add(store_index.Get(), one_));
+    }  // End outermost loop.
+    Return(Int32Constant(0));
+  }
+
+  static const int kFrameVariables = 2;  // left, right
+  // Parameter offsets.
+  static const int kInputParameter = 0;
+  static const int kInputLengthParameter = 1;
+  static const int kStackParameter = 2;
+  static const int kStackLengthParameter = 3;
+  // Function inputs.
+  Node* input_;
+  Node* stack_limit_;
+  // Constants.
+  Node* const one_;
+  // Frame constants.
+  Node* const stack_frame_size_;
+  Node* const left_offset_;
+  Node* const right_offset_;
+};
+
+
+TEST(RunSimpleQuicksort) {
+  QuicksortHelper m;
+  int32_t inputs[] = {9, 7, 1, 8, 11};
+  CHECK_EQ(0, m.DoCall(inputs, ARRAY_SIZE(inputs)));
+}
+
+
+TEST(RunRandomQuicksort) {
+  QuicksortHelper m;
+
+  v8::base::RandomNumberGenerator rng;
+  static const int kMaxLength = 40;
+  int32_t inputs[kMaxLength];
+
+  for (int length = 1; length < kMaxLength; length++) {
+    for (int i = 0; i < 70; i++) {
+      // Randomize inputs.
+      for (int j = 0; j < length; j++) {
+        inputs[j] = rng.NextInt(10) - 5;
+      }
+      CHECK_EQ(0, m.DoCall(inputs, length));
+    }
+  }
+}
+
+
+TEST(MultipleScopes) {
+  StructuredMachineAssemblerTester<int32_t> m;
+  for (int i = 0; i < 10; i++) {
+    IfBuilder b(&m);
+    b.If(m.Int32Constant(0)).Then();
+    m.NewVariable(m.Int32Constant(0));
+  }
+  m.Return(m.Int32Constant(0));
+  CHECK_EQ(0, m.Call());
+}
+
+#endif
diff --git a/test/cctest/compiler/value-helper.h b/test/cctest/compiler/value-helper.h
new file mode 100644 (file)
index 0000000..7b8fcc6
--- /dev/null
@@ -0,0 +1,122 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_VALUE_HELPER_H_
+#define V8_CCTEST_COMPILER_VALUE_HELPER_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-matchers.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "test/cctest/cctest.h"
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A collection of utilities related to numerical and heap values, including
+// example input values of various types (int32_t, uint32_t, double, etc.).
+class ValueHelper {
+ public:
+  Isolate* isolate_;
+
+  ValueHelper() : isolate_(CcTest::InitIsolateOnce()) {}
+
+  template <typename T>
+  void CheckConstant(T expected, Node* node) {
+    CHECK_EQ(expected, ValueOf<T>(node->op()));
+  }
+
+  void CheckFloat64Constant(double expected, Node* node) {
+    CHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
+    CHECK_EQ(expected, ValueOf<double>(node->op()));
+  }
+
+  void CheckNumberConstant(double expected, Node* node) {
+    CHECK_EQ(IrOpcode::kNumberConstant, node->opcode());
+    CHECK_EQ(expected, ValueOf<double>(node->op()));
+  }
+
+  void CheckInt32Constant(int32_t expected, Node* node) {
+    CHECK_EQ(IrOpcode::kInt32Constant, node->opcode());
+    CHECK_EQ(expected, ValueOf<int32_t>(node->op()));
+  }
+
+  void CheckUint32Constant(int32_t expected, Node* node) {
+    CHECK_EQ(IrOpcode::kInt32Constant, node->opcode());
+    CHECK_EQ(expected, ValueOf<uint32_t>(node->op()));
+  }
+
+  void CheckHeapConstant(Object* expected, Node* node) {
+    CHECK_EQ(IrOpcode::kHeapConstant, node->opcode());
+    CHECK_EQ(expected, *ValueOf<Handle<Object> >(node->op()));
+  }
+
+  void CheckTrue(Node* node) {
+    CheckHeapConstant(isolate_->heap()->true_value(), node);
+  }
+
+  void CheckFalse(Node* node) {
+    CheckHeapConstant(isolate_->heap()->false_value(), node);
+  }
+
+  static std::vector<double> float64_vector() {
+    static const double nan = v8::base::OS::nan_value();
+    static const double values[] = {
+        0.125,           0.25,            0.375,          0.5,
+        1.25,            -1.75,           2,              5.125,
+        6.25,            0.0,             -0.0,           982983.25,
+        888,             2147483647.0,    -999.75,        3.1e7,
+        -2e66,           3e-88,           -2147483648.0,  V8_INFINITY,
+        -V8_INFINITY,    nan,             2147483647.375, 2147483647.75,
+        2147483648.0,    2147483648.25,   2147483649.25,  -2147483647.0,
+        -2147483647.125, -2147483647.875, -2147483648.25, -2147483649.5};
+    return std::vector<double>(&values[0], &values[ARRAY_SIZE(values)]);
+  }
+
+  static const std::vector<int32_t> int32_vector() {
+    std::vector<uint32_t> values = uint32_vector();
+    return std::vector<int32_t>(values.begin(), values.end());
+  }
+
+  static const std::vector<uint32_t> uint32_vector() {
+    static const uint32_t kValues[] = {
+        0x00000000, 0x00000001, 0xffffffff, 0x1b09788b, 0x04c5fce8, 0xcc0de5bf,
+        0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
+        0x0000009e, 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
+        0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
+        0x761c4761, 0x80000000, 0x88888888, 0xa0000000, 0xdddddddd, 0xe0000000,
+        0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
+        0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
+        0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff};
+    return std::vector<uint32_t>(&kValues[0], &kValues[ARRAY_SIZE(kValues)]);
+  }
+
+  static const std::vector<double> nan_vector(size_t limit = 0) {
+    static const double nan = v8::base::OS::nan_value();
+    static const double values[] = {-nan,               -V8_INFINITY * -0.0,
+                                    -V8_INFINITY * 0.0, V8_INFINITY * -0.0,
+                                    V8_INFINITY * 0.0,  nan};
+    return std::vector<double>(&values[0], &values[ARRAY_SIZE(values)]);
+  }
+};
+
+// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... }
+// Watch out, these macros aren't hygienic; they pollute your scope. Thanks STL.
+#define FOR_INPUTS(ctype, itype, var)                           \
+  std::vector<ctype> var##_vec = ValueHelper::itype##_vector(); \
+  for (std::vector<ctype>::iterator var = var##_vec.begin();    \
+       var != var##_vec.end(); ++var)
+
+#define FOR_INT32_INPUTS(var) FOR_INPUTS(int32_t, int32, var)
+#define FOR_UINT32_INPUTS(var) FOR_INPUTS(uint32_t, uint32, var)
+#define FOR_FLOAT64_INPUTS(var) FOR_INPUTS(double, float64, var)
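+
+// A typical (hypothetical) use inside a test body:
+//   FOR_INT32_INPUTS(i) { CHECK_EQ(*i, m.Call(*i)); }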
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_CCTEST_COMPILER_VALUE_HELPER_H_
diff --git a/test/cctest/compiler/function-tester.h b/test/cctest/compiler/function-tester.h
new file mode 100644 (file)
index 0000000..2ed2fe9
--- /dev/null
@@ -0,0 +1,194 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_FUNCTION_TESTER_H_
+#define V8_CCTEST_COMPILER_FUNCTION_TESTER_H_
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler.h"
+#include "src/compiler/pipeline.h"
+#include "src/execution.h"
+#include "src/full-codegen.h"
+#include "src/handles.h"
+#include "src/objects-inl.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/scopes.h"
+
+#define USE_CRANKSHAFT 0
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class FunctionTester : public InitializedHandleScope {
+ public:
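+  // The comma expression in the initializer below switches on
+  // --allow-natives-syntax before the test source is compiled.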
+  explicit FunctionTester(const char* source)
+      : isolate(main_isolate()),
+        function((FLAG_allow_natives_syntax = true, NewFunction(source))) {
+    Compile(function);
+  }
+
+  Isolate* isolate;
+  Handle<JSFunction> function;
+
+  Handle<JSFunction> Compile(Handle<JSFunction> function) {
+#if V8_TURBOFAN_TARGET
+    CompilationInfoWithZone info(function);
+
+    CHECK(Parser::Parse(&info));
+    StrictMode strict_mode = info.function()->strict_mode();
+    info.SetStrictMode(strict_mode);
+    info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
+    CHECK(Rewriter::Rewrite(&info));
+    CHECK(Scope::Analyze(&info));
+    CHECK_NE(NULL, info.scope());
+
+    EnsureDeoptimizationSupport(&info);
+
+    Pipeline pipeline(&info);
+    Handle<Code> code = pipeline.GenerateCode();
+
+    CHECK(!code.is_null());
+    function->ReplaceCode(*code);
+#elif USE_CRANKSHAFT
+    Handle<Code> unoptimized = Handle<Code>(function->code());
+    Handle<Code> code = Compiler::GetOptimizedCode(function, unoptimized,
+                                                   Compiler::NOT_CONCURRENT);
+    CHECK(!code.is_null());
+#if ENABLE_DISASSEMBLER
+    if (FLAG_print_opt_code) {
+      CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+      code->Disassemble("test code", tracing_scope.file());
+    }
+#endif
+    function->ReplaceCode(*code);
+#endif
+    return function;
+  }
+
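+  // The optimizing pipeline requires deoptimization support data from full
+  // codegen; recompile the unoptimized code here if it is missing.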
+  static void EnsureDeoptimizationSupport(CompilationInfo* info) {
+    bool should_recompile = !info->shared_info()->has_deoptimization_support();
+    if (should_recompile) {
+      CompilationInfoWithZone unoptimized(info->shared_info());
+      // Note that we use the same AST that we will use for generating the
+      // optimized code.
+      unoptimized.SetFunction(info->function());
+      unoptimized.PrepareForCompilation(info->scope());
+      unoptimized.SetContext(info->context());
+      unoptimized.EnableDeoptimizationSupport();
+      bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
+      CHECK(succeeded);
+      Handle<SharedFunctionInfo> shared = info->shared_info();
+      shared->EnableDeoptimizationSupport(*unoptimized.code());
+    }
+  }
+
+  MaybeHandle<Object> Call(Handle<Object> a, Handle<Object> b) {
+    Handle<Object> args[] = {a, b};
+    return Execution::Call(isolate, function, undefined(), 2, args, false);
+  }
+
+  void CheckThrows(Handle<Object> a, Handle<Object> b) {
+    TryCatch try_catch;
+    MaybeHandle<Object> no_result = Call(a, b);
+    CHECK(isolate->has_pending_exception());
+    CHECK(try_catch.HasCaught());
+    CHECK(no_result.is_null());
+    // TODO(mstarzinger): Temporary workaround for issue chromium:362388.
+    isolate->OptionalRescheduleException(true);
+  }
+
+  v8::Handle<v8::Message> CheckThrowsReturnMessage(Handle<Object> a,
+                                                   Handle<Object> b) {
+    TryCatch try_catch;
+    MaybeHandle<Object> no_result = Call(a, b);
+    CHECK(isolate->has_pending_exception());
+    CHECK(try_catch.HasCaught());
+    CHECK(no_result.is_null());
+    // TODO(mstarzinger): Calling OptionalRescheduleException is a dirty hack,
+    // but it is the only way to keep Message() from asserting when an
+    // external exception has been caught by the try_catch.
+    isolate->OptionalRescheduleException(true);
+    return try_catch.Message();
+  }
+
+  void CheckCall(Handle<Object> expected, Handle<Object> a, Handle<Object> b) {
+    Handle<Object> result = Call(a, b).ToHandleChecked();
+    CHECK(expected->SameValue(*result));
+  }
+
+  void CheckCall(Handle<Object> expected, Handle<Object> a) {
+    CheckCall(expected, a, undefined());
+  }
+
+  void CheckCall(Handle<Object> expected) {
+    CheckCall(expected, undefined(), undefined());
+  }
+
+  void CheckCall(double expected, double a, double b) {
+    CheckCall(Val(expected), Val(a), Val(b));
+  }
+
+  void CheckTrue(Handle<Object> a, Handle<Object> b) {
+    CheckCall(true_value(), a, b);
+  }
+
+  void CheckTrue(Handle<Object> a) { CheckCall(true_value(), a, undefined()); }
+
+  void CheckTrue(double a, double b) {
+    CheckCall(true_value(), Val(a), Val(b));
+  }
+
+  void CheckFalse(Handle<Object> a, Handle<Object> b) {
+    CheckCall(false_value(), a, b);
+  }
+
+  void CheckFalse(Handle<Object> a) {
+    CheckCall(false_value(), a, undefined());
+  }
+
+  void CheckFalse(double a, double b) {
+    CheckCall(false_value(), Val(a), Val(b));
+  }
+
+  Handle<JSFunction> NewFunction(const char* source) {
+    return v8::Utils::OpenHandle(
+        *v8::Handle<v8::Function>::Cast(CompileRun(source)));
+  }
+
+  Handle<JSObject> NewObject(const char* source) {
+    return v8::Utils::OpenHandle(
+        *v8::Handle<v8::Object>::Cast(CompileRun(source)));
+  }
+
+  Handle<String> Val(const char* string) {
+    return isolate->factory()->InternalizeUtf8String(string);
+  }
+
+  Handle<Object> Val(double value) {
+    return isolate->factory()->NewNumber(value);
+  }
+
+  Handle<Object> infinity() { return isolate->factory()->infinity_value(); }
+
+  Handle<Object> minus_infinity() { return Val(-V8_INFINITY); }
+
+  Handle<Object> nan() { return isolate->factory()->nan_value(); }
+
+  Handle<Object> undefined() { return isolate->factory()->undefined_value(); }
+
+  Handle<Object> null() { return isolate->factory()->null_value(); }
+
+  Handle<Object> true_value() { return isolate->factory()->true_value(); }
+
+  Handle<Object> false_value() { return isolate->factory()->false_value(); }
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_CCTEST_COMPILER_FUNCTION_TESTER_H_
diff --git a/test/cctest/compiler/graph-builder-tester.cc b/test/cctest/compiler/graph-builder-tester.cc
new file mode 100644 (file)
index 0000000..2d8f9d5
--- /dev/null
@@ -0,0 +1,64 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/compiler/graph-builder-tester.h"
+#include "src/compiler/pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+MachineCallHelper::MachineCallHelper(Zone* zone,
+                                     MachineCallDescriptorBuilder* builder)
+    : CallHelper(zone->isolate()),
+      call_descriptor_builder_(builder),
+      parameters_(NULL),
+      graph_(NULL) {}
+
+
+void MachineCallHelper::InitParameters(GraphBuilder* builder,
+                                       CommonOperatorBuilder* common) {
+  ASSERT_EQ(NULL, parameters_);
+  graph_ = builder->graph();
+  if (parameter_count() == 0) return;
+  parameters_ = builder->graph()->zone()->NewArray<Node*>(parameter_count());
+  for (int i = 0; i < parameter_count(); ++i) {
+    parameters_[i] = builder->NewNode(common->Parameter(i));
+  }
+}
+
+
+byte* MachineCallHelper::Generate() {
+  ASSERT(parameter_count() == 0 || parameters_ != NULL);
+  if (code_.is_null()) {
+    Zone* zone = graph_->zone();
+    CompilationInfo info(zone->isolate(), zone);
+    Linkage linkage(&info, call_descriptor_builder_->BuildCallDescriptor(zone));
+    Pipeline pipeline(&info);
+    code_ = pipeline.GenerateCodeForMachineGraph(&linkage, graph_);
+  }
+  return code_.ToHandleChecked()->entry();
+}
+
+
+void MachineCallHelper::VerifyParameters(
+    int parameter_count, MachineRepresentation* parameter_types) {
+  CHECK_EQ(this->parameter_count(), parameter_count);
+  const MachineRepresentation* expected_types =
+      call_descriptor_builder_->parameter_types();
+  for (int i = 0; i < parameter_count; i++) {
+    CHECK_EQ(expected_types[i], parameter_types[i]);
+  }
+}
+
+
+Node* MachineCallHelper::Parameter(int offset) {
+  ASSERT_NE(NULL, parameters_);
+  ASSERT(0 <= offset && offset < parameter_count());
+  return parameters_[offset];
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/test/cctest/compiler/graph-builder-tester.h b/test/cctest/compiler/graph-builder-tester.h
new file mode 100644 (file)
index 0000000..096828a
--- /dev/null
@@ -0,0 +1,111 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_
+#define V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/machine-node-factory.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/simplified-node-factory.h"
+#include "src/compiler/simplified-operator.h"
+#include "test/cctest/compiler/call-tester.h"
+#include "test/cctest/compiler/simplified-graph-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A class that just passes node creation on to the Graph.
+class DirectGraphBuilder : public GraphBuilder {
+ public:
+  explicit DirectGraphBuilder(Graph* graph) : GraphBuilder(graph) {}
+  virtual ~DirectGraphBuilder() {}
+
+ protected:
+  virtual Node* MakeNode(Operator* op, int value_input_count,
+                         Node** value_inputs) {
+    return graph()->NewNode(op, value_input_count, value_inputs);
+  }
+};
+
+
+class MachineCallHelper : public CallHelper {
+ public:
+  MachineCallHelper(Zone* zone, MachineCallDescriptorBuilder* builder);
+
+  Node* Parameter(int offset);
+
+ protected:
+  virtual byte* Generate();
+  virtual void VerifyParameters(int parameter_count,
+                                MachineRepresentation* parameters);
+  void InitParameters(GraphBuilder* builder, CommonOperatorBuilder* common);
+
+ private:
+  int parameter_count() const {
+    return call_descriptor_builder_->parameter_count();
+  }
+  MachineCallDescriptorBuilder* call_descriptor_builder_;
+  Node** parameters_;
+  // TODO(dcarney): shouldn't need graph stored.
+  Graph* graph_;
+  MaybeHandle<Code> code_;
+};
+
+
+class GraphAndBuilders {
+ public:
+  explicit GraphAndBuilders(Zone* zone)
+      : main_graph_(new (zone) Graph(zone)),
+        main_common_(zone),
+        main_machine_(zone),
+        main_simplified_(zone) {}
+
+ protected:
+  // Prefixed with main_ to avoid naming conflicts.
+  Graph* const main_graph_;
+  CommonOperatorBuilder main_common_;
+  MachineOperatorBuilder main_machine_;
+  SimplifiedOperatorBuilder main_simplified_;
+};
+
+
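+// Test fixture combining a handle/zone scope, the shared graph and operator
+// builders, machine-call plumbing, and a SimplifiedGraphBuilder.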
+template <typename ReturnType>
+class GraphBuilderTester
+    : public HandleAndZoneScope,
+      private GraphAndBuilders,
+      public MachineCallHelper,
+      public SimplifiedGraphBuilder,
+      public CallHelper2<ReturnType, GraphBuilderTester<ReturnType> > {
+ public:
+  explicit GraphBuilderTester(MachineRepresentation p0,
+                              MachineRepresentation p1,
+                              MachineRepresentation p2,
+                              MachineRepresentation p3,
+                              MachineRepresentation p4)
+      : GraphAndBuilders(main_zone()),
+        MachineCallHelper(
+            main_zone(),
+            ToCallDescriptorBuilder(
+                main_zone(), ReturnValueTraits<ReturnType>::Representation(),
+                p0, p1, p2, p3, p4)),
+        SimplifiedGraphBuilder(main_graph_, &main_common_, &main_machine_,
+                               &main_simplified_) {
+    Begin();
+    InitParameters(this, &main_common_);
+  }
+  virtual ~GraphBuilderTester() {}
+
+  Factory* factory() const { return isolate()->factory(); }
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_
diff --git a/test/cctest/compiler/graph-tester.h b/test/cctest/compiler/graph-tester.h
new file mode 100644 (file)
index 0000000..41dfa07
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_GRAPH_TESTER_H_
+#define V8_CCTEST_COMPILER_GRAPH_TESTER_H_
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class GraphTester : public HandleAndZoneScope, public Graph {
+ public:
+  GraphTester() : Graph(main_zone()) {}
+};
+
+
+class GraphWithStartNodeTester : public GraphTester {
+ public:
+  GraphWithStartNodeTester()
+      : builder_(main_zone()), start_node_(NewNode(builder_.Start())) {
+    SetStart(start_node_);
+  }
+
+  Node* start_node() { return start_node_; }
+
+ private:
+  CommonOperatorBuilder builder_;
+  Node* start_node_;
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_CCTEST_COMPILER_GRAPH_TESTER_H_
diff --git a/test/cctest/compiler/instruction-selector-tester.h b/test/cctest/compiler/instruction-selector-tester.h
new file mode 100644 (file)
index 0000000..2a84b57
--- /dev/null
@@ -0,0 +1,119 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_INSTRUCTION_SELECTOR_TEST_H_
+#define V8_CCTEST_COMPILER_INSTRUCTION_SELECTOR_TEST_H_
+
+#include <deque>
+#include <set>
+
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/ostreams.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef std::set<int> VirtualRegisterSet;
+
+enum InstructionSelectorTesterMode { kTargetMode, kInternalMode };
+
+class InstructionSelectorTester : public HandleAndZoneScope,
+                                  public RawMachineAssembler {
+ public:
+  enum Mode { kTargetMode, kInternalMode };
+
+  static const int kParameterCount = 3;
+  static MachineRepresentation* BuildParameterArray(Zone* zone) {
+    MachineRepresentation* array =
+        zone->NewArray<MachineRepresentation>(kParameterCount);
+    for (int i = 0; i < kParameterCount; ++i) {
+      array[i] = kMachineWord32;
+    }
+    return array;
+  }
+
+  explicit InstructionSelectorTester(Mode mode = kTargetMode)
+      : RawMachineAssembler(
+            new (main_zone()) Graph(main_zone()), new (main_zone())
+            MachineCallDescriptorBuilder(kMachineWord32, kParameterCount,
+                                         BuildParameterArray(main_zone())),
+            MachineOperatorBuilder::pointer_rep()),
+        mode_(mode) {}
+
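+  // Runs instruction selection over the constructed graph; in kTargetMode
+  // only instructions carrying target-specific arch opcodes are collected.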
+  void SelectInstructions() {
+    OFStream out(stdout);
+    Schedule* schedule = Export();
+    CHECK_NE(0, graph()->NodeCount());
+    CompilationInfo info(main_isolate(), main_zone());
+    Linkage linkage(&info, call_descriptor());
+    InstructionSequence sequence(&linkage, graph(), schedule);
+    SourcePositionTable source_positions(graph());
+    InstructionSelector selector(&sequence, &source_positions);
+    selector.SelectInstructions();
+    out << "--- Code sequence after instruction selection --- " << endl
+        << sequence;
+    for (InstructionSequence::const_iterator i = sequence.begin();
+         i != sequence.end(); ++i) {
+      Instruction* instr = *i;
+      if (instr->opcode() < 0) continue;
+      if (mode_ == kTargetMode) {
+        switch (ArchOpcodeField::decode(instr->opcode())) {
+#define CASE(Name) \
+  case k##Name:    \
+    break;
+          TARGET_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+          default:
+            continue;
+        }
+      }
+      code.push_back(instr);
+    }
+    for (int vreg = 0; vreg < sequence.VirtualRegisterCount(); ++vreg) {
+      if (sequence.IsDouble(vreg)) {
+        CHECK(!sequence.IsReference(vreg));
+        doubles.insert(vreg);
+      }
+      if (sequence.IsReference(vreg)) {
+        CHECK(!sequence.IsDouble(vreg));
+        references.insert(vreg);
+      }
+    }
+    immediates.assign(sequence.immediates().begin(),
+                      sequence.immediates().end());
+  }
+
+  int32_t ToInt32(const InstructionOperand* operand) const {
+    size_t i = operand->index();
+    CHECK(i < immediates.size());
+    CHECK_EQ(InstructionOperand::IMMEDIATE, operand->kind());
+    return immediates[i].ToInt32();
+  }
+
+  std::deque<Instruction*> code;
+  VirtualRegisterSet doubles;
+  VirtualRegisterSet references;
+  std::deque<Constant> immediates;
+
+ private:
+  Mode mode_;
+};
+
+
+static inline void CheckSameVreg(InstructionOperand* exp,
+                                 InstructionOperand* val) {
+  CHECK_EQ(InstructionOperand::UNALLOCATED, exp->kind());
+  CHECK_EQ(InstructionOperand::UNALLOCATED, val->kind());
+  CHECK_EQ(UnallocatedOperand::cast(exp)->virtual_register(),
+           UnallocatedOperand::cast(val)->virtual_register());
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_INSTRUCTION_SELECTOR_TEST_H_
diff --git a/test/cctest/compiler/simplified-graph-builder.cc b/test/cctest/compiler/simplified-graph-builder.cc
new file mode 100644 (file)
index 0000000..038c61a
--- /dev/null
@@ -0,0 +1,78 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/compiler/simplified-graph-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+SimplifiedGraphBuilder::SimplifiedGraphBuilder(
+    Graph* graph, CommonOperatorBuilder* common,
+    MachineOperatorBuilder* machine, SimplifiedOperatorBuilder* simplified)
+    : StructuredGraphBuilder(graph, common),
+      machine_(machine),
+      simplified_(simplified) {}
+
+
+void SimplifiedGraphBuilder::Begin() {
+  ASSERT(graph()->start() == NULL);
+  Node* start = graph()->NewNode(common()->Start());
+  graph()->SetStart(start);
+  set_environment(new (zone()) Environment(this, start));
+}
+
+
+void SimplifiedGraphBuilder::Return(Node* value) {
+  Node* control = NewNode(common()->Return(), value);
+  UpdateControlDependencyToLeaveFunction(control);
+}
+
+
+void SimplifiedGraphBuilder::End() {
+  environment()->UpdateControlDependency(exit_control());
+  graph()->SetEnd(NewNode(common()->End()));
+}
+
+
+SimplifiedGraphBuilder::Environment::Environment(
+    SimplifiedGraphBuilder* builder, Node* control_dependency)
+    : StructuredGraphBuilder::Environment(builder, control_dependency) {}
+
+
+Node* SimplifiedGraphBuilder::Environment::Top() {
+  ASSERT(!values()->empty());
+  return values()->back();
+}
+
+
+void SimplifiedGraphBuilder::Environment::Push(Node* node) {
+  values()->push_back(node);
+}
+
+
+Node* SimplifiedGraphBuilder::Environment::Pop() {
+  ASSERT(!values()->empty());
+  Node* back = values()->back();
+  values()->pop_back();
+  return back;
+}
+
+
+void SimplifiedGraphBuilder::Environment::Poke(size_t depth, Node* node) {
+  ASSERT(depth < values()->size());
+  size_t index = values()->size() - depth - 1;
+  values()->at(index) = node;
+}
+
+
+Node* SimplifiedGraphBuilder::Environment::Peek(size_t depth) {
+  ASSERT(depth < values()->size());
+  size_t index = values()->size() - depth - 1;
+  return values()->at(index);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/test/cctest/compiler/simplified-graph-builder.h b/test/cctest/compiler/simplified-graph-builder.h
new file mode 100644 (file)
index 0000000..22b7bbf
--- /dev/null
@@ -0,0 +1,72 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_SIMPLIFIED_GRAPH_BUILDER_H_
+#define V8_CCTEST_COMPILER_SIMPLIFIED_GRAPH_BUILDER_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/machine-node-factory.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/simplified-node-factory.h"
+#include "src/compiler/simplified-operator.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/call-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SimplifiedGraphBuilder
+    : public StructuredGraphBuilder,
+      public MachineNodeFactory<SimplifiedGraphBuilder>,
+      public SimplifiedNodeFactory<SimplifiedGraphBuilder> {
+ public:
+  SimplifiedGraphBuilder(Graph* graph, CommonOperatorBuilder* common,
+                         MachineOperatorBuilder* machine,
+                         SimplifiedOperatorBuilder* simplified);
+  virtual ~SimplifiedGraphBuilder() {}
+
+  class Environment : public StructuredGraphBuilder::Environment {
+   public:
+    Environment(SimplifiedGraphBuilder* builder, Node* control_dependency);
+
+    // TODO(dcarney): encode somehow and merge into StructuredGraphBuilder.
+    // SSA renaming operations.
+    Node* Top();
+    void Push(Node* node);
+    Node* Pop();
+    void Poke(size_t depth, Node* node);
+    Node* Peek(size_t depth);
+  };
+
+  Isolate* isolate() const { return zone()->isolate(); }
+  Zone* zone() const { return StructuredGraphBuilder::zone(); }
+  CommonOperatorBuilder* common() const {
+    return StructuredGraphBuilder::common();
+  }
+  MachineOperatorBuilder* machine() const { return machine_; }
+  SimplifiedOperatorBuilder* simplified() const { return simplified_; }
+  Environment* environment() {
+    return reinterpret_cast<Environment*>(environment_internal());
+  }
+
+  // Initialize graph and builder.
+  void Begin();
+
+  void Return(Node* value);
+
+  // Close the graph.
+  void End();
+
+ private:
+  MachineOperatorBuilder* machine_;
+  SimplifiedOperatorBuilder* simplified_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_SIMPLIFIED_GRAPH_BUILDER_H_
diff --git a/test/cctest/compiler/test-branch-combine.cc b/test/cctest/compiler/test-branch-combine.cc
new file mode 100644 (file)
index 0000000..eb678ea
--- /dev/null
@@ -0,0 +1,462 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+#if V8_TURBOFAN_TARGET
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+typedef RawMachineAssembler::Label MLabel;
+
+static IrOpcode::Value int32cmp_opcodes[] = {
+    IrOpcode::kWord32Equal, IrOpcode::kInt32LessThan,
+    IrOpcode::kInt32LessThanOrEqual, IrOpcode::kUint32LessThan,
+    IrOpcode::kUint32LessThanOrEqual};
+
+
+TEST(BranchCombineWord32EqualZero_1) {
+  // Test combining a branch with x == 0
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  int32_t eq_constant = -1033;
+  int32_t ne_constant = 825118;
+  Node* p0 = m.Parameter(0);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Word32Equal(p0, m.Int32Constant(0)), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_INT32_INPUTS(i) {
+    int32_t a = *i;
+    int32_t expect = a == 0 ? eq_constant : ne_constant;
+    CHECK_EQ(expect, m.Call(a));
+  }
+}
+
+
+TEST(BranchCombineWord32EqualZero_chain) {
+  // Test combining a branch with a chain of x == 0 == 0 == 0 ...
+  int32_t eq_constant = -1133;
+  int32_t ne_constant = 815118;
+
+  for (int k = 0; k < 6; k++) {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+    Node* p0 = m.Parameter(0);
+    MLabel blocka, blockb;
+    Node* cond = p0;
+    for (int j = 0; j < k; j++) {
+      cond = m.Word32Equal(cond, m.Int32Constant(0));
+    }
+    m.Branch(cond, &blocka, &blockb);
+    m.Bind(&blocka);
+    m.Return(m.Int32Constant(eq_constant));
+    m.Bind(&blockb);
+    m.Return(m.Int32Constant(ne_constant));
+
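+    // Each Word32Equal(cond, 0) wrapper inverts the condition, so for odd k
+    // the branch is taken exactly when a == 0.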
+    FOR_INT32_INPUTS(i) {
+      int32_t a = *i;
+      int32_t expect = (k & 1) == 1 ? (a == 0 ? eq_constant : ne_constant)
+                                    : (a == 0 ? ne_constant : eq_constant);
+      CHECK_EQ(expect, m.Call(a));
+    }
+  }
+}
+
+
+TEST(BranchCombineInt32LessThanZero_1) {
+  // Test combining a branch with x < 0
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  int32_t eq_constant = -1433;
+  int32_t ne_constant = 845118;
+  Node* p0 = m.Parameter(0);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Int32LessThan(p0, m.Int32Constant(0)), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_INT32_INPUTS(i) {
+    int32_t a = *i;
+    int32_t expect = a < 0 ? eq_constant : ne_constant;
+    CHECK_EQ(expect, m.Call(a));
+  }
+}
+
+
+TEST(BranchCombineUint32LessThan100_1) {
+  // Test combining a branch with x < 100
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  int32_t eq_constant = 1471;
+  int32_t ne_constant = 88845718;
+  Node* p0 = m.Parameter(0);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Uint32LessThan(p0, m.Int32Constant(100)), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_UINT32_INPUTS(i) {
+    uint32_t a = *i;
+    int32_t expect = a < 100 ? eq_constant : ne_constant;
+    CHECK_EQ(expect, m.Call(a));
+  }
+}
+
+
+TEST(BranchCombineUint32LessThanOrEqual100_1) {
+  // Test combining a branch with x <= 100
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  int32_t eq_constant = 1479;
+  int32_t ne_constant = 77845719;
+  Node* p0 = m.Parameter(0);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Uint32LessThanOrEqual(p0, m.Int32Constant(100)), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_UINT32_INPUTS(i) {
+    uint32_t a = *i;
+    int32_t expect = a <= 100 ? eq_constant : ne_constant;
+    CHECK_EQ(expect, m.Call(a));
+  }
+}
+
+
+TEST(BranchCombineZeroLessThanInt32_1) {
+  // Test combining a branch with 0 < x
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  int32_t eq_constant = -2033;
+  int32_t ne_constant = 225118;
+  Node* p0 = m.Parameter(0);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Int32LessThan(m.Int32Constant(0), p0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_INT32_INPUTS(i) {
+    int32_t a = *i;
+    int32_t expect = 0 < a ? eq_constant : ne_constant;
+    CHECK_EQ(expect, m.Call(a));
+  }
+}
+
+
+TEST(BranchCombineInt32GreaterThanZero_1) {
+  // Test combining a branch with x > 0
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  int32_t eq_constant = -1073;
+  int32_t ne_constant = 825178;
+  Node* p0 = m.Parameter(0);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Int32GreaterThan(p0, m.Int32Constant(0)), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_INT32_INPUTS(i) {
+    int32_t a = *i;
+    int32_t expect = a > 0 ? eq_constant : ne_constant;
+    CHECK_EQ(expect, m.Call(a));
+  }
+}
+
+
+TEST(BranchCombineWord32EqualP) {
+  // Test combining a branch with an Word32Equal.
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+  int32_t eq_constant = -1035;
+  int32_t ne_constant = 825018;
+  Node* p0 = m.Parameter(0);
+  Node* p1 = m.Parameter(1);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Word32Equal(p0, p1), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_INT32_INPUTS(i) {
+    FOR_INT32_INPUTS(j) {
+      int32_t a = *i;
+      int32_t b = *j;
+      int32_t expect = a == b ? eq_constant : ne_constant;
+      CHECK_EQ(expect, m.Call(a, b));
+    }
+  }
+}
+
+
+TEST(BranchCombineWord32EqualI) {
+  int32_t eq_constant = -1135;
+  int32_t ne_constant = 925718;
+
+  for (int left = 0; left < 2; left++) {
+    FOR_INT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      int32_t a = *i;
+
+      Node* p0 = m.Int32Constant(a);
+      Node* p1 = m.Parameter(0);
+
+      MLabel blocka, blockb;
+      if (left == 1) m.Branch(m.Word32Equal(p0, p1), &blocka, &blockb);
+      if (left == 0) m.Branch(m.Word32Equal(p1, p0), &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(eq_constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(ne_constant));
+
+      FOR_INT32_INPUTS(j) {
+        int32_t b = *j;
+        int32_t expect = a == b ? eq_constant : ne_constant;
+        CHECK_EQ(expect, m.Call(b));
+      }
+    }
+  }
+}
+
+
+TEST(BranchCombineInt32CmpP) {
+  int32_t eq_constant = -1235;
+  int32_t ne_constant = 725018;
+
+  for (int op = 0; op < 2; op++) {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+    Node* p0 = m.Parameter(0);
+    Node* p1 = m.Parameter(1);
+
+    MLabel blocka, blockb;
+    if (op == 0) m.Branch(m.Int32LessThan(p0, p1), &blocka, &blockb);
+    if (op == 1) m.Branch(m.Int32LessThanOrEqual(p0, p1), &blocka, &blockb);
+    m.Bind(&blocka);
+    m.Return(m.Int32Constant(eq_constant));
+    m.Bind(&blockb);
+    m.Return(m.Int32Constant(ne_constant));
+
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int32_t a = *i;
+        int32_t b = *j;
+        int32_t expect = 0;
+        if (op == 0) expect = a < b ? eq_constant : ne_constant;
+        if (op == 1) expect = a <= b ? eq_constant : ne_constant;
+        CHECK_EQ(expect, m.Call(a, b));
+      }
+    }
+  }
+}
+
+
+TEST(BranchCombineInt32CmpI) {
+  int32_t eq_constant = -1175;
+  int32_t ne_constant = 927711;
+
+  for (int op = 0; op < 2; op++) {
+    FOR_INT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      int32_t a = *i;
+      Node* p0 = m.Int32Constant(a);
+      Node* p1 = m.Parameter(0);
+
+      MLabel blocka, blockb;
+      if (op == 0) m.Branch(m.Int32LessThan(p0, p1), &blocka, &blockb);
+      if (op == 1) m.Branch(m.Int32LessThanOrEqual(p0, p1), &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(eq_constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(ne_constant));
+
+      FOR_INT32_INPUTS(j) {
+        int32_t b = *j;
+        int32_t expect = 0;
+        if (op == 0) expect = a < b ? eq_constant : ne_constant;
+        if (op == 1) expect = a <= b ? eq_constant : ne_constant;
+        CHECK_EQ(expect, m.Call(b));
+      }
+    }
+  }
+}
+
+
+// Now come the sophisticated tests for many input shape combinations.
+
+// Materializes a boolean (1 or 0) from a comparison.
+class CmpMaterializeBoolGen : public BinopGen<int32_t> {
+ public:
+  CompareWrapper w;
+  bool invert;
+
+  CmpMaterializeBoolGen(IrOpcode::Value opcode, bool i)
+      : w(opcode), invert(i) {}
+
+  virtual void gen(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) {
+    Node* cond = w.MakeNode(m, a, b);
+    if (invert) cond = m->Word32Equal(cond, m->Int32Constant(0));
+    m->Return(cond);
+  }
+  virtual int32_t expected(int32_t a, int32_t b) {
+    if (invert) return !w.Int32Compare(a, b) ? 1 : 0;
+    return w.Int32Compare(a, b) ? 1 : 0;
+  }
+};
+
+
+// Generates a branch and returns one of two values based on a comparison.
+class CmpBranchGen : public BinopGen<int32_t> {
+ public:
+  CompareWrapper w;
+  bool invert;
+  bool true_first;
+  int32_t eq_constant;
+  int32_t ne_constant;
+
+  CmpBranchGen(IrOpcode::Value opcode, bool i, bool t, int32_t eq, int32_t ne)
+      : w(opcode), invert(i), true_first(t), eq_constant(eq), ne_constant(ne) {}
+
+  virtual void gen(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) {
+    MLabel blocka, blockb;
+    Node* cond = w.MakeNode(m, a, b);
+    if (invert) cond = m->Word32Equal(cond, m->Int32Constant(0));
+    m->Branch(cond, &blocka, &blockb);
+    if (true_first) {
+      m->Bind(&blocka);
+      m->Return(m->Int32Constant(eq_constant));
+      m->Bind(&blockb);
+      m->Return(m->Int32Constant(ne_constant));
+    } else {
+      m->Bind(&blockb);
+      m->Return(m->Int32Constant(ne_constant));
+      m->Bind(&blocka);
+      m->Return(m->Int32Constant(eq_constant));
+    }
+  }
+  virtual int32_t expected(int32_t a, int32_t b) {
+    if (invert) return !w.Int32Compare(a, b) ? eq_constant : ne_constant;
+    return w.Int32Compare(a, b) ? eq_constant : ne_constant;
+  }
+};
+
+
+TEST(BranchCombineInt32CmpAllInputShapes_materialized) {
+  for (size_t i = 0; i < ARRAY_SIZE(int32cmp_opcodes); i++) {
+    CmpMaterializeBoolGen gen(int32cmp_opcodes[i], false);
+    Int32BinopInputShapeTester tester(&gen);
+    tester.TestAllInputShapes();
+  }
+}
+
+
+TEST(BranchCombineInt32CmpAllInputShapes_inverted_materialized) {
+  for (size_t i = 0; i < ARRAY_SIZE(int32cmp_opcodes); i++) {
+    CmpMaterializeBoolGen gen(int32cmp_opcodes[i], true);
+    Int32BinopInputShapeTester tester(&gen);
+    tester.TestAllInputShapes();
+  }
+}
+
+
+TEST(BranchCombineInt32CmpAllInputShapes_branch_true) {
+  for (size_t i = 0; i < ARRAY_SIZE(int32cmp_opcodes); i++) {
+    CmpBranchGen gen(int32cmp_opcodes[i], false, false, 995 + i, -1011 - i);
+    Int32BinopInputShapeTester tester(&gen);
+    tester.TestAllInputShapes();
+  }
+}
+
+
+TEST(BranchCombineInt32CmpAllInputShapes_branch_false) {
+  for (size_t i = 0; i < ARRAY_SIZE(int32cmp_opcodes); i++) {
+    CmpBranchGen gen(int32cmp_opcodes[i], false, true, 795 + i, -2011 - i);
+    Int32BinopInputShapeTester tester(&gen);
+    tester.TestAllInputShapes();
+  }
+}
+
+
+TEST(BranchCombineInt32CmpAllInputShapes_inverse_branch_true) {
+  for (size_t i = 0; i < ARRAY_SIZE(int32cmp_opcodes); i++) {
+    CmpBranchGen gen(int32cmp_opcodes[i], true, false, 695 + i, -3011 - i);
+    Int32BinopInputShapeTester tester(&gen);
+    tester.TestAllInputShapes();
+  }
+}
+
+
+TEST(BranchCombineInt32CmpAllInputShapes_inverse_branch_false) {
+  for (size_t i = 0; i < ARRAY_SIZE(int32cmp_opcodes); i++) {
+    CmpBranchGen gen(int32cmp_opcodes[i], true, true, 595 + i, -4011 - i);
+    Int32BinopInputShapeTester tester(&gen);
+    tester.TestAllInputShapes();
+  }
+}
+
+
+TEST(BranchCombineFloat64Compares) {
+  double inf = V8_INFINITY;
+  double nan = v8::base::OS::nan_value();
+  double inputs[] = {0.0, 1.0, -1.0, -inf, inf, nan};
+
+  int32_t eq_constant = -1733;
+  int32_t ne_constant = 915118;
+
+  double input_a = 0.0;
+  double input_b = 0.0;
+
+  CompareWrapper cmps[] = {CompareWrapper(IrOpcode::kFloat64Equal),
+                           CompareWrapper(IrOpcode::kFloat64LessThan),
+                           CompareWrapper(IrOpcode::kFloat64LessThanOrEqual)};
+
+  for (size_t c = 0; c < ARRAY_SIZE(cmps); c++) {
+    CompareWrapper cmp = cmps[c];
+    for (int invert = 0; invert < 2; invert++) {
+      RawMachineAssemblerTester<int32_t> m;
+      Node* a = m.LoadFromPointer(&input_a, kMachineFloat64);
+      Node* b = m.LoadFromPointer(&input_b, kMachineFloat64);
+
+      MLabel blocka, blockb;
+      Node* cond = cmp.MakeNode(&m, a, b);
+      if (invert) cond = m.Word32Equal(cond, m.Int32Constant(0));
+      m.Branch(cond, &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(eq_constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(ne_constant));
+
+      for (size_t i = 0; i < ARRAY_SIZE(inputs); i++) {
+        for (size_t j = 0; j < ARRAY_SIZE(inputs); j += 2) {
+          input_a = inputs[i];
+          input_b = inputs[j];
+          int32_t expected =
+              invert ? (cmp.Float64Compare(input_a, input_b) ? ne_constant
+                                                             : eq_constant)
+                     : (cmp.Float64Compare(input_a, input_b) ? eq_constant
+                                                             : ne_constant);
+          CHECK_EQ(expected, m.Call());
+        }
+      }
+    }
+  }
+}
+#endif  // V8_TURBOFAN_TARGET
diff --git a/test/cctest/compiler/test-codegen-deopt.cc b/test/cctest/compiler/test-codegen-deopt.cc
new file mode 100644 (file)
index 0000000..243ece9
--- /dev/null
@@ -0,0 +1,331 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/code-generator.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/register-allocator.h"
+#include "src/compiler/schedule.h"
+
+#include "src/full-codegen.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+typedef RawMachineAssembler::Label MLabel;
+
+static Handle<JSFunction> NewFunction(const char* source) {
+  return v8::Utils::OpenHandle(
+      *v8::Handle<v8::Function>::Cast(CompileRun(source)));
+}
+
+
+class DeoptCodegenTester {
+ public:
+  explicit DeoptCodegenTester(HandleAndZoneScope* scope, const char* src)
+      : scope_(scope),
+        function(NewFunction(src)),
+        info(function, scope->main_zone()),
+        bailout_id(-1) {
+    CHECK(Parser::Parse(&info));
+    StrictMode strict_mode = info.function()->strict_mode();
+    info.SetStrictMode(strict_mode);
+    info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
+    CHECK(Rewriter::Rewrite(&info));
+    CHECK(Scope::Analyze(&info));
+    CHECK_NE(NULL, info.scope());
+
+    FunctionTester::EnsureDeoptimizationSupport(&info);
+
+    ASSERT(info.shared_info()->has_deoptimization_support());
+
+    graph = new (scope_->main_zone()) Graph(scope_->main_zone());
+  }
+
+  virtual ~DeoptCodegenTester() { delete code; }
+
+  void GenerateCodeFromSchedule(Schedule* schedule) {
+    OFStream os(stdout);
+    os << *schedule;
+
+    // Initialize the codegen and generate code.
+    Linkage* linkage = new (scope_->main_zone()) Linkage(&info);
+    code = new v8::internal::compiler::InstructionSequence(linkage, graph,
+                                                           schedule);
+    SourcePositionTable source_positions(graph);
+    InstructionSelector selector(code, &source_positions);
+    selector.SelectInstructions();
+
+    os << "----- Instruction sequence before register allocation -----\n"
+       << *code;
+
+    RegisterAllocator allocator(code);
+    CHECK(allocator.Allocate());
+
+    os << "----- Instruction sequence after register allocation -----\n"
+       << *code;
+
+    compiler::CodeGenerator generator(code);
+    result_code = generator.GenerateCode();
+
+#ifdef DEBUG
+    result_code->Print();
+#endif
+  }
+
+  Zone* zone() { return scope_->main_zone(); }
+
+  HandleAndZoneScope* scope_;
+  Handle<JSFunction> function;
+  CompilationInfo info;
+  BailoutId bailout_id;
+  Handle<Code> result_code;
+  v8::internal::compiler::InstructionSequence* code;
+  Graph* graph;
+};
+
+
+class TrivialDeoptCodegenTester : public DeoptCodegenTester {
+ public:
+  explicit TrivialDeoptCodegenTester(HandleAndZoneScope* scope)
+      : DeoptCodegenTester(scope,
+                           "function foo() { deopt(); return 42; }; foo") {}
+
+  void GenerateCode() {
+    GenerateCodeFromSchedule(BuildGraphAndSchedule(graph));
+  }
+
+  Schedule* BuildGraphAndSchedule(Graph* graph) {
+    Isolate* isolate = info.isolate();
+    CommonOperatorBuilder common(zone());
+
+    // Manually construct a schedule for the function below:
+    // function foo() {
+    //   deopt();
+    // }
+
+    MachineRepresentation parameter_reps[] = {kMachineTagged};
+    MachineCallDescriptorBuilder descriptor_builder(kMachineTagged, 1,
+                                                    parameter_reps);
+
+    RawMachineAssembler m(graph, &descriptor_builder);
+
+    Handle<Object> undef_object =
+        Handle<Object>(isolate->heap()->undefined_value(), isolate);
+    PrintableUnique<Object> undef_constant =
+        PrintableUnique<Object>::CreateUninitialized(zone(), undef_object);
+    Node* undef_node = m.NewNode(common.HeapConstant(undef_constant));
+
+    Handle<JSFunction> deopt_function =
+        NewFunction("function deopt() { %DeoptimizeFunction(foo); }; deopt");
+    PrintableUnique<Object> deopt_fun_constant =
+        PrintableUnique<Object>::CreateUninitialized(zone(), deopt_function);
+    Node* deopt_fun_node = m.NewNode(common.HeapConstant(deopt_fun_constant));
+
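+    // The call below has two control exits: "cont" for normal completion
+    // and "deopt", taken when the callee triggers lazy deoptimization.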
+    MLabel deopt, cont;
+    Node* call = m.CallJS0(deopt_fun_node, undef_node, &cont, &deopt);
+
+    m.Bind(&cont);
+    m.NewNode(common.Continuation(), call);
+    m.Return(undef_node);
+
+    m.Bind(&deopt);
+    m.NewNode(common.LazyDeoptimization(), call);
+
+    bailout_id = GetCallBailoutId();
+    FrameStateDescriptor state_descriptor(bailout_id);
+    Node* state_node = m.NewNode(common.FrameState(state_descriptor));
+    m.Deoptimize(state_node);
+
+    // Schedule the graph:
+    Schedule* schedule = m.Export();
+
+    cont_block = cont.block();
+    deopt_block = deopt.block();
+
+    return schedule;
+  }
+
+  BailoutId GetCallBailoutId() {
+    ZoneList<Statement*>* body = info.function()->body();
+    for (int i = 0; i < body->length(); i++) {
+      if (body->at(i)->IsExpressionStatement() &&
+          body->at(i)->AsExpressionStatement()->expression()->IsCall()) {
+        return body->at(i)->AsExpressionStatement()->expression()->id();
+      }
+    }
+    CHECK(false);
+    return BailoutId(-1);
+  }
+
+  BasicBlock* cont_block;
+  BasicBlock* deopt_block;
+};
+
+
+TEST(TurboTrivialDeoptCodegen) {
+  HandleAndZoneScope scope;
+  InitializedHandleScope handles;
+
+  FLAG_allow_natives_syntax = true;
+  FLAG_turbo_deoptimization = true;
+
+  TrivialDeoptCodegenTester t(&scope);
+  t.GenerateCode();
+
+  DeoptimizationInputData* data =
+      DeoptimizationInputData::cast(t.result_code->deoptimization_data());
+
+  Label* cont_label = t.code->GetLabel(t.cont_block);
+  Label* deopt_label = t.code->GetLabel(t.deopt_block);
+
+  // Check the patch table. It should patch the continuation address to the
+  // deoptimization block address.
+  CHECK_EQ(1, data->ReturnAddressPatchCount());
+  CHECK_EQ(cont_label->pos(), data->ReturnAddressPc(0)->value());
+  CHECK_EQ(deopt_label->pos(), data->PatchedAddressPc(0)->value());
+
+  // Check that we deoptimize to the right AST id.
+  CHECK_EQ(1, data->DeoptCount());
+  CHECK_EQ(t.bailout_id.ToInt(), data->AstId(0).ToInt());
+}
+
+
+TEST(TurboTrivialDeoptCodegenAndRun) {
+  HandleAndZoneScope scope;
+  InitializedHandleScope handles;
+
+  FLAG_allow_natives_syntax = true;
+  FLAG_turbo_deoptimization = true;
+
+  TrivialDeoptCodegenTester t(&scope);
+  t.GenerateCode();
+
+  t.function->ReplaceCode(*t.result_code);
+  t.info.context()->native_context()->AddOptimizedCode(*t.result_code);
+
+  Isolate* isolate = scope.main_isolate();
+  Handle<Object> result;
+  bool has_pending_exception =
+      !Execution::Call(isolate, t.function,
+                       isolate->factory()->undefined_value(), 0, NULL,
+                       false).ToHandle(&result);
+  CHECK(!has_pending_exception);
+  CHECK(result->SameValue(Smi::FromInt(42)));
+}
+
+
+class TrivialRuntimeDeoptCodegenTester : public DeoptCodegenTester {
+ public:
+  explicit TrivialRuntimeDeoptCodegenTester(HandleAndZoneScope* scope)
+      : DeoptCodegenTester(
+            scope,
+            "function foo() { %DeoptimizeFunction(foo); return 42; }; foo") {}
+
+  void GenerateCode() {
+    GenerateCodeFromSchedule(BuildGraphAndSchedule(graph));
+  }
+
+  Schedule* BuildGraphAndSchedule(Graph* graph) {
+    Isolate* isolate = info.isolate();
+    CommonOperatorBuilder common(zone());
+
+    // Manually construct a schedule for the function below:
+    // function foo() {
+    //   %DeoptimizeFunction(foo);
+    // }
+
+    MachineRepresentation parameter_reps[] = {kMachineTagged};
+    MachineCallDescriptorBuilder descriptor_builder(kMachineTagged, 1,
+                                                    parameter_reps);
+
+    RawMachineAssembler m(graph, &descriptor_builder);
+
+    Handle<Object> undef_object =
+        Handle<Object>(isolate->heap()->undefined_value(), isolate);
+    PrintableUnique<Object> undef_constant =
+        PrintableUnique<Object>::CreateUninitialized(zone(), undef_object);
+    Node* undef_node = m.NewNode(common.HeapConstant(undef_constant));
+
+    PrintableUnique<Object> this_fun_constant =
+        PrintableUnique<Object>::CreateUninitialized(zone(), function);
+    Node* this_fun_node = m.NewNode(common.HeapConstant(this_fun_constant));
+
+    MLabel deopt, cont;
+    Node* call = m.CallRuntime1(Runtime::kDeoptimizeFunction, this_fun_node,
+                                &cont, &deopt);
+
+    m.Bind(&cont);
+    m.NewNode(common.Continuation(), call);
+    m.Return(undef_node);
+
+    m.Bind(&deopt);
+    m.NewNode(common.LazyDeoptimization(), call);
+
+    bailout_id = GetCallBailoutId();
+    FrameStateDescriptor state_descriptor(bailout_id);
+    Node* state_node = m.NewNode(common.FrameState(state_descriptor));
+    m.Deoptimize(state_node);
+
+    // Schedule the graph:
+    Schedule* schedule = m.Export();
+
+    cont_block = cont.block();
+    deopt_block = deopt.block();
+
+    return schedule;
+  }
+
+  BailoutId GetCallBailoutId() {
+    ZoneList<Statement*>* body = info.function()->body();
+    for (int i = 0; i < body->length(); i++) {
+      if (body->at(i)->IsExpressionStatement() &&
+          body->at(i)->AsExpressionStatement()->expression()->IsCallRuntime()) {
+        return body->at(i)->AsExpressionStatement()->expression()->id();
+      }
+    }
+    CHECK(false);
+    return BailoutId(-1);
+  }
+
+  BasicBlock* cont_block;
+  BasicBlock* deopt_block;
+};
+
+
+TEST(TurboTrivialRuntimeDeoptCodegenAndRun) {
+  HandleAndZoneScope scope;
+  InitializedHandleScope handles;
+
+  FLAG_allow_natives_syntax = true;
+  FLAG_turbo_deoptimization = true;
+
+  TrivialRuntimeDeoptCodegenTester t(&scope);
+  t.GenerateCode();
+
+  t.function->ReplaceCode(*t.result_code);
+  t.info.context()->native_context()->AddOptimizedCode(*t.result_code);
+
+  Isolate* isolate = scope.main_isolate();
+  Handle<Object> result;
+  bool has_pending_exception =
+      !Execution::Call(isolate, t.function,
+                       isolate->factory()->undefined_value(), 0, NULL,
+                       false).ToHandle(&result);
+  CHECK(!has_pending_exception);
+  CHECK(result->SameValue(Smi::FromInt(42)));
+}
diff --git a/test/cctest/compiler/test-gap-resolver.cc b/test/cctest/compiler/test-gap-resolver.cc
new file mode 100644 (file)
index 0000000..60a4762
--- /dev/null
@@ -0,0 +1,172 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/gap-resolver.h"
+
+#include "src/base/utils/random-number-generator.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+// The state of our move interpreter is the mapping of operands to values.
+// Note that the actual values don't really matter; all we care about is
+// equality.
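+//
+// For example, executing {A <- B, B <- A} in parallel on a state where A
+// holds a and B holds b must leave A holding b and B holding a, because
+// every source is read from the state before any destination is written.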
+class InterpreterState {
+ public:
+  typedef std::vector<MoveOperands> Moves;
+
+  void ExecuteInParallel(Moves moves) {
+    InterpreterState copy(*this);
+    for (Moves::iterator it = moves.begin(); it != moves.end(); ++it) {
+      if (!it->IsRedundant()) write(it->destination(), copy.read(it->source()));
+    }
+  }
+
+  bool operator==(const InterpreterState& other) const {
+    return values_ == other.values_;
+  }
+
+  bool operator!=(const InterpreterState& other) const {
+    return values_ != other.values_;
+  }
+
+ private:
+  // Internally, the state is a normalized permutation of (kind,index) pairs.
+  typedef std::pair<InstructionOperand::Kind, int> Key;
+  typedef Key Value;
+  typedef std::map<Key, Value> OperandMap;
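+
+  // An operand that is absent from the map implicitly holds its own
+  // (kind, index) value; write() erases entries that restore this identity
+  // mapping so that equivalent states always compare equal.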
+
+  Value read(const InstructionOperand* op) const {
+    OperandMap::const_iterator it = values_.find(KeyFor(op));
+    return (it == values_.end()) ? ValueFor(op) : it->second;
+  }
+
+  void write(const InstructionOperand* op, Value v) {
+    if (v == ValueFor(op)) {
+      values_.erase(KeyFor(op));
+    } else {
+      values_[KeyFor(op)] = v;
+    }
+  }
+
+  static Key KeyFor(const InstructionOperand* op) {
+    return Key(op->kind(), op->index());
+  }
+
+  static Value ValueFor(const InstructionOperand* op) {
+    return Value(op->kind(), op->index());
+  }
+
+  friend OStream& operator<<(OStream& os, const InterpreterState& is) {
+    for (OperandMap::const_iterator it = is.values_.begin();
+         it != is.values_.end(); ++it) {
+      if (it != is.values_.begin()) os << " ";
+      InstructionOperand source(it->first.first, it->first.second);
+      InstructionOperand destination(it->second.first, it->second.second);
+      os << MoveOperands(&source, &destination);
+    }
+    return os;
+  }
+
+  OperandMap values_;
+};
+
+
+// An abstract interpreter for moves, swaps and parallel moves.
+class MoveInterpreter : public GapResolver::Assembler {
+ public:
+  virtual void AssembleMove(InstructionOperand* source,
+                            InstructionOperand* destination) V8_OVERRIDE {
+    InterpreterState::Moves moves;
+    moves.push_back(MoveOperands(source, destination));
+    state_.ExecuteInParallel(moves);
+  }
+
+  virtual void AssembleSwap(InstructionOperand* source,
+                            InstructionOperand* destination) V8_OVERRIDE {
+    InterpreterState::Moves moves;
+    moves.push_back(MoveOperands(source, destination));
+    moves.push_back(MoveOperands(destination, source));
+    state_.ExecuteInParallel(moves);
+  }
+
+  void AssembleParallelMove(const ParallelMove* pm) {
+    InterpreterState::Moves moves(pm->move_operands()->begin(),
+                                  pm->move_operands()->end());
+    state_.ExecuteInParallel(moves);
+  }
+
+  InterpreterState state() const { return state_; }
+
+ private:
+  InterpreterState state_;
+};
+
+
+class ParallelMoveCreator : public HandleAndZoneScope {
+ public:
+  ParallelMoveCreator() : rng_(CcTest::random_number_generator()) {}
+
+  ParallelMove* Create(int size) {
+    ParallelMove* parallel_move = new (main_zone()) ParallelMove(main_zone());
+    std::set<InstructionOperand*, InstructionOperandComparator> seen;
+    for (int i = 0; i < size; ++i) {
+      MoveOperands mo(CreateRandomOperand(), CreateRandomOperand());
+      if (!mo.IsRedundant() && seen.find(mo.destination()) == seen.end()) {
+        parallel_move->AddMove(mo.source(), mo.destination(), main_zone());
+        seen.insert(mo.destination());
+      }
+    }
+    return parallel_move;
+  }
+
+ private:
+  struct InstructionOperandComparator {
+    bool operator()(const InstructionOperand* x, const InstructionOperand* y) {
+      return (x->kind() < y->kind()) ||
+             (x->kind() == y->kind() && x->index() < y->index());
+    }
+  };
+
+  InstructionOperand* CreateRandomOperand() {
+    int index = rng_->NextInt(6);
+    switch (rng_->NextInt(5)) {
+      case 0:
+        return ConstantOperand::Create(index, main_zone());
+      case 1:
+        return StackSlotOperand::Create(index, main_zone());
+      case 2:
+        return DoubleStackSlotOperand::Create(index, main_zone());
+      case 3:
+        return RegisterOperand::Create(index, main_zone());
+      case 4:
+        return DoubleRegisterOperand::Create(index, main_zone());
+    }
+    UNREACHABLE();
+    return NULL;
+  }
+
+  v8::base::RandomNumberGenerator* rng_;
+};
+
+
+TEST(FuzzResolver) {
+  ParallelMoveCreator pmc;
+  for (int size = 0; size < 20; ++size) {
+    for (int repeat = 0; repeat < 50; ++repeat) {
+      ParallelMove* pm = pmc.Create(size);
+
+      // Note: The gap resolver modifies the ParallelMove, so interpret first.
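+      // Resolving must produce a sequence of moves and swaps whose net
+      // effect equals executing the ParallelMove in parallel.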
+      MoveInterpreter mi1;
+      mi1.AssembleParallelMove(pm);
+
+      MoveInterpreter mi2;
+      GapResolver resolver(&mi2);
+      resolver.Resolve(pm);
+
+      CHECK(mi1.state() == mi2.state());
+    }
+  }
+}
diff --git a/test/cctest/compiler/test-graph-reducer.cc b/test/cctest/compiler/test-graph-reducer.cc
new file mode 100644 (file)
index 0000000..dfbb6f2
--- /dev/null
@@ -0,0 +1,659 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "graph-tester.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-reducer.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+const uint8_t OPCODE_A0 = 10;
+const uint8_t OPCODE_A1 = 11;
+const uint8_t OPCODE_A2 = 12;
+const uint8_t OPCODE_B0 = 20;
+const uint8_t OPCODE_B1 = 21;
+const uint8_t OPCODE_B2 = 22;
+const uint8_t OPCODE_C0 = 30;
+const uint8_t OPCODE_C1 = 31;
+const uint8_t OPCODE_C2 = 32;
+
+static SimpleOperator OPA0(OPCODE_A0, Operator::kNoWrite, 0, 0, "opa0");
+static SimpleOperator OPA1(OPCODE_A1, Operator::kNoWrite, 1, 0, "opa1");
+static SimpleOperator OPA2(OPCODE_A2, Operator::kNoWrite, 2, 0, "opa2");
+static SimpleOperator OPB0(OPCODE_B0, Operator::kNoWrite, 0, 0, "opb0");
+static SimpleOperator OPB1(OPCODE_B1, Operator::kNoWrite, 1, 0, "opb1");
+static SimpleOperator OPB2(OPCODE_B2, Operator::kNoWrite, 2, 0, "opb2");
+static SimpleOperator OPC0(OPCODE_C0, Operator::kNoWrite, 0, 0, "opc0");
+static SimpleOperator OPC1(OPCODE_C1, Operator::kNoWrite, 1, 0, "opc1");
+static SimpleOperator OPC2(OPCODE_C2, Operator::kNoWrite, 2, 0, "opc2");
+
+
+// Replaces all "A" operators with "B" operators without creating new nodes.
+class InPlaceABReducer : public Reducer {
+ public:
+  virtual Reduction Reduce(Node* node) {
+    switch (node->op()->opcode()) {
+      case OPCODE_A0:
+        CHECK_EQ(0, node->InputCount());
+        node->set_op(&OPB0);
+        return Replace(node);
+      case OPCODE_A1:
+        CHECK_EQ(1, node->InputCount());
+        node->set_op(&OPB1);
+        return Replace(node);
+      case OPCODE_A2:
+        CHECK_EQ(2, node->InputCount());
+        node->set_op(&OPB2);
+        return Replace(node);
+    }
+    return NoChange();
+  }
+};
+
+
+// Replaces all "A" operators with "B" operators by allocating new nodes.
+class NewABReducer : public Reducer {
+ public:
+  explicit NewABReducer(Graph* graph) : graph_(graph) {}
+  virtual Reduction Reduce(Node* node) {
+    switch (node->op()->opcode()) {
+      case OPCODE_A0:
+        CHECK_EQ(0, node->InputCount());
+        return Replace(graph_->NewNode(&OPB0));
+      case OPCODE_A1:
+        CHECK_EQ(1, node->InputCount());
+        return Replace(graph_->NewNode(&OPB1, node->InputAt(0)));
+      case OPCODE_A2:
+        CHECK_EQ(2, node->InputCount());
+        return Replace(
+            graph_->NewNode(&OPB2, node->InputAt(0), node->InputAt(1)));
+    }
+    return NoChange();
+  }
+  Graph* graph_;
+};
+
+
+// Replaces all "B" operators with "C" operators without creating new nodes.
+class InPlaceBCReducer : public Reducer {
+ public:
+  virtual Reduction Reduce(Node* node) {
+    switch (node->op()->opcode()) {
+      case OPCODE_B0:
+        CHECK_EQ(0, node->InputCount());
+        node->set_op(&OPC0);
+        return Replace(node);
+      case OPCODE_B1:
+        CHECK_EQ(1, node->InputCount());
+        node->set_op(&OPC1);
+        return Replace(node);
+      case OPCODE_B2:
+        CHECK_EQ(2, node->InputCount());
+        node->set_op(&OPC2);
+        return Replace(node);
+    }
+    return NoChange();
+  }
+};
+
+
+// Wraps all "OPA0" nodes in "OPB1" operators by allocating new nodes.
+class A0Wrapper V8_FINAL : public Reducer {
+ public:
+  explicit A0Wrapper(Graph* graph) : graph_(graph) {}
+  virtual Reduction Reduce(Node* node) V8_OVERRIDE {
+    switch (node->op()->opcode()) {
+      case OPCODE_A0:
+        CHECK_EQ(0, node->InputCount());
+        return Replace(graph_->NewNode(&OPB1, node));
+    }
+    return NoChange();
+  }
+  Graph* graph_;
+};
+
+
+// Wraps all "OPB0" nodes in two "OPC1" operators by allocating new nodes.
+class B0Wrapper V8_FINAL : public Reducer {
+ public:
+  explicit B0Wrapper(Graph* graph) : graph_(graph) {}
+  virtual Reduction Reduce(Node* node) V8_OVERRIDE {
+    switch (node->op()->opcode()) {
+      case OPCODE_B0:
+        CHECK_EQ(0, node->InputCount());
+        return Replace(graph_->NewNode(&OPC1, graph_->NewNode(&OPC1, node)));
+    }
+    return NoChange();
+  }
+  Graph* graph_;
+};
+
+
+// Replaces all "OPA1" nodes with the first input.
+class A1Forwarder : public Reducer {
+  virtual Reduction Reduce(Node* node) {
+    switch (node->op()->opcode()) {
+      case OPCODE_A1:
+        CHECK_EQ(1, node->InputCount());
+        return Replace(node->InputAt(0));
+    }
+    return NoChange();
+  }
+};
+
+
+// Replaces all "OPB1" nodes with the first input.
+class B1Forwarder : public Reducer {
+  virtual Reduction Reduce(Node* node) {
+    switch (node->op()->opcode()) {
+      case OPCODE_B1:
+        CHECK_EQ(1, node->InputCount());
+        return Replace(node->InputAt(0));
+    }
+    return NoChange();
+  }
+};
+
+
+// Swaps the inputs to "OPA2" and "OPB2" nodes based on ids.
+class AB2Sorter : public Reducer {
+  virtual Reduction Reduce(Node* node) {
+    switch (node->op()->opcode()) {
+      case OPCODE_A2:
+      case OPCODE_B2:
+        CHECK_EQ(2, node->InputCount());
+        Node* x = node->InputAt(0);
+        Node* y = node->InputAt(1);
+        if (x->id() > y->id()) {
+          node->ReplaceInput(0, y);
+          node->ReplaceInput(1, x);
+          return Replace(node);
+        }
+    }
+    return NoChange();
+  }
+};
+
+
+// Simply records the nodes visited.
+class ReducerRecorder : public Reducer {
+ public:
+  explicit ReducerRecorder(Zone* zone)
+      : set(NodeSet::key_compare(), NodeSet::allocator_type(zone)) {}
+  virtual Reduction Reduce(Node* node) {
+    set.insert(node);
+    return NoChange();
+  }
+  void CheckContains(Node* node) { CHECK_EQ(1, set.count(node)); }
+  NodeSet set;
+};
+
+
+TEST(ReduceGraphFromEnd1) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* end = graph.NewNode(&OPA1, n1);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  ReducerRecorder recorder(graph.zone());
+  reducer.AddReducer(&recorder);
+  reducer.ReduceGraph();
+  recorder.CheckContains(n1);
+  recorder.CheckContains(end);
+}
+
+
+TEST(ReduceGraphFromEnd2) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* n2 = graph.NewNode(&OPA1, n1);
+  Node* n3 = graph.NewNode(&OPA1, n1);
+  Node* end = graph.NewNode(&OPA2, n2, n3);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  ReducerRecorder recorder(graph.zone());
+  reducer.AddReducer(&recorder);
+  reducer.ReduceGraph();
+  recorder.CheckContains(n1);
+  recorder.CheckContains(n2);
+  recorder.CheckContains(n3);
+  recorder.CheckContains(end);
+}
+
+
+TEST(ReduceInPlace1) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* end = graph.NewNode(&OPA1, n1);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  InPlaceABReducer r;
+  reducer.AddReducer(&r);
+
+  // Tests A* => B* with in-place updates.
+  for (int i = 0; i < 3; i++) {
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    CHECK_EQ(before, graph.NodeCount());
+    CHECK_EQ(&OPB0, n1->op());
+    CHECK_EQ(&OPB1, end->op());
+    CHECK_EQ(n1, end->InputAt(0));
+  }
+}
+
+
+TEST(ReduceInPlace2) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* n2 = graph.NewNode(&OPA1, n1);
+  Node* n3 = graph.NewNode(&OPA1, n1);
+  Node* end = graph.NewNode(&OPA2, n2, n3);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  InPlaceABReducer r;
+  reducer.AddReducer(&r);
+
+  // Tests A* => B* with in-place updates.
+  for (int i = 0; i < 3; i++) {
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    CHECK_EQ(before, graph.NodeCount());
+    CHECK_EQ(&OPB0, n1->op());
+    CHECK_EQ(&OPB1, n2->op());
+    CHECK_EQ(n1, n2->InputAt(0));
+    CHECK_EQ(&OPB1, n3->op());
+    CHECK_EQ(n1, n3->InputAt(0));
+    CHECK_EQ(&OPB2, end->op());
+    CHECK_EQ(n2, end->InputAt(0));
+    CHECK_EQ(n3, end->InputAt(1));
+  }
+}
+
+
+TEST(ReduceNew1) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* n2 = graph.NewNode(&OPA1, n1);
+  Node* n3 = graph.NewNode(&OPA1, n1);
+  Node* end = graph.NewNode(&OPA2, n2, n3);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  NewABReducer r(&graph);
+  reducer.AddReducer(&r);
+
+  // Tests A* => B* while creating new nodes.
+  for (int i = 0; i < 3; i++) {
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    if (i == 0) {
+      CHECK_NE(before, graph.NodeCount());
+    } else {
+      CHECK_EQ(before, graph.NodeCount());
+    }
+    Node* nend = graph.end();
+    CHECK_NE(end, nend);  // end() should be updated too.
+
+    Node* nn2 = nend->InputAt(0);
+    Node* nn3 = nend->InputAt(1);
+    Node* nn1 = nn2->InputAt(0);
+
+    CHECK_EQ(nn1, nn3->InputAt(0));
+
+    CHECK_EQ(&OPB0, nn1->op());
+    CHECK_EQ(&OPB1, nn2->op());
+    CHECK_EQ(&OPB1, nn3->op());
+    CHECK_EQ(&OPB2, nend->op());
+  }
+}
+
+
+TEST(Wrapping1) {
+  GraphTester graph;
+
+  Node* end = graph.NewNode(&OPA0);
+  graph.SetEnd(end);
+  CHECK_EQ(1, graph.NodeCount());
+
+  GraphReducer reducer(&graph);
+  A0Wrapper r(&graph);
+  reducer.AddReducer(&r);
+
+  reducer.ReduceGraph();
+  CHECK_EQ(2, graph.NodeCount());
+
+  Node* nend = graph.end();
+  CHECK_NE(end, nend);
+  CHECK_EQ(&OPB1, nend->op());
+  CHECK_EQ(1, nend->InputCount());
+  CHECK_EQ(end, nend->InputAt(0));
+}
+
+
+TEST(Wrapping2) {
+  GraphTester graph;
+
+  Node* end = graph.NewNode(&OPB0);
+  graph.SetEnd(end);
+  CHECK_EQ(1, graph.NodeCount());
+
+  GraphReducer reducer(&graph);
+  B0Wrapper r(&graph);
+  reducer.AddReducer(&r);
+
+  reducer.ReduceGraph();
+  CHECK_EQ(3, graph.NodeCount());
+
+  Node* nend = graph.end();
+  CHECK_NE(end, nend);
+  CHECK_EQ(&OPC1, nend->op());
+  CHECK_EQ(1, nend->InputCount());
+
+  Node* n1 = nend->InputAt(0);
+  CHECK_NE(end, n1);
+  CHECK_EQ(&OPC1, n1->op());
+  CHECK_EQ(1, n1->InputCount());
+  CHECK_EQ(end, n1->InputAt(0));
+}
+
+
+TEST(Forwarding1) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* end = graph.NewNode(&OPA1, n1);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  A1Forwarder r;
+  reducer.AddReducer(&r);
+
+  // Tests A1(x) => x
+  for (int i = 0; i < 3; i++) {
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    CHECK_EQ(before, graph.NodeCount());
+    CHECK_EQ(&OPA0, n1->op());
+    CHECK_EQ(n1, graph.end());
+  }
+}
+
+
+TEST(Forwarding2) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* n2 = graph.NewNode(&OPA1, n1);
+  Node* n3 = graph.NewNode(&OPA1, n1);
+  Node* end = graph.NewNode(&OPA2, n2, n3);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  A1Forwarder r;
+  reducer.AddReducer(&r);
+
+  // Tests reducing A2(A1(x), A1(y)) => A2(x, y).
+  for (int i = 0; i < 3; i++) {
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    CHECK_EQ(before, graph.NodeCount());
+    CHECK_EQ(&OPA0, n1->op());
+    CHECK_EQ(n1, end->InputAt(0));
+    CHECK_EQ(n1, end->InputAt(1));
+    CHECK_EQ(&OPA2, end->op());
+    CHECK_EQ(0, n2->UseCount());
+    CHECK_EQ(0, n3->UseCount());
+  }
+}
+
+
+TEST(Forwarding3) {
+  // Tests reducing a chain of A1(A1(A1(A1(x)))) => x.
+  for (int i = 0; i < 8; i++) {
+    GraphTester graph;
+
+    Node* n1 = graph.NewNode(&OPA0);
+    Node* end = n1;
+    for (int j = 0; j < i; j++) {
+      end = graph.NewNode(&OPA1, end);
+    }
+    graph.SetEnd(end);
+
+    GraphReducer reducer(&graph);
+    A1Forwarder r;
+    reducer.AddReducer(&r);
+
+    for (int j = 0; j < 3; j++) {
+      int before = graph.NodeCount();
+      reducer.ReduceGraph();
+      CHECK_EQ(before, graph.NodeCount());
+      CHECK_EQ(&OPA0, n1->op());
+      CHECK_EQ(n1, graph.end());
+    }
+  }
+}
+
+
+TEST(ReduceForward1) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* n2 = graph.NewNode(&OPA1, n1);
+  Node* n3 = graph.NewNode(&OPA1, n1);
+  Node* end = graph.NewNode(&OPA2, n2, n3);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  InPlaceABReducer r;
+  B1Forwarder f;
+  reducer.AddReducer(&r);
+  reducer.AddReducer(&f);
+
+  // Tests first reducing A => B, then B1(x) => x.
+  for (int i = 0; i < 3; i++) {
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    CHECK_EQ(before, graph.NodeCount());
+    CHECK_EQ(&OPB0, n1->op());
+    CHECK_EQ(&OPB1, n2->op());
+    CHECK_EQ(n1, end->InputAt(0));
+    CHECK_EQ(&OPB1, n3->op());
+    CHECK_EQ(n1, end->InputAt(1));
+    CHECK_EQ(&OPB2, end->op());
+    CHECK_EQ(0, n2->UseCount());
+    CHECK_EQ(0, n3->UseCount());
+  }
+}
+
+
+TEST(Sorter1) {
+  HandleAndZoneScope scope;
+  AB2Sorter r;
+  for (int i = 0; i < 6; i++) {
+    GraphTester graph;
+
+    Node* n1 = graph.NewNode(&OPA0);
+    Node* n2 = graph.NewNode(&OPA1, n1);
+    Node* n3 = graph.NewNode(&OPA1, n1);
+    Node* end;
+
+    if (i == 0) end = graph.NewNode(&OPA2, n2, n3);
+    if (i == 1) end = graph.NewNode(&OPA2, n3, n2);
+    if (i == 2) end = graph.NewNode(&OPA2, n2, n1);
+    if (i == 3) end = graph.NewNode(&OPA2, n1, n2);
+    if (i == 4) end = graph.NewNode(&OPA2, n3, n1);
+    if (i == 5) end = graph.NewNode(&OPA2, n1, n3);
+
+    graph.SetEnd(end);
+
+    GraphReducer reducer(&graph);
+    reducer.AddReducer(&r);
+
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    CHECK_EQ(before, graph.NodeCount());
+    CHECK_EQ(&OPA0, n1->op());
+    CHECK_EQ(&OPA1, n2->op());
+    CHECK_EQ(&OPA1, n3->op());
+    CHECK_EQ(&OPA2, end->op());
+    CHECK_EQ(end, graph.end());
+    CHECK(end->InputAt(0)->id() <= end->InputAt(1)->id());
+  }
+}
+
+
+// Generate a node graph with the given permutations.
+void GenDAG(Graph* graph, int* p3, int* p2, int* p1) {
+  Node* level4 = graph->NewNode(&OPA0);
+  Node* level3[] = {graph->NewNode(&OPA1, level4),
+                    graph->NewNode(&OPA1, level4)};
+
+  Node* level2[] = {graph->NewNode(&OPA1, level3[p3[0]]),
+                    graph->NewNode(&OPA1, level3[p3[1]]),
+                    graph->NewNode(&OPA1, level3[p3[0]]),
+                    graph->NewNode(&OPA1, level3[p3[1]])};
+
+  Node* level1[] = {graph->NewNode(&OPA2, level2[p2[0]], level2[p2[1]]),
+                    graph->NewNode(&OPA2, level2[p2[2]], level2[p2[3]])};
+
+  Node* end = graph->NewNode(&OPA2, level1[p1[0]], level1[p1[1]]);
+  graph->SetEnd(end);
+}
+
+
+TEST(SortForwardReduce) {
+  GraphTester graph;
+
+  // Tests combined reductions on a series of DAGs.
+  for (int j = 0; j < 2; j++) {
+    int p3[] = {j, 1 - j};
+    for (int m = 0; m < 2; m++) {
+      int p1[] = {m, 1 - m};
+      for (int k = 0; k < 24; k++) {  // All permutations of 0, 1, 2, 3
+        int p2[] = {-1, -1, -1, -1};
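+        // Decode k in the factorial number system: for each d from 4 down
+        // to 1, the digit n % d selects which still-empty slot of p2 gets
+        // the value d - 1, so k in [0, 24) yields each permutation once.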
+        int n = k;
+        for (int d = 4; d >= 1; d--) {  // Construct permutation.
+          int p = n % d;
+          for (int z = 0; z < 4; z++) {
+            if (p2[z] == -1) {
+              if (p == 0) p2[z] = d - 1;
+              p--;
+            }
+          }
+          n = n / d;
+        }
+
+        GenDAG(&graph, p3, p2, p1);
+
+        GraphReducer reducer(&graph);
+        AB2Sorter r1;
+        A1Forwarder r2;
+        InPlaceABReducer r3;
+        reducer.AddReducer(&r1);
+        reducer.AddReducer(&r2);
+        reducer.AddReducer(&r3);
+
+        reducer.ReduceGraph();
+
+        Node* end = graph.end();
+        CHECK_EQ(&OPB2, end->op());
+        Node* n1 = end->InputAt(0);
+        Node* n2 = end->InputAt(1);
+        CHECK_NE(n1, n2);
+        CHECK(n1->id() < n2->id());
+        CHECK_EQ(&OPB2, n1->op());
+        CHECK_EQ(&OPB2, n2->op());
+        Node* n4 = n1->InputAt(0);
+        CHECK_EQ(&OPB0, n4->op());
+        CHECK_EQ(n4, n1->InputAt(1));
+        CHECK_EQ(n4, n2->InputAt(0));
+        CHECK_EQ(n4, n2->InputAt(1));
+      }
+    }
+  }
+}
+
+
+TEST(Order) {
+  // Test that the order of reducers doesn't matter, as they should be
+  // rerun for changed nodes.
+  for (int i = 0; i < 2; i++) {
+    GraphTester graph;
+
+    Node* n1 = graph.NewNode(&OPA0);
+    Node* end = graph.NewNode(&OPA1, n1);
+    graph.SetEnd(end);
+
+    GraphReducer reducer(&graph);
+    InPlaceABReducer abr;
+    InPlaceBCReducer bcr;
+    if (i == 0) {
+      reducer.AddReducer(&abr);
+      reducer.AddReducer(&bcr);
+    } else {
+      reducer.AddReducer(&bcr);
+      reducer.AddReducer(&abr);
+    }
+
+    // Tests A* => C* with in-place updates.
+    for (int i = 0; i < 3; i++) {
+      int before = graph.NodeCount();
+      reducer.ReduceGraph();
+      CHECK_EQ(before, graph.NodeCount());
+      CHECK_EQ(&OPC0, n1->op());
+      CHECK_EQ(&OPC1, end->op());
+      CHECK_EQ(n1, end->InputAt(0));
+    }
+  }
+}
+
+
+// Tests that a reducer is only applied once.
+class OneTimeReducer : public Reducer {
+ public:
+  OneTimeReducer(Reducer* reducer, Zone* zone)
+      : reducer_(reducer),
+        nodes_(NodeSet::key_compare(), NodeSet::allocator_type(zone)) {}
+  virtual Reduction Reduce(Node* node) {
+    CHECK_EQ(0, nodes_.count(node));
+    nodes_.insert(node);
+    return reducer_->Reduce(node);
+  }
+  Reducer* reducer_;
+  NodeSet nodes_;
+};
+
+
+TEST(OneTimeReduce1) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* end = graph.NewNode(&OPA1, n1);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  InPlaceABReducer r;
+  OneTimeReducer once(&r, graph.zone());
+  reducer.AddReducer(&once);
+
+  // Tests A* => B* with in-place updates. Should only be applied once.
+  int before = graph.NodeCount();
+  reducer.ReduceGraph();
+  CHECK_EQ(before, graph.NodeCount());
+  CHECK_EQ(&OPB0, n1->op());
+  CHECK_EQ(&OPB1, end->op());
+  CHECK_EQ(n1, end->InputAt(0));
+}
diff --git a/test/cctest/compiler/test-instruction-selector-arm.cc b/test/cctest/compiler/test-instruction-selector-arm.cc
new file mode 100644 (file)
index 0000000..4dcf217
--- /dev/null
@@ -0,0 +1,977 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <list>
+
+#include "test/cctest/compiler/instruction-selector-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+namespace {
+
+typedef RawMachineAssembler::Label MLabel;
+
+struct DPI {
+  Operator* op;
+  ArchOpcode arch_opcode;
+  ArchOpcode reverse_arch_opcode;
+  ArchOpcode test_arch_opcode;
+};
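+
+// For a subtraction, for instance, arch_opcode is used when the operand
+// shape matches as-is (kArmSub), reverse_arch_opcode absorbs a swapped
+// operand order (kArmRsb computes operand2 - rn), and test_arch_opcode is
+// the flag-setting variant for when only the condition codes are needed
+// (kArmCmp).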
+
+
+// ARM data processing instructions.
+class DPIs V8_FINAL : public std::list<DPI>, private HandleAndZoneScope {
+ public:
+  DPIs() {
+    MachineOperatorBuilder machine(main_zone());
+    DPI and_ = {machine.Word32And(), kArmAnd, kArmAnd, kArmTst};
+    push_back(and_);
+    DPI or_ = {machine.Word32Or(), kArmOrr, kArmOrr, kArmOrr};
+    push_back(or_);
+    DPI xor_ = {machine.Word32Xor(), kArmEor, kArmEor, kArmTeq};
+    push_back(xor_);
+    DPI add = {machine.Int32Add(), kArmAdd, kArmAdd, kArmCmn};
+    push_back(add);
+    DPI sub = {machine.Int32Sub(), kArmSub, kArmRsb, kArmCmp};
+    push_back(sub);
+  }
+};
+
+
+// ARM immediates.
+class Immediates V8_FINAL : public std::list<int32_t> {
+ public:
+  Immediates() {
+    for (uint32_t imm8 = 0; imm8 < 256; ++imm8) {
+      for (uint32_t rot4 = 0; rot4 < 32; rot4 += 2) {
+        // Rotate right by rot4; rot4 == 0 is special-cased because shifting
+        // a 32-bit value by 32 would be undefined behavior.
+        int32_t imm = static_cast<int32_t>(
+            rot4 == 0 ? imm8 : (imm8 >> rot4) | (imm8 << (32 - rot4)));
+        CHECK(Assembler::ImmediateFitsAddrMode1Instruction(imm));
+        push_back(imm);
+      }
+    }
+  }
+};
+
+
+struct Shift {
+  Operator* op;
+  int32_t i_low;          // lowest possible immediate
+  int32_t i_high;         // highest possible immediate
+  AddressingMode i_mode;  // Operand2_R_<shift>_I
+  AddressingMode r_mode;  // Operand2_R_<shift>_R
+};
+
+
+// ARM shifts.
+class Shifts V8_FINAL : public std::list<Shift>, private HandleAndZoneScope {
+ public:
+  Shifts() {
+    MachineOperatorBuilder machine(main_zone());
+    Shift sar = {machine.Word32Sar(), 1, 32, kMode_Operand2_R_ASR_I,
+                 kMode_Operand2_R_ASR_R};
+    Shift shl = {machine.Word32Shl(), 0, 31, kMode_Operand2_R_LSL_I,
+                 kMode_Operand2_R_LSL_R};
+    Shift shr = {machine.Word32Shr(), 1, 32, kMode_Operand2_R_LSR_I,
+                 kMode_Operand2_R_LSR_R};
+    push_back(sar);
+    push_back(shl);
+    push_back(shr);
+  }
+};
+
+}  // namespace
+
+
+TEST(InstructionSelectorDPIP) {
+  DPIs dpis;
+  for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
+    DPI dpi = *i;
+    InstructionSelectorTester m;
+    m.Return(m.NewNode(dpi.op, m.Parameter(0), m.Parameter(1)));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(dpi.arch_opcode, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+  }
+}
+
+
+TEST(InstructionSelectorDPIAndShiftP) {
+  DPIs dpis;
+  Shifts shifts;
+  for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
+    DPI dpi = *i;
+    for (Shifts::const_iterator j = shifts.begin(); j != shifts.end(); ++j) {
+      Shift shift = *j;
+      {
+        InstructionSelectorTester m;
+        m.Return(
+            m.NewNode(dpi.op, m.Parameter(0),
+                      m.NewNode(shift.op, m.Parameter(1), m.Parameter(2))));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(dpi.arch_opcode, m.code[0]->arch_opcode());
+        CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+      }
+      {
+        InstructionSelectorTester m;
+        m.Return(m.NewNode(dpi.op,
+                           m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)),
+                           m.Parameter(2)));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(dpi.reverse_arch_opcode, m.code[0]->arch_opcode());
+        CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+      }
+    }
+  }
+}
+
+
+TEST(InstructionSelectorDPIAndShiftImm) {
+  DPIs dpis;
+  Shifts shifts;
+  for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
+    DPI dpi = *i;
+    for (Shifts::const_iterator j = shifts.begin(); j != shifts.end(); ++j) {
+      Shift shift = *j;
+      for (int32_t imm = shift.i_low; imm <= shift.i_high; ++imm) {
+        {
+          InstructionSelectorTester m;
+          m.Return(m.NewNode(
+              dpi.op, m.Parameter(0),
+              m.NewNode(shift.op, m.Parameter(1), m.Int32Constant(imm))));
+          m.SelectInstructions();
+          CHECK_EQ(1, m.code.size());
+          CHECK_EQ(dpi.arch_opcode, m.code[0]->arch_opcode());
+          CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+        }
+        {
+          InstructionSelectorTester m;
+          m.Return(m.NewNode(
+              dpi.op, m.NewNode(shift.op, m.Parameter(0), m.Int32Constant(imm)),
+              m.Parameter(1)));
+          m.SelectInstructions();
+          CHECK_EQ(1, m.code.size());
+          CHECK_EQ(dpi.reverse_arch_opcode, m.code[0]->arch_opcode());
+          CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+        }
+      }
+    }
+  }
+}
+
+
+TEST(InstructionSelectorWord32AndAndWord32XorWithMinus1P) {
+  {
+    InstructionSelectorTester m;
+    m.Return(m.Word32And(m.Parameter(0),
+                         m.Word32Xor(m.Int32Constant(-1), m.Parameter(1))));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+  }
+  {
+    InstructionSelectorTester m;
+    m.Return(m.Word32And(m.Parameter(0),
+                         m.Word32Xor(m.Parameter(1), m.Int32Constant(-1))));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+  }
+  {
+    InstructionSelectorTester m;
+    m.Return(m.Word32And(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0)),
+                         m.Parameter(1)));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+  }
+  {
+    InstructionSelectorTester m;
+    m.Return(m.Word32And(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)),
+                         m.Parameter(1)));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+  }
+}
+
+
+TEST(InstructionSelectorWord32XorWithMinus1P) {
+  {
+    InstructionSelectorTester m;
+    m.Return(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0)));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmMvn, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+  }
+  {
+    InstructionSelectorTester m;
+    m.Return(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmMvn, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+  }
+}
+
+
+TEST(InstructionSelectorInt32MulP) {
+  InstructionSelectorTester m;
+  m.Return(m.Int32Mul(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(1, m.code.size());
+  CHECK_EQ(kArmMul, m.code[0]->arch_opcode());
+}
+
+
+TEST(InstructionSelectorInt32MulImm) {
+  // x * (2^k + 1) -> x + (x << k)
+  for (int k = 1; k < 31; ++k) {
+    InstructionSelectorTester m;
+    m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) + 1)));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R_LSL_I, m.code[0]->addressing_mode());
+  }
+  // (2^k + 1) * x -> x + (x << k)
+  for (int k = 1; k < 31; ++k) {
+    InstructionSelectorTester m;
+    m.Return(m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(0)));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R_LSL_I, m.code[0]->addressing_mode());
+  }
+  // x * (2^k - 1) -> (x << k) - x
+  for (int k = 3; k < 31; ++k) {
+    InstructionSelectorTester m;
+    m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) - 1)));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmRsb, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R_LSL_I, m.code[0]->addressing_mode());
+  }
+  // (2^k - 1) * x -> (x << k) - x
+  for (int k = 3; k < 31; ++k) {
+    InstructionSelectorTester m;
+    m.Return(m.Int32Mul(m.Int32Constant((1 << k) - 1), m.Parameter(0)));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmRsb, m.code[0]->arch_opcode());
+    CHECK_EQ(kMode_Operand2_R_LSL_I, m.code[0]->addressing_mode());
+  }
+}
+
+
+// The following tests depend on the exact CPU features available, which we
+// can only fully control in a simulator build.
+#ifdef USE_SIMULATOR
+
+TEST(InstructionSelectorDPIImm_ARMv7AndVFP3Disabled) {
+  i::FLAG_enable_armv7 = false;
+  i::FLAG_enable_vfp3 = false;
+  DPIs dpis;
+  Immediates immediates;
+  for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
+    DPI dpi = *i;
+    for (Immediates::const_iterator j = immediates.begin();
+         j != immediates.end(); ++j) {
+      int32_t imm = *j;
+      {
+        InstructionSelectorTester m;
+        m.Return(m.NewNode(dpi.op, m.Parameter(0), m.Int32Constant(imm)));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(dpi.arch_opcode, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+      }
+      {
+        InstructionSelectorTester m;
+        m.Return(m.NewNode(dpi.op, m.Int32Constant(imm), m.Parameter(0)));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(dpi.reverse_arch_opcode, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+      }
+    }
+  }
+}
+
+
+TEST(InstructionSelectorWord32AndImm_ARMv7Enabled) {
+  i::FLAG_enable_armv7 = true;
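+  // And-ing with a mask of "width" low bits should select ubfx (unsigned
+  // bitfield extract) starting at bit 0; and-ing with a mask that clears a
+  // contiguous bitfield should select bfc (bitfield clear), which writes
+  // its result over the input register.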
+  for (uint32_t width = 1; width <= 32; ++width) {
+    InstructionSelectorTester m;
+    m.Return(m.Word32And(m.Parameter(0),
+                         m.Int32Constant(0xffffffffu >> (32 - width))));
+    m.SelectInstructions();
+    CHECK_EQ(1, m.code.size());
+    CHECK_EQ(kArmUbfx, m.code[0]->arch_opcode());
+    CHECK_EQ(3, m.code[0]->InputCount());
+    CHECK_EQ(0, m.ToInt32(m.code[0]->InputAt(1)));
+    CHECK_EQ(width, m.ToInt32(m.code[0]->InputAt(2)));
+  }
+  for (uint32_t lsb = 0; lsb <= 31; ++lsb) {
+    for (uint32_t width = 1; width < 32 - lsb; ++width) {
+      uint32_t msk = ~((0xffffffffu >> (32 - width)) << lsb);
+      InstructionSelectorTester m;
+      m.Return(m.Word32And(m.Parameter(0), m.Int32Constant(msk)));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(kArmBfc, m.code[0]->arch_opcode());
+      CHECK_EQ(1, m.code[0]->OutputCount());
+      CHECK(UnallocatedOperand::cast(m.code[0]->Output())
+                ->HasSameAsInputPolicy());
+      CHECK_EQ(3, m.code[0]->InputCount());
+      CHECK_EQ(lsb, m.ToInt32(m.code[0]->InputAt(1)));
+      CHECK_EQ(width, m.ToInt32(m.code[0]->InputAt(2)));
+    }
+  }
+}
+
+
+TEST(InstructionSelectorWord32AndAndWord32ShrImm_ARMv7Enabled) {
+  i::FLAG_enable_armv7 = true;
+  for (uint32_t lsb = 0; lsb <= 31; ++lsb) {
+    for (uint32_t width = 1; width <= 32 - lsb; ++width) {
+      {
+        InstructionSelectorTester m;
+        m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb)),
+                             m.Int32Constant(0xffffffffu >> (32 - width))));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(kArmUbfx, m.code[0]->arch_opcode());
+        CHECK_EQ(3, m.code[0]->InputCount());
+        CHECK_EQ(lsb, m.ToInt32(m.code[0]->InputAt(1)));
+        CHECK_EQ(width, m.ToInt32(m.code[0]->InputAt(2)));
+      }
+      {
+        InstructionSelectorTester m;
+        m.Return(
+            m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
+                        m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb))));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(kArmUbfx, m.code[0]->arch_opcode());
+        CHECK_EQ(3, m.code[0]->InputCount());
+        CHECK_EQ(lsb, m.ToInt32(m.code[0]->InputAt(1)));
+        CHECK_EQ(width, m.ToInt32(m.code[0]->InputAt(2)));
+      }
+    }
+  }
+}
+
+
+TEST(InstructionSelectorWord32ShrAndWord32AndImm_ARMv7Enabled) {
+  i::FLAG_enable_armv7 = true;
+  for (uint32_t lsb = 0; lsb <= 31; ++lsb) {
+    for (uint32_t width = 1; width <= 32 - lsb; ++width) {
+      uint32_t max = 1u << lsb;
+      if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
+      uint32_t jnk = CcTest::random_number_generator()->NextInt(max);
+      uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+      {
+        InstructionSelectorTester m;
+        m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
+                             m.Int32Constant(lsb)));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(kArmUbfx, m.code[0]->arch_opcode());
+        CHECK_EQ(3, m.code[0]->InputCount());
+        CHECK_EQ(lsb, m.ToInt32(m.code[0]->InputAt(1)));
+        CHECK_EQ(width, m.ToInt32(m.code[0]->InputAt(2)));
+      }
+      {
+        InstructionSelectorTester m;
+        m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
+                             m.Int32Constant(lsb)));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(kArmUbfx, m.code[0]->arch_opcode());
+        CHECK_EQ(3, m.code[0]->InputCount());
+        CHECK_EQ(lsb, m.ToInt32(m.code[0]->InputAt(1)));
+        CHECK_EQ(width, m.ToInt32(m.code[0]->InputAt(2)));
+      }
+    }
+  }
+}
+
+
+TEST(InstructionSelectorInt32SubAndInt32MulP_MlsEnabled) {
+  i::FLAG_enable_mls = true;
+  InstructionSelectorTester m;
+  m.Return(
+      m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+  m.SelectInstructions();
+  CHECK_EQ(1, m.code.size());
+  CHECK_EQ(kArmMls, m.code[0]->arch_opcode());
+}
+
+
+TEST(InstructionSelectorInt32SubAndInt32MulP_MlsDisabled) {
+  i::FLAG_enable_mls = false;
+  InstructionSelectorTester m;
+  m.Return(
+      m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+  m.SelectInstructions();
+  CHECK_EQ(2, m.code.size());
+  CHECK_EQ(kArmMul, m.code[0]->arch_opcode());
+  CHECK_EQ(1, m.code[0]->OutputCount());
+  CHECK_EQ(kArmSub, m.code[1]->arch_opcode());
+  CHECK_EQ(2, m.code[1]->InputCount());
+  CheckSameVreg(m.code[0]->Output(), m.code[1]->InputAt(1));
+}
+
+
+TEST(InstructionSelectorInt32DivP_ARMv7AndSudivEnabled) {
+  i::FLAG_enable_armv7 = true;
+  i::FLAG_enable_sudiv = true;
+  InstructionSelectorTester m;
+  m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(1, m.code.size());
+  CHECK_EQ(kArmSdiv, m.code[0]->arch_opcode());
+}
+
+
+TEST(InstructionSelectorInt32DivP_SudivDisabled) {
+  i::FLAG_enable_sudiv = false;
+  InstructionSelectorTester m;
+  m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
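+  // Without sudiv, Int32Div is lowered to a float64 division: convert both
+  // operands to float64, divide, and truncate the quotient back to int32.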
+  CHECK_EQ(4, m.code.size());
+  CHECK_EQ(kArmVcvtF64S32, m.code[0]->arch_opcode());
+  CHECK_EQ(1, m.code[0]->OutputCount());
+  CHECK_EQ(kArmVcvtF64S32, m.code[1]->arch_opcode());
+  CHECK_EQ(1, m.code[1]->OutputCount());
+  CHECK_EQ(kArmVdivF64, m.code[2]->arch_opcode());
+  CHECK_EQ(2, m.code[2]->InputCount());
+  CHECK_EQ(1, m.code[2]->OutputCount());
+  CheckSameVreg(m.code[0]->Output(), m.code[2]->InputAt(0));
+  CheckSameVreg(m.code[1]->Output(), m.code[2]->InputAt(1));
+  CHECK_EQ(kArmVcvtS32F64, m.code[3]->arch_opcode());
+  CHECK_EQ(1, m.code[3]->InputCount());
+  CheckSameVreg(m.code[2]->Output(), m.code[3]->InputAt(0));
+}
+
+
+TEST(InstructionSelectorInt32UDivP_ARMv7AndSudivEnabled) {
+  i::FLAG_enable_armv7 = true;
+  i::FLAG_enable_sudiv = true;
+  InstructionSelectorTester m;
+  m.Return(m.Int32UDiv(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(1, m.code.size());
+  CHECK_EQ(kArmUdiv, m.code[0]->arch_opcode());
+}
+
+
+TEST(InstructionSelectorInt32UDivP_SudivDisabled) {
+  i::FLAG_enable_sudiv = false;
+  InstructionSelectorTester m;
+  m.Return(m.Int32UDiv(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(4, m.code.size());
+  CHECK_EQ(kArmVcvtF64U32, m.code[0]->arch_opcode());
+  CHECK_EQ(1, m.code[0]->OutputCount());
+  CHECK_EQ(kArmVcvtF64U32, m.code[1]->arch_opcode());
+  CHECK_EQ(1, m.code[1]->OutputCount());
+  CHECK_EQ(kArmVdivF64, m.code[2]->arch_opcode());
+  CHECK_EQ(2, m.code[2]->InputCount());
+  CHECK_EQ(1, m.code[2]->OutputCount());
+  CheckSameVreg(m.code[0]->Output(), m.code[2]->InputAt(0));
+  CheckSameVreg(m.code[1]->Output(), m.code[2]->InputAt(1));
+  CHECK_EQ(kArmVcvtU32F64, m.code[3]->arch_opcode());
+  CHECK_EQ(1, m.code[3]->InputCount());
+  CheckSameVreg(m.code[2]->Output(), m.code[3]->InputAt(0));
+}
+
+
+TEST(InstructionSelectorInt32ModP_ARMv7AndMlsAndSudivEnabled) {
+  i::FLAG_enable_armv7 = true;
+  i::FLAG_enable_mls = true;
+  i::FLAG_enable_sudiv = true;
+  InstructionSelectorTester m;
+  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
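+  // a % b is computed as a - (a / b) * b: sdiv produces the quotient and
+  // mls folds the multiply and subtract into a single instruction.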
+  CHECK_EQ(2, m.code.size());
+  CHECK_EQ(kArmSdiv, m.code[0]->arch_opcode());
+  CHECK_EQ(1, m.code[0]->OutputCount());
+  CHECK_EQ(2, m.code[0]->InputCount());
+  CHECK_EQ(kArmMls, m.code[1]->arch_opcode());
+  CHECK_EQ(1, m.code[1]->OutputCount());
+  CHECK_EQ(3, m.code[1]->InputCount());
+  CheckSameVreg(m.code[0]->Output(), m.code[1]->InputAt(0));
+  CheckSameVreg(m.code[0]->InputAt(1), m.code[1]->InputAt(1));
+  CheckSameVreg(m.code[0]->InputAt(0), m.code[1]->InputAt(2));
+}
+
+
+TEST(InstructionSelectorInt32ModP_ARMv7AndSudivEnabled) {
+  i::FLAG_enable_armv7 = true;
+  i::FLAG_enable_mls = false;
+  i::FLAG_enable_sudiv = true;
+  InstructionSelectorTester m;
+  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(3, m.code.size());
+  CHECK_EQ(kArmSdiv, m.code[0]->arch_opcode());
+  CHECK_EQ(1, m.code[0]->OutputCount());
+  CHECK_EQ(2, m.code[0]->InputCount());
+  CHECK_EQ(kArmMul, m.code[1]->arch_opcode());
+  CHECK_EQ(1, m.code[1]->OutputCount());
+  CHECK_EQ(2, m.code[1]->InputCount());
+  CheckSameVreg(m.code[0]->Output(), m.code[1]->InputAt(0));
+  CheckSameVreg(m.code[0]->InputAt(1), m.code[1]->InputAt(1));
+  CHECK_EQ(kArmSub, m.code[2]->arch_opcode());
+  CHECK_EQ(1, m.code[2]->OutputCount());
+  CHECK_EQ(2, m.code[2]->InputCount());
+  CheckSameVreg(m.code[0]->InputAt(0), m.code[2]->InputAt(0));
+  CheckSameVreg(m.code[1]->Output(), m.code[2]->InputAt(1));
+}
+
+
+TEST(InstructionSelectorInt32ModP_ARMv7AndMlsAndSudivDisabled) {
+  i::FLAG_enable_armv7 = false;
+  i::FLAG_enable_mls = false;
+  i::FLAG_enable_sudiv = false;
+  InstructionSelectorTester m;
+  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(6, m.code.size());
+  CHECK_EQ(kArmVcvtF64S32, m.code[0]->arch_opcode());
+  CHECK_EQ(1, m.code[0]->OutputCount());
+  CHECK_EQ(kArmVcvtF64S32, m.code[1]->arch_opcode());
+  CHECK_EQ(1, m.code[1]->OutputCount());
+  CHECK_EQ(kArmVdivF64, m.code[2]->arch_opcode());
+  CHECK_EQ(2, m.code[2]->InputCount());
+  CHECK_EQ(1, m.code[2]->OutputCount());
+  CheckSameVreg(m.code[0]->Output(), m.code[2]->InputAt(0));
+  CheckSameVreg(m.code[1]->Output(), m.code[2]->InputAt(1));
+  CHECK_EQ(kArmVcvtS32F64, m.code[3]->arch_opcode());
+  CHECK_EQ(1, m.code[3]->InputCount());
+  CheckSameVreg(m.code[2]->Output(), m.code[3]->InputAt(0));
+  CHECK_EQ(kArmMul, m.code[4]->arch_opcode());
+  CHECK_EQ(1, m.code[4]->OutputCount());
+  CHECK_EQ(2, m.code[4]->InputCount());
+  CheckSameVreg(m.code[3]->Output(), m.code[4]->InputAt(0));
+  CheckSameVreg(m.code[1]->InputAt(0), m.code[4]->InputAt(1));
+  CHECK_EQ(kArmSub, m.code[5]->arch_opcode());
+  CHECK_EQ(1, m.code[5]->OutputCount());
+  CHECK_EQ(2, m.code[5]->InputCount());
+  CheckSameVreg(m.code[0]->InputAt(0), m.code[5]->InputAt(0));
+  CheckSameVreg(m.code[4]->Output(), m.code[5]->InputAt(1));
+}
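+
+// Annotation (not part of the original tests): without ARMv7, MLS, and SUDIV,
+// the six instructions checked above compute the same remainder identity via
+// floating point: two vcvt.f64.s32 convert the operands, vdiv.f64 divides,
+// vcvt.s32.f64 truncates the quotient back to an integer, and mul + sub
+// reconstruct lhs - quotient * rhs.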
+
+
+TEST(InstructionSelectorInt32UModP_ARMv7AndMlsAndSudivEnabled) {
+  i::FLAG_enable_armv7 = true;
+  i::FLAG_enable_mls = true;
+  i::FLAG_enable_sudiv = true;
+  InstructionSelectorTester m;
+  m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(2, m.code.size());
+  CHECK_EQ(kArmUdiv, m.code[0]->arch_opcode());
+  CHECK_EQ(1, m.code[0]->OutputCount());
+  CHECK_EQ(2, m.code[0]->InputCount());
+  CHECK_EQ(kArmMls, m.code[1]->arch_opcode());
+  CHECK_EQ(1, m.code[1]->OutputCount());
+  CHECK_EQ(3, m.code[1]->InputCount());
+  CheckSameVreg(m.code[0]->Output(), m.code[1]->InputAt(0));
+  CheckSameVreg(m.code[0]->InputAt(1), m.code[1]->InputAt(1));
+  CheckSameVreg(m.code[0]->InputAt(0), m.code[1]->InputAt(2));
+}
+
+
+TEST(InstructionSelectorInt32UModP_ARMv7AndSudivEnabled) {
+  i::FLAG_enable_armv7 = true;
+  i::FLAG_enable_mls = false;
+  i::FLAG_enable_sudiv = true;
+  InstructionSelectorTester m;
+  m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(3, m.code.size());
+  CHECK_EQ(kArmUdiv, m.code[0]->arch_opcode());
+  CHECK_EQ(1, m.code[0]->OutputCount());
+  CHECK_EQ(2, m.code[0]->InputCount());
+  CHECK_EQ(kArmMul, m.code[1]->arch_opcode());
+  CHECK_EQ(1, m.code[1]->OutputCount());
+  CHECK_EQ(2, m.code[1]->InputCount());
+  CheckSameVreg(m.code[0]->Output(), m.code[1]->InputAt(0));
+  CheckSameVreg(m.code[0]->InputAt(1), m.code[1]->InputAt(1));
+  CHECK_EQ(kArmSub, m.code[2]->arch_opcode());
+  CHECK_EQ(1, m.code[2]->OutputCount());
+  CHECK_EQ(2, m.code[2]->InputCount());
+  CheckSameVreg(m.code[0]->InputAt(0), m.code[2]->InputAt(0));
+  CheckSameVreg(m.code[1]->Output(), m.code[2]->InputAt(1));
+}
+
+
+TEST(InstructionSelectorInt32UModP_ARMv7AndMlsAndSudivDisabled) {
+  i::FLAG_enable_armv7 = false;
+  i::FLAG_enable_mls = false;
+  i::FLAG_enable_sudiv = false;
+  InstructionSelectorTester m;
+  m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(6, m.code.size());
+  CHECK_EQ(kArmVcvtF64U32, m.code[0]->arch_opcode());
+  CHECK_EQ(1, m.code[0]->OutputCount());
+  CHECK_EQ(kArmVcvtF64U32, m.code[1]->arch_opcode());
+  CHECK_EQ(1, m.code[1]->OutputCount());
+  CHECK_EQ(kArmVdivF64, m.code[2]->arch_opcode());
+  CHECK_EQ(2, m.code[2]->InputCount());
+  CHECK_EQ(1, m.code[2]->OutputCount());
+  CheckSameVreg(m.code[0]->Output(), m.code[2]->InputAt(0));
+  CheckSameVreg(m.code[1]->Output(), m.code[2]->InputAt(1));
+  CHECK_EQ(kArmVcvtU32F64, m.code[3]->arch_opcode());
+  CHECK_EQ(1, m.code[3]->InputCount());
+  CheckSameVreg(m.code[2]->Output(), m.code[3]->InputAt(0));
+  CHECK_EQ(kArmMul, m.code[4]->arch_opcode());
+  CHECK_EQ(1, m.code[4]->OutputCount());
+  CHECK_EQ(2, m.code[4]->InputCount());
+  CheckSameVreg(m.code[3]->Output(), m.code[4]->InputAt(0));
+  CheckSameVreg(m.code[1]->InputAt(0), m.code[4]->InputAt(1));
+  CHECK_EQ(kArmSub, m.code[5]->arch_opcode());
+  CHECK_EQ(1, m.code[5]->OutputCount());
+  CHECK_EQ(2, m.code[5]->InputCount());
+  CheckSameVreg(m.code[0]->InputAt(0), m.code[5]->InputAt(0));
+  CheckSameVreg(m.code[4]->Output(), m.code[5]->InputAt(1));
+}
+
+#endif  // USE_SIMULATOR
+
+
+TEST(InstructionSelectorWord32EqualP) {
+  InstructionSelectorTester m;
+  m.Return(m.Word32Equal(m.Parameter(0), m.Parameter(1)));
+  m.SelectInstructions();
+  CHECK_EQ(1, m.code.size());
+  CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
+  CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+  CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+  CHECK_EQ(kEqual, m.code[0]->flags_condition());
+}
+
+
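+// Annotation (not part of the original tests): for a comparison against the
+// immediate zero the selector is expected to emit "tst r, r" instead of
+// "cmp r, #0"; both set the Z flag identically, since for any word x,
+// (x & x) == 0 exactly when x == 0.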
+TEST(InstructionSelectorWord32EqualImm) {
+  Immediates immediates;
+  for (Immediates::const_iterator i = immediates.begin(); i != immediates.end();
+       ++i) {
+    int32_t imm = *i;
+    {
+      InstructionSelectorTester m;
+      m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(imm)));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      if (imm == 0) {
+        CHECK_EQ(kArmTst, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+        CHECK_EQ(2, m.code[0]->InputCount());
+        CheckSameVreg(m.code[0]->InputAt(0), m.code[0]->InputAt(1));
+      } else {
+        CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+      }
+      CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+    {
+      InstructionSelectorTester m;
+      m.Return(m.Word32Equal(m.Int32Constant(imm), m.Parameter(0)));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      if (imm == 0) {
+        CHECK_EQ(kArmTst, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+        CHECK_EQ(2, m.code[0]->InputCount());
+        CheckSameVreg(m.code[0]->InputAt(0), m.code[0]->InputAt(1));
+      } else {
+        CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+      }
+      CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+  }
+}
+
+
+TEST(InstructionSelectorWord32EqualAndDPIP) {
+  DPIs dpis;
+  for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
+    DPI dpi = *i;
+    {
+      InstructionSelectorTester m;
+      m.Return(m.Word32Equal(m.NewNode(dpi.op, m.Parameter(0), m.Parameter(1)),
+                             m.Int32Constant(0)));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
+      CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+      CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+    {
+      InstructionSelectorTester m;
+      m.Return(
+          m.Word32Equal(m.Int32Constant(0),
+                        m.NewNode(dpi.op, m.Parameter(0), m.Parameter(1))));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
+      CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+      CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+  }
+}
+
+
+TEST(InstructionSelectorWord32EqualAndDPIImm) {
+  DPIs dpis;
+  Immediates immediates;
+  for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
+    DPI dpi = *i;
+    for (Immediates::const_iterator j = immediates.begin();
+         j != immediates.end(); ++j) {
+      int32_t imm = *j;
+      {
+        InstructionSelectorTester m;
+        m.Return(m.Word32Equal(
+            m.NewNode(dpi.op, m.Parameter(0), m.Int32Constant(imm)),
+            m.Int32Constant(0)));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+        CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+        CHECK_EQ(kEqual, m.code[0]->flags_condition());
+      }
+      {
+        InstructionSelectorTester m;
+        m.Return(m.Word32Equal(
+            m.NewNode(dpi.op, m.Int32Constant(imm), m.Parameter(0)),
+            m.Int32Constant(0)));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+        CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+        CHECK_EQ(kEqual, m.code[0]->flags_condition());
+      }
+      {
+        InstructionSelectorTester m;
+        m.Return(m.Word32Equal(
+            m.Int32Constant(0),
+            m.NewNode(dpi.op, m.Parameter(0), m.Int32Constant(imm))));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+        CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+        CHECK_EQ(kEqual, m.code[0]->flags_condition());
+      }
+      {
+        InstructionSelectorTester m;
+        m.Return(m.Word32Equal(
+            m.Int32Constant(0),
+            m.NewNode(dpi.op, m.Int32Constant(imm), m.Parameter(0))));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
+        CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+        CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+        CHECK_EQ(kEqual, m.code[0]->flags_condition());
+      }
+    }
+  }
+}
+
+
+TEST(InstructionSelectorWord32EqualAndShiftP) {
+  Shifts shifts;
+  for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
+    Shift shift = *i;
+    {
+      InstructionSelectorTester m;
+      m.Return(m.Word32Equal(
+          m.Parameter(0), m.NewNode(shift.op, m.Parameter(1), m.Parameter(2))));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
+      CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+      CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+    {
+      InstructionSelectorTester m;
+      m.Return(m.Word32Equal(
+          m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)), m.Parameter(2)));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
+      CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+      CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+  }
+}
+
+
+TEST(InstructionSelectorBranchWithWord32EqualAndShiftP) {
+  Shifts shifts;
+  for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
+    Shift shift = *i;
+    {
+      InstructionSelectorTester m;
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Parameter(0), m.NewNode(shift.op, m.Parameter(1),
+                                                       m.Parameter(2))),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(1));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
+      CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+      CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+    {
+      InstructionSelectorTester m;
+      MLabel blocka, blockb;
+      m.Branch(
+          m.Word32Equal(m.NewNode(shift.op, m.Parameter(1), m.Parameter(2)),
+                        m.Parameter(0)),
+          &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(1));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
+      CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+      CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+  }
+}
+
+
+TEST(InstructionSelectorBranchWithWord32EqualAndShiftImm) {
+  Shifts shifts;
+  for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
+    Shift shift = *i;
+    for (int32_t imm = shift.i_low; imm <= shift.i_high; ++imm) {
+      {
+        InstructionSelectorTester m;
+        MLabel blocka, blockb;
+        m.Branch(
+            m.Word32Equal(m.Parameter(0), m.NewNode(shift.op, m.Parameter(1),
+                                                    m.Int32Constant(imm))),
+            &blocka, &blockb);
+        m.Bind(&blocka);
+        m.Return(m.Int32Constant(1));
+        m.Bind(&blockb);
+        m.Return(m.Int32Constant(0));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
+        CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+        CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+        CHECK_EQ(kEqual, m.code[0]->flags_condition());
+      }
+      {
+        InstructionSelectorTester m;
+        MLabel blocka, blockb;
+        m.Branch(m.Word32Equal(
+                     m.NewNode(shift.op, m.Parameter(1), m.Int32Constant(imm)),
+                     m.Parameter(0)),
+                 &blocka, &blockb);
+        m.Bind(&blocka);
+        m.Return(m.Int32Constant(1));
+        m.Bind(&blockb);
+        m.Return(m.Int32Constant(0));
+        m.SelectInstructions();
+        CHECK_EQ(1, m.code.size());
+        CHECK_EQ(kArmCmp, m.code[0]->arch_opcode());
+        CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+        CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+        CHECK_EQ(kEqual, m.code[0]->flags_condition());
+      }
+    }
+  }
+}
+
+
+TEST(InstructionSelectorBranchWithDPIP) {
+  DPIs dpis;
+  for (DPIs::const_iterator i = dpis.begin(); i != dpis.end(); ++i) {
+    DPI dpi = *i;
+    {
+      InstructionSelectorTester m;
+      MLabel blocka, blockb;
+      m.Branch(m.NewNode(dpi.op, m.Parameter(0), m.Parameter(1)), &blocka,
+               &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(1));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
+      CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+      CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+      CHECK_EQ(kNotEqual, m.code[0]->flags_condition());
+    }
+    {
+      InstructionSelectorTester m;
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Int32Constant(0),
+                             m.NewNode(dpi.op, m.Parameter(0), m.Parameter(1))),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(1));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
+      CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+      CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+    {
+      InstructionSelectorTester m;
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.NewNode(dpi.op, m.Parameter(0), m.Parameter(1)),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(1));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0));
+      m.SelectInstructions();
+      CHECK_EQ(1, m.code.size());
+      CHECK_EQ(dpi.test_arch_opcode, m.code[0]->arch_opcode());
+      CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+      CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+      CHECK_EQ(kEqual, m.code[0]->flags_condition());
+    }
+  }
+}
diff --git a/test/cctest/compiler/test-instruction-selector.cc b/test/cctest/compiler/test-instruction-selector.cc
new file mode 100644 (file)
index 0000000..a82ceb2
--- /dev/null
@@ -0,0 +1,18 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/compiler/instruction-selector-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(InstructionSelectionReturnZero) {
+  InstructionSelectorTester m(InstructionSelectorTester::kInternalMode);
+  m.Return(m.Int32Constant(0));
+  m.SelectInstructions();
+  CHECK_EQ(2, m.code.size());
+  CHECK_EQ(kArchNop, m.code[0]->opcode());
+  CHECK_EQ(kArchRet, m.code[1]->opcode());
+  CHECK_EQ(1, m.code[1]->InputCount());
+}
diff --git a/test/cctest/compiler/test-instruction.cc b/test/cctest/compiler/test-instruction.cc
new file mode 100644 (file)
index 0000000..c16e150
--- /dev/null
@@ -0,0 +1,349 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/code-generator.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/scheduler.h"
+#include "src/lithium.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+typedef v8::internal::compiler::Instruction TestInstr;
+typedef v8::internal::compiler::InstructionSequence TestInstrSeq;
+
+// A testing helper for the register code abstraction.
+class InstructionTester : public HandleAndZoneScope {
+ public:  // We're all friends here.
+  InstructionTester()
+      : isolate(main_isolate()),
+        graph(zone()),
+        schedule(zone()),
+        info(static_cast<HydrogenCodeStub*>(NULL), main_isolate()),
+        linkage(&info),
+        common(zone()),
+        machine(zone(), kMachineWord32),
+        code(NULL) {}
+
+  Isolate* isolate;
+  Graph graph;
+  Schedule schedule;
+  CompilationInfoWithZone info;
+  Linkage linkage;
+  CommonOperatorBuilder common;
+  MachineOperatorBuilder machine;
+  TestInstrSeq* code;
+
+  Zone* zone() { return main_zone(); }
+
+  void allocCode() {
+    if (schedule.rpo_order()->size() == 0) {
+      // Compute the RPO order.
+      Scheduler scheduler(zone(), &graph, &schedule);
+      scheduler.ComputeSpecialRPO();
+      ASSERT(schedule.rpo_order()->size() > 0);
+    }
+    code = new TestInstrSeq(&linkage, &graph, &schedule);
+  }
+
+  Node* Int32Constant(int32_t val) {
+    Node* node = graph.NewNode(common.Int32Constant(val));
+    schedule.AddNode(schedule.entry(), node);
+    return node;
+  }
+
+  Node* Float64Constant(double val) {
+    Node* node = graph.NewNode(common.Float64Constant(val));
+    schedule.AddNode(schedule.entry(), node);
+    return node;
+  }
+
+  Node* Parameter(int32_t which) {
+    Node* node = graph.NewNode(common.Parameter(which));
+    schedule.AddNode(schedule.entry(), node);
+    return node;
+  }
+
+  Node* NewNode(BasicBlock* block) {
+    Node* node = graph.NewNode(common.Int32Constant(111));
+    schedule.AddNode(block, node);
+    return node;
+  }
+
+  int NewInstr(BasicBlock* block) {
+    InstructionCode opcode = static_cast<InstructionCode>(110);
+    TestInstr* instr = TestInstr::New(zone(), opcode);
+    return code->AddInstruction(instr, block);
+  }
+
+  UnallocatedOperand* NewUnallocated(int vreg) {
+    UnallocatedOperand* unallocated =
+        new (zone()) UnallocatedOperand(UnallocatedOperand::ANY);
+    unallocated->set_virtual_register(vreg);
+    return unallocated;
+  }
+};
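+
+// Annotation (not part of the original tests): the intended protocol for the
+// helper above is (a) build a schedule, (b) call allocCode() to compute the
+// RPO order and wrap the schedule in an InstructionSequence, and (c) bracket
+// each block's instructions with StartBlock()/EndBlock() in RPO order, as the
+// tests below demonstrate.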
+
+
+TEST(InstructionBasic) {
+  InstructionTester R;
+
+  for (int i = 0; i < 10; i++) {
+    R.Int32Constant(i);  // Add some nodes to the graph.
+  }
+
+  BasicBlock* last = R.schedule.entry();
+  for (int i = 0; i < 5; i++) {
+    BasicBlock* block = R.schedule.NewBasicBlock();
+    R.schedule.AddGoto(last, block);
+    last = block;
+  }
+
+  R.allocCode();
+
+  CHECK_EQ(R.graph.NodeCount(), R.code->ValueCount());
+
+  BasicBlockVector* blocks = R.schedule.rpo_order();
+  CHECK_EQ(static_cast<int>(blocks->size()), R.code->BasicBlockCount());
+
+  int index = 0;
+  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end();
+       i++, index++) {
+    BasicBlock* block = *i;
+    CHECK_EQ(block, R.code->BlockAt(index));
+    CHECK_EQ(-1, R.code->GetLoopEnd(block));
+  }
+}
+
+
+TEST(InstructionGetBasicBlock) {
+  InstructionTester R;
+
+  BasicBlock* b0 = R.schedule.entry();
+  BasicBlock* b1 = R.schedule.NewBasicBlock();
+  BasicBlock* b2 = R.schedule.NewBasicBlock();
+  BasicBlock* b3 = R.schedule.exit();
+
+  R.schedule.AddGoto(b0, b1);
+  R.schedule.AddGoto(b1, b2);
+  R.schedule.AddGoto(b2, b3);
+
+  R.allocCode();
+
+  R.code->StartBlock(b0);
+  int i0 = R.NewInstr(b0);
+  int i1 = R.NewInstr(b0);
+  R.code->EndBlock(b0);
+  R.code->StartBlock(b1);
+  int i2 = R.NewInstr(b1);
+  int i3 = R.NewInstr(b1);
+  int i4 = R.NewInstr(b1);
+  int i5 = R.NewInstr(b1);
+  R.code->EndBlock(b1);
+  R.code->StartBlock(b2);
+  int i6 = R.NewInstr(b2);
+  int i7 = R.NewInstr(b2);
+  int i8 = R.NewInstr(b2);
+  R.code->EndBlock(b2);
+  R.code->StartBlock(b3);
+  R.code->EndBlock(b3);
+
+  CHECK_EQ(b0, R.code->GetBasicBlock(i0));
+  CHECK_EQ(b0, R.code->GetBasicBlock(i1));
+
+  CHECK_EQ(b1, R.code->GetBasicBlock(i2));
+  CHECK_EQ(b1, R.code->GetBasicBlock(i3));
+  CHECK_EQ(b1, R.code->GetBasicBlock(i4));
+  CHECK_EQ(b1, R.code->GetBasicBlock(i5));
+
+  CHECK_EQ(b2, R.code->GetBasicBlock(i6));
+  CHECK_EQ(b2, R.code->GetBasicBlock(i7));
+  CHECK_EQ(b2, R.code->GetBasicBlock(i8));
+
+  CHECK_EQ(b0, R.code->GetBasicBlock(b0->first_instruction_index()));
+  CHECK_EQ(b0, R.code->GetBasicBlock(b0->last_instruction_index()));
+
+  CHECK_EQ(b1, R.code->GetBasicBlock(b1->first_instruction_index()));
+  CHECK_EQ(b1, R.code->GetBasicBlock(b1->last_instruction_index()));
+
+  CHECK_EQ(b2, R.code->GetBasicBlock(b2->first_instruction_index()));
+  CHECK_EQ(b2, R.code->GetBasicBlock(b2->last_instruction_index()));
+
+  CHECK_EQ(b3, R.code->GetBasicBlock(b3->first_instruction_index()));
+  CHECK_EQ(b3, R.code->GetBasicBlock(b3->last_instruction_index()));
+}
+
+
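+// Annotation (not part of the original tests): the index comments below spell
+// out the expected block layout -- the block-start label is itself a gap,
+// each ordinary instruction is preceded by a gap slot, and the control
+// instruction terminating the block is preceded by two.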
+TEST(InstructionIsGapAt) {
+  InstructionTester R;
+
+  BasicBlock* b0 = R.schedule.entry();
+  R.schedule.AddReturn(b0, R.Int32Constant(1));
+
+  R.allocCode();
+  TestInstr* i0 = TestInstr::New(R.zone(), 100);
+  TestInstr* g = TestInstr::New(R.zone(), 103)->MarkAsControl();
+  R.code->StartBlock(b0);
+  R.code->AddInstruction(i0, b0);
+  R.code->AddInstruction(g, b0);
+  R.code->EndBlock(b0);
+
+  CHECK_EQ(true, R.code->InstructionAt(0)->IsBlockStart());
+
+  CHECK_EQ(true, R.code->IsGapAt(0));   // Label
+  CHECK_EQ(true, R.code->IsGapAt(1));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(2));  // i0
+  CHECK_EQ(true, R.code->IsGapAt(3));   // Gap
+  CHECK_EQ(true, R.code->IsGapAt(4));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(5));  // g
+}
+
+
+TEST(InstructionIsGapAt2) {
+  InstructionTester R;
+
+  BasicBlock* b0 = R.schedule.entry();
+  BasicBlock* b1 = R.schedule.exit();
+  R.schedule.AddGoto(b0, b1);
+  R.schedule.AddReturn(b1, R.Int32Constant(1));
+
+  R.allocCode();
+  TestInstr* i0 = TestInstr::New(R.zone(), 100);
+  TestInstr* g = TestInstr::New(R.zone(), 103)->MarkAsControl();
+  R.code->StartBlock(b0);
+  R.code->AddInstruction(i0, b0);
+  R.code->AddInstruction(g, b0);
+  R.code->EndBlock(b0);
+
+  TestInstr* i1 = TestInstr::New(R.zone(), 102);
+  TestInstr* g1 = TestInstr::New(R.zone(), 104)->MarkAsControl();
+  R.code->StartBlock(b1);
+  R.code->AddInstruction(i1, b1);
+  R.code->AddInstruction(g1, b1);
+  R.code->EndBlock(b1);
+
+  CHECK_EQ(true, R.code->InstructionAt(0)->IsBlockStart());
+
+  CHECK_EQ(true, R.code->IsGapAt(0));   // Label
+  CHECK_EQ(true, R.code->IsGapAt(1));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(2));  // i0
+  CHECK_EQ(true, R.code->IsGapAt(3));   // Gap
+  CHECK_EQ(true, R.code->IsGapAt(4));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(5));  // g
+
+  CHECK_EQ(true, R.code->InstructionAt(6)->IsBlockStart());
+
+  CHECK_EQ(true, R.code->IsGapAt(6));    // Label
+  CHECK_EQ(true, R.code->IsGapAt(7));    // Gap
+  CHECK_EQ(false, R.code->IsGapAt(8));   // i1
+  CHECK_EQ(true, R.code->IsGapAt(9));    // Gap
+  CHECK_EQ(true, R.code->IsGapAt(10));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(11));  // g1
+}
+
+
+TEST(InstructionAddGapMove) {
+  InstructionTester R;
+
+  BasicBlock* b0 = R.schedule.entry();
+  R.schedule.AddReturn(b0, R.Int32Constant(1));
+
+  R.allocCode();
+  TestInstr* i0 = TestInstr::New(R.zone(), 100);
+  TestInstr* g = TestInstr::New(R.zone(), 103)->MarkAsControl();
+  R.code->StartBlock(b0);
+  R.code->AddInstruction(i0, b0);
+  R.code->AddInstruction(g, b0);
+  R.code->EndBlock(b0);
+
+  CHECK_EQ(true, R.code->InstructionAt(0)->IsBlockStart());
+
+  CHECK_EQ(true, R.code->IsGapAt(0));   // Label
+  CHECK_EQ(true, R.code->IsGapAt(1));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(2));  // i0
+  CHECK_EQ(true, R.code->IsGapAt(3));   // Gap
+  CHECK_EQ(true, R.code->IsGapAt(4));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(5));  // g
+
+  int indexes[] = {0, 1, 3, 4, -1};
+  for (int i = 0; indexes[i] >= 0; i++) {
+    int index = indexes[i];
+
+    UnallocatedOperand* op1 = R.NewUnallocated(index + 6);
+    UnallocatedOperand* op2 = R.NewUnallocated(index + 12);
+
+    R.code->AddGapMove(index, op1, op2);
+    GapInstruction* gap = R.code->GapAt(index);
+    ParallelMove* move = gap->GetParallelMove(GapInstruction::START);
+    CHECK_NE(NULL, move);
+    const ZoneList<MoveOperands>* move_operands = move->move_operands();
+    CHECK_EQ(1, move_operands->length());
+    MoveOperands* cur = &move_operands->at(0);
+    CHECK_EQ(op1, cur->source());
+    CHECK_EQ(op2, cur->destination());
+  }
+}
+
+
+TEST(InstructionOperands) {
+  Zone zone(CcTest::InitIsolateOnce());
+
+  {
+    TestInstr* i = TestInstr::New(&zone, 101);
+    CHECK_EQ(0, i->OutputCount());
+    CHECK_EQ(0, i->InputCount());
+    CHECK_EQ(0, i->TempCount());
+  }
+
+  InstructionOperand* outputs[] = {
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER)};
+
+  InstructionOperand* inputs[] = {
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER)};
+
+  InstructionOperand* temps[] = {
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER)};
+
+  for (size_t i = 0; i < ARRAY_SIZE(outputs); i++) {
+    for (size_t j = 0; j < ARRAY_SIZE(inputs); j++) {
+      for (size_t k = 0; k < ARRAY_SIZE(temps); k++) {
+        TestInstr* m =
+            TestInstr::New(&zone, 101, i, outputs, j, inputs, k, temps);
+        CHECK(i == m->OutputCount());
+        CHECK(j == m->InputCount());
+        CHECK(k == m->TempCount());
+
+        for (size_t z = 0; z < i; z++) {
+          CHECK_EQ(outputs[z], m->OutputAt(z));
+        }
+
+        for (size_t z = 0; z < j; z++) {
+          CHECK_EQ(inputs[z], m->InputAt(z));
+        }
+
+        for (size_t z = 0; z < k; z++) {
+          CHECK_EQ(temps[z], m->TempAt(z));
+        }
+      }
+    }
+  }
+}
diff --git a/test/cctest/compiler/test-js-constant-cache.cc b/test/cctest/compiler/test-js-constant-cache.cc
new file mode 100644 (file)
index 0000000..42a606d
--- /dev/null
@@ -0,0 +1,284 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/typer.h"
+#include "src/types.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+class JSCacheTesterHelper {
+ protected:
+  explicit JSCacheTesterHelper(Zone* zone)
+      : main_graph_(zone), main_common_(zone), main_typer_(zone) {}
+  Graph main_graph_;
+  CommonOperatorBuilder main_common_;
+  Typer main_typer_;
+};
+
+
+class JSConstantCacheTester : public HandleAndZoneScope,
+                              public JSCacheTesterHelper,
+                              public JSGraph {
+ public:
+  JSConstantCacheTester()
+      : JSCacheTesterHelper(main_zone()),
+        JSGraph(&main_graph_, &main_common_, &main_typer_) {}
+
+  Type* upper(Node* node) { return NodeProperties::GetBounds(node).upper; }
+
+  Handle<Object> handle(Node* node) {
+    CHECK_EQ(IrOpcode::kHeapConstant, node->opcode());
+    return ValueOf<Handle<Object> >(node->op());
+  }
+
+  Factory* factory() { return main_isolate()->factory(); }
+};
+
+
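+// Annotation (not part of the original tests): the constant cache
+// canonicalizes per operator kind, so the CHECK_NEs below distinguish the
+// tagged NumberConstant zero from the raw Float64Constant and Int32Constant
+// zeros as well as from -0.0.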
+TEST(ZeroConstant1) {
+  JSConstantCacheTester T;
+
+  Node* zero = T.ZeroConstant();
+
+  CHECK_EQ(IrOpcode::kNumberConstant, zero->opcode());
+  CHECK_EQ(zero, T.Constant(0));
+  CHECK_NE(zero, T.Constant(-0.0));
+  CHECK_NE(zero, T.Constant(1.0));
+  CHECK_NE(zero, T.Constant(v8::base::OS::nan_value()));
+  CHECK_NE(zero, T.Float64Constant(0));
+  CHECK_NE(zero, T.Int32Constant(0));
+
+  Type* t = T.upper(zero);
+
+  CHECK(t->Is(Type::Number()));
+  CHECK(t->Is(Type::Integral32()));
+  CHECK(t->Is(Type::Signed32()));
+  CHECK(t->Is(Type::Unsigned32()));
+  CHECK(t->Is(Type::SignedSmall()));
+  CHECK(t->Is(Type::UnsignedSmall()));
+}
+
+
+TEST(MinusZeroConstant) {
+  JSConstantCacheTester T;
+
+  Node* minus_zero = T.Constant(-0.0);
+  Node* zero = T.ZeroConstant();
+
+  CHECK_EQ(IrOpcode::kNumberConstant, minus_zero->opcode());
+  CHECK_EQ(minus_zero, T.Constant(-0.0));
+  CHECK_NE(zero, minus_zero);
+
+  Type* t = T.upper(minus_zero);
+
+  CHECK(t->Is(Type::Number()));
+  CHECK(t->Is(Type::MinusZero()));
+  CHECK(!t->Is(Type::Integral32()));
+  CHECK(!t->Is(Type::Signed32()));
+  CHECK(!t->Is(Type::Unsigned32()));
+  CHECK(!t->Is(Type::SignedSmall()));
+  CHECK(!t->Is(Type::UnsignedSmall()));
+
+  double zero_value = ValueOf<double>(zero->op());
+  double minus_zero_value = ValueOf<double>(minus_zero->op());
+
+  CHECK_EQ(0.0, zero_value);
+  CHECK_NE(-0.0, zero_value);
+  CHECK_EQ(-0.0, minus_zero_value);
+  CHECK_NE(0.0, minus_zero_value);
+}
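+
+// A minimal standalone sketch (an annotation assuming IEEE-754 doubles and
+// <cmath>, not part of the original tests) of why the cache must keep 0.0 and
+// -0.0 apart even though 0.0 == -0.0 compares true: only the sign bit
+// distinguishes them.
+static inline bool ReferenceIsMinusZero(double value) {
+  return value == 0.0 && std::signbit(value);
+}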
+
+
+TEST(ZeroConstant2) {
+  JSConstantCacheTester T;
+
+  Node* zero = T.Constant(0);
+
+  CHECK_EQ(IrOpcode::kNumberConstant, zero->opcode());
+  CHECK_EQ(zero, T.ZeroConstant());
+  CHECK_NE(zero, T.Constant(-0.0));
+  CHECK_NE(zero, T.Constant(1.0));
+  CHECK_NE(zero, T.Constant(v8::base::OS::nan_value()));
+  CHECK_NE(zero, T.Float64Constant(0));
+  CHECK_NE(zero, T.Int32Constant(0));
+
+  Type* t = T.upper(zero);
+
+  CHECK(t->Is(Type::Number()));
+  CHECK(t->Is(Type::Integral32()));
+  CHECK(t->Is(Type::Signed32()));
+  CHECK(t->Is(Type::Unsigned32()));
+  CHECK(t->Is(Type::SignedSmall()));
+  CHECK(t->Is(Type::UnsignedSmall()));
+}
+
+
+TEST(OneConstant1) {
+  JSConstantCacheTester T;
+
+  Node* one = T.OneConstant();
+
+  CHECK_EQ(IrOpcode::kNumberConstant, one->opcode());
+  CHECK_EQ(one, T.Constant(1));
+  CHECK_EQ(one, T.Constant(1.0));
+  CHECK_NE(one, T.Constant(1.01));
+  CHECK_NE(one, T.Constant(-1.01));
+  CHECK_NE(one, T.Constant(v8::base::OS::nan_value()));
+  CHECK_NE(one, T.Float64Constant(1.0));
+  CHECK_NE(one, T.Int32Constant(1));
+
+  Type* t = T.upper(one);
+
+  CHECK(t->Is(Type::Number()));
+  CHECK(t->Is(Type::Integral32()));
+  CHECK(t->Is(Type::Signed32()));
+  CHECK(t->Is(Type::Unsigned32()));
+  CHECK(t->Is(Type::SignedSmall()));
+  CHECK(t->Is(Type::UnsignedSmall()));
+}
+
+
+TEST(OneConstant2) {
+  JSConstantCacheTester T;
+
+  Node* one = T.Constant(1);
+
+  CHECK_EQ(IrOpcode::kNumberConstant, one->opcode());
+  CHECK_EQ(one, T.OneConstant());
+  CHECK_EQ(one, T.Constant(1.0));
+  CHECK_NE(one, T.Constant(1.01));
+  CHECK_NE(one, T.Constant(-1.01));
+  CHECK_NE(one, T.Constant(v8::base::OS::nan_value()));
+  CHECK_NE(one, T.Float64Constant(1.0));
+  CHECK_NE(one, T.Int32Constant(1));
+
+  Type* t = T.upper(one);
+
+  CHECK(t->Is(Type::Number()));
+  CHECK(t->Is(Type::Integral32()));
+  CHECK(t->Is(Type::Signed32()));
+  CHECK(t->Is(Type::Unsigned32()));
+  CHECK(t->Is(Type::SignedSmall()));
+  CHECK(t->Is(Type::UnsignedSmall()));
+}
+
+
+TEST(Canonicalizations) {
+  JSConstantCacheTester T;
+
+  CHECK_EQ(T.ZeroConstant(), T.ZeroConstant());
+  CHECK_EQ(T.UndefinedConstant(), T.UndefinedConstant());
+  CHECK_EQ(T.TheHoleConstant(), T.TheHoleConstant());
+  CHECK_EQ(T.TrueConstant(), T.TrueConstant());
+  CHECK_EQ(T.FalseConstant(), T.FalseConstant());
+  CHECK_EQ(T.NullConstant(), T.NullConstant());
+  CHECK_EQ(T.ZeroConstant(), T.ZeroConstant());
+  CHECK_EQ(T.OneConstant(), T.OneConstant());
+  CHECK_EQ(T.NaNConstant(), T.NaNConstant());
+}
+
+
+TEST(NoAliasing) {
+  JSConstantCacheTester T;
+
+  Node* nodes[] = {T.UndefinedConstant(), T.TheHoleConstant(), T.TrueConstant(),
+                   T.FalseConstant(),     T.NullConstant(),    T.ZeroConstant(),
+                   T.OneConstant(),       T.NaNConstant(),     T.Constant(21),
+                   T.Constant(22.2)};
+
+  for (size_t i = 0; i < ARRAY_SIZE(nodes); i++) {
+    for (size_t j = 0; j < ARRAY_SIZE(nodes); j++) {
+      if (i != j) CHECK_NE(nodes[i], nodes[j]);
+    }
+  }
+}
+
+
+TEST(CanonicalizingNumbers) {
+  JSConstantCacheTester T;
+
+  FOR_FLOAT64_INPUTS(i) {
+    Node* node = T.Constant(*i);
+    for (int j = 0; j < 5; j++) {
+      CHECK_EQ(node, T.Constant(*i));
+    }
+  }
+}
+
+
+TEST(NumberTypes) {
+  JSConstantCacheTester T;
+
+  FOR_FLOAT64_INPUTS(i) {
+    double value = *i;
+    Node* node = T.Constant(value);
+    CHECK(T.upper(node)->Equals(Type::Of(value, T.main_zone())));
+  }
+}
+
+
+TEST(HeapNumbers) {
+  JSConstantCacheTester T;
+
+  FOR_FLOAT64_INPUTS(i) {
+    double value = *i;
+    Handle<Object> num = T.factory()->NewNumber(value);
+    Handle<HeapNumber> heap = T.factory()->NewHeapNumber(value);
+    Node* node1 = T.Constant(value);
+    Node* node2 = T.Constant(num);
+    Node* node3 = T.Constant(heap);
+    CHECK_EQ(node1, node2);
+    CHECK_EQ(node1, node3);
+  }
+}
+
+
+TEST(OddballHandle) {
+  JSConstantCacheTester T;
+
+  CHECK_EQ(T.UndefinedConstant(), T.Constant(T.factory()->undefined_value()));
+  CHECK_EQ(T.TheHoleConstant(), T.Constant(T.factory()->the_hole_value()));
+  CHECK_EQ(T.TrueConstant(), T.Constant(T.factory()->true_value()));
+  CHECK_EQ(T.FalseConstant(), T.Constant(T.factory()->false_value()));
+  CHECK_EQ(T.NullConstant(), T.Constant(T.factory()->null_value()));
+  CHECK_EQ(T.NaNConstant(), T.Constant(T.factory()->nan_value()));
+}
+
+
+TEST(OddballValues) {
+  JSConstantCacheTester T;
+
+  CHECK_EQ(*T.factory()->undefined_value(), *T.handle(T.UndefinedConstant()));
+  CHECK_EQ(*T.factory()->the_hole_value(), *T.handle(T.TheHoleConstant()));
+  CHECK_EQ(*T.factory()->true_value(), *T.handle(T.TrueConstant()));
+  CHECK_EQ(*T.factory()->false_value(), *T.handle(T.FalseConstant()));
+  CHECK_EQ(*T.factory()->null_value(), *T.handle(T.NullConstant()));
+}
+
+
+TEST(OddballTypes) {
+  JSConstantCacheTester T;
+
+  CHECK(T.upper(T.UndefinedConstant())->Is(Type::Undefined()));
+  // TODO(dcarney): figure this out.
+  // CHECK(T.upper(T.TheHoleConstant())->Is(Type::Internal()));
+  CHECK(T.upper(T.TrueConstant())->Is(Type::Boolean()));
+  CHECK(T.upper(T.FalseConstant())->Is(Type::Boolean()));
+  CHECK(T.upper(T.NullConstant())->Is(Type::Null()));
+  CHECK(T.upper(T.ZeroConstant())->Is(Type::Number()));
+  CHECK(T.upper(T.OneConstant())->Is(Type::Number()));
+  CHECK(T.upper(T.NaNConstant())->Is(Type::NaN()));
+}
+
+
+TEST(ExternalReferences) {
+  // TODO(titzer): test canonicalization of external references.
+}
diff --git a/test/cctest/compiler/test-js-context-specialization.cc b/test/cctest/compiler/test-js-context-specialization.cc
new file mode 100644 (file)
index 0000000..613ad06
--- /dev/null
@@ -0,0 +1,252 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-context-specialization.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/simplified-node-factory.h"
+#include "src/compiler/source-position.h"
+#include "src/compiler/typer.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/function-tester.h"
+#include "test/cctest/compiler/graph-builder-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+class ContextSpecializationTester
+    : public HandleAndZoneScope,
+      public DirectGraphBuilder,
+      public SimplifiedNodeFactory<ContextSpecializationTester> {
+ public:
+  ContextSpecializationTester()
+      : DirectGraphBuilder(new (main_zone()) Graph(main_zone())),
+        common_(main_zone()),
+        javascript_(main_zone()),
+        simplified_(main_zone()),
+        typer_(main_zone()),
+        jsgraph_(graph(), common(), &typer_),
+        info_(main_isolate(), main_zone()) {}
+
+  Factory* factory() { return main_isolate()->factory(); }
+  CommonOperatorBuilder* common() { return &common_; }
+  JSOperatorBuilder* javascript() { return &javascript_; }
+  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+  JSGraph* jsgraph() { return &jsgraph_; }
+  CompilationInfo* info() { return &info_; }
+
+ private:
+  CommonOperatorBuilder common_;
+  JSOperatorBuilder javascript_;
+  SimplifiedOperatorBuilder simplified_;
+  Typer typer_;
+  JSGraph jsgraph_;
+  CompilationInfo info_;
+};
+
+
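+// Annotation (not part of the original tests): the test below builds the
+// context chain native <- ctx1 <- ctx2 and checks, among other cases, that a
+// depth-2 LoadContext from ctx2 is folded into a depth-0 load from the now
+// constant native context, while mutable depth-0 slots are left untouched.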
+TEST(ReduceJSLoadContext) {
+  ContextSpecializationTester t;
+
+  Node* start = t.NewNode(t.common()->Start());
+  t.graph()->SetStart(start);
+
+  // Make a context and initialize it a bit for this test.
+  Handle<Context> native = t.factory()->NewNativeContext();
+  Handle<Context> ctx1 = t.factory()->NewNativeContext();
+  Handle<Context> ctx2 = t.factory()->NewNativeContext();
+  ctx2->set_previous(*ctx1);
+  ctx1->set_previous(*native);
+  Handle<Object> expected = t.factory()->InternalizeUtf8String("gboy!");
+  const int slot = Context::GLOBAL_OBJECT_INDEX;
+  native->set(slot, *expected);
+
+  Node* const_context = t.jsgraph()->Constant(native);
+  Node* param_context = t.NewNode(t.common()->Parameter(0));
+  JSContextSpecializer spec(t.info(), t.jsgraph(), const_context);
+
+  {
+    // Mutable slot, constant context, depth = 0 => do nothing.
+    t.info()->SetContext(native);
+    Node* load = t.NewNode(t.javascript()->LoadContext(0, 0, false),
+                           const_context, start, start);
+    Reduction r = spec.ReduceJSLoadContext(load);
+    CHECK(!r.Changed());
+  }
+
+  {
+    // Mutable slot, non-constant context, depth = 0 => do nothing.
+    t.info()->SetContext(native);
+    Node* load = t.NewNode(t.javascript()->LoadContext(0, 0, false),
+                           param_context, start, start);
+    Reduction r = spec.ReduceJSLoadContext(load);
+    CHECK(!r.Changed());
+  }
+
+  {
+    // Mutable slot, non-constant context, depth > 0 => fold in the parent
+    // context.
+    t.info()->SetContext(ctx2);
+    Node* load = t.NewNode(
+        t.javascript()->LoadContext(2, Context::GLOBAL_EVAL_FUN_INDEX, false),
+        param_context, start, start);
+    Reduction r = spec.ReduceJSLoadContext(load);
+    CHECK(r.Changed());
+    CHECK_EQ(IrOpcode::kHeapConstant, r.replacement()->InputAt(0)->opcode());
+    ValueMatcher<Handle<Context> > match(r.replacement()->InputAt(0));
+    CHECK_EQ(*native, *match.Value());
+    ContextAccess access = static_cast<Operator1<ContextAccess>*>(
+                               r.replacement()->op())->parameter();
+    CHECK_EQ(Context::GLOBAL_EVAL_FUN_INDEX, access.index());
+    CHECK_EQ(0, access.depth());
+    CHECK_EQ(false, access.immutable());
+  }
+
+  {
+    // Immutable slot, constant context => specialize.
+    t.info()->SetContext(native);
+    Node* load = t.NewNode(t.javascript()->LoadContext(0, slot, true),
+                           const_context, start, start);
+    Reduction r = spec.ReduceJSLoadContext(load);
+    CHECK(r.Changed());
+    CHECK(r.replacement() != load);
+
+    ValueMatcher<Handle<Object> > match(r.replacement());
+    CHECK(match.HasValue());
+    CHECK_EQ(*expected, *match.Value());
+  }
+
+  {
+    // Immutable slot, non-constant context => specialize.
+    t.info()->SetContext(native);
+    Node* load = t.NewNode(t.javascript()->LoadContext(0, slot, true),
+                           param_context, start, start);
+    Reduction r = spec.ReduceJSLoadContext(load);
+    CHECK(r.Changed());
+    CHECK(r.replacement() != load);
+
+    ValueMatcher<Handle<Object> > match(r.replacement());
+    CHECK(match.HasValue());
+    CHECK_EQ(*expected, *match.Value());
+  }
+
+  // TODO(titzer): test with other kinds of contexts, e.g. a function context.
+  // TODO(sigurds): test that loads below a created context are not optimized.
+}
+
+
+// TODO(titzer): factor out common code with effects checking in typed lowering.
+static void CheckEffectInput(Node* effect, Node* use) {
+  CHECK_EQ(effect, NodeProperties::GetEffectInput(use));
+}
+
+
+TEST(SpecializeToContext) {
+  ContextSpecializationTester t;
+
+  Node* start = t.NewNode(t.common()->Start());
+  t.graph()->SetStart(start);
+
+  // Make a context and initialize it a bit for this test.
+  Handle<Context> native = t.factory()->NewNativeContext();
+  Handle<Object> expected = t.factory()->InternalizeUtf8String("gboy!");
+  const int slot = Context::GLOBAL_OBJECT_INDEX;
+  native->set(slot, *expected);
+  t.info()->SetContext(native);
+
+  Node* const_context = t.jsgraph()->Constant(native);
+  Node* param_context = t.NewNode(t.common()->Parameter(0));
+  JSContextSpecializer spec(t.info(), t.jsgraph(), const_context);
+
+  {
+    // Check that SpecializeToContext() replaces values and forwards effects
+    // correctly, and folds values from constant and non-constant contexts.
+    Node* effect_in = t.NewNode(t.common()->Start());
+    Node* load = t.NewNode(t.javascript()->LoadContext(0, slot, true),
+                           const_context, const_context, effect_in, start);
+
+    Node* value_use = t.ChangeTaggedToInt32(load);
+    Node* other_load = t.NewNode(t.javascript()->LoadContext(0, slot, true),
+                                 param_context, param_context, load, start);
+    Node* effect_use = other_load;
+    Node* other_use = t.ChangeTaggedToInt32(other_load);
+
+    // Double check the above graph is what we expect, or the test is broken.
+    CheckEffectInput(effect_in, load);
+    CheckEffectInput(load, effect_use);
+
+    // Perform the substitution on the entire graph.
+    spec.SpecializeToContext();
+
+    // Effects should have been forwarded (not replaced with a value).
+    CheckEffectInput(effect_in, effect_use);
+
+    // Use of {other_load} should not have been replaced.
+    CHECK_EQ(other_load, other_use->InputAt(0));
+
+    Node* replacement = value_use->InputAt(0);
+    ValueMatcher<Handle<Object> > match(replacement);
+    CHECK(match.HasValue());
+    CHECK_EQ(*expected, *match.Value());
+  }
+  // TODO(titzer): clean up above test and test more complicated effects.
+}
+
+
+TEST(SpecializeJSFunction_ToConstant1) {
+  FunctionTester T(
+      "(function() { var x = 1; function inc(a)"
+      " { return a + x; } return inc; })()");
+
+  T.CheckCall(1.0, 0.0, 0.0);
+  T.CheckCall(2.0, 1.0, 0.0);
+  T.CheckCall(2.1, 1.1, 0.0);
+}
+
+
+TEST(SpecializeJSFunction_ToConstant2) {
+  FunctionTester T(
+      "(function() { var x = 1.5; var y = 2.25; var z = 3.75;"
+      " function f(a) { return a - x + y - z; } return f; })()");
+
+  T.CheckCall(-3.0, 0.0, 0.0);
+  T.CheckCall(-2.0, 1.0, 0.0);
+  T.CheckCall(-1.9, 1.1, 0.0);
+}
+
+
+TEST(SpecializeJSFunction_ToConstant3) {
+  FunctionTester T(
+      "(function() { var x = -11.5; function inc()"
+      " { return (function(a) { return a + x; }); }"
+      " return inc(); })()");
+
+  T.CheckCall(-11.5, 0.0, 0.0);
+  T.CheckCall(-10.5, 1.0, 0.0);
+  T.CheckCall(-10.4, 1.1, 0.0);
+}
+
+
+TEST(SpecializeJSFunction_ToConstant_uninit) {
+  {
+    FunctionTester T(
+        "(function() { if (false) { var x = 1; } function inc(a)"
+        " { return x; } return inc; })()");  // x is undefined!
+
+    CHECK(T.Call(T.Val(0.0), T.Val(0.0)).ToHandleChecked()->IsUndefined());
+    CHECK(T.Call(T.Val(2.0), T.Val(0.0)).ToHandleChecked()->IsUndefined());
+    CHECK(T.Call(T.Val(-2.1), T.Val(0.0)).ToHandleChecked()->IsUndefined());
+  }
+
+  {
+    FunctionTester T(
+        "(function() { if (false) { var x = 1; } function inc(a)"
+        " { return a + x; } return inc; })()");  // x is undefined!
+
+    CHECK(T.Call(T.Val(0.0), T.Val(0.0)).ToHandleChecked()->IsNaN());
+    CHECK(T.Call(T.Val(2.0), T.Val(0.0)).ToHandleChecked()->IsNaN());
+    CHECK(T.Call(T.Val(-2.1), T.Val(0.0)).ToHandleChecked()->IsNaN());
+  }
+}
diff --git a/test/cctest/compiler/test-js-typed-lowering.cc b/test/cctest/compiler/test-js-typed-lowering.cc
new file mode 100644 (file)
index 0000000..1bbc76e
--- /dev/null
@@ -0,0 +1,1345 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-typed-lowering.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/typer.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+class JSTypedLoweringTester : public HandleAndZoneScope {
+ public:
+  JSTypedLoweringTester()
+      : isolate(main_isolate()),
+        binop(NULL),
+        unop(NULL),
+        javascript(main_zone()),
+        machine(main_zone()),
+        simplified(main_zone()),
+        common(main_zone()),
+        graph(main_zone()),
+        typer(main_zone()),
+        source_positions(&graph),
+        context_node(NULL) {
+    typer.DecorateGraph(&graph);
+  }
+
+  Isolate* isolate;
+  Operator* binop;
+  Operator* unop;
+  JSOperatorBuilder javascript;
+  MachineOperatorBuilder machine;
+  SimplifiedOperatorBuilder simplified;
+  CommonOperatorBuilder common;
+  Graph graph;
+  Typer typer;
+  SourcePositionTable source_positions;
+  Node* context_node;
+
+  Node* Parameter(Type* t, int32_t index = 0) {
+    Node* n = graph.NewNode(common.Parameter(index));
+    NodeProperties::SetBounds(n, Bounds(Type::None(), t));
+    return n;
+  }
+
+  Node* reduce(Node* node) {
+    JSGraph jsgraph(&graph, &common, &typer);
+    JSTypedLowering reducer(&jsgraph, &source_positions);
+    Reduction reduction = reducer.Reduce(node);
+    if (reduction.Changed()) return reduction.replacement();
+    return node;
+  }
+
+  Node* start() {
+    Node* s = graph.start();
+    if (s == NULL) {
+      s = graph.NewNode(common.Start());
+      graph.SetStart(s);
+    }
+    return s;
+  }
+
+  Node* context() {
+    if (context_node == NULL) {
+      context_node = graph.NewNode(common.Parameter(-1));
+    }
+    return context_node;
+  }
+
+  Node* control() { return start(); }
+
+  void CheckPureBinop(IrOpcode::Value expected, Node* node) {
+    CHECK_EQ(expected, node->opcode());
+    CHECK_EQ(2, node->InputCount());  // should not have context, effect, etc.
+  }
+
+  void CheckPureBinop(Operator* expected, Node* node) {
+    CHECK_EQ(expected->opcode(), node->op()->opcode());
+    CHECK_EQ(2, node->InputCount());  // should not have context, effect, etc.
+  }
+
+  Node* ReduceUnop(Operator* op, Type* input_type) {
+    return reduce(Unop(op, Parameter(input_type)));
+  }
+
+  Node* ReduceBinop(Operator* op, Type* left_type, Type* right_type) {
+    return reduce(Binop(op, Parameter(left_type, 0), Parameter(right_type, 1)));
+  }
+
+  Node* Binop(Operator* op, Node* left, Node* right) {
+    // JS binops also require context, effect, and control
+    return graph.NewNode(op, left, right, context(), start(), control());
+  }
+
+  Node* Unop(Operator* op, Node* input) {
+    // JS unops also require context, effect, and control
+    return graph.NewNode(op, input, context(), start(), control());
+  }
+
+  Node* UseForEffect(Node* node) {
+    // TODO(titzer): use EffectPhi after fixing EffectCount
+    return graph.NewNode(javascript.ToNumber(), node, context(), node,
+                         control());
+  }
+
+  void CheckEffectInput(Node* effect, Node* use) {
+    CHECK_EQ(effect, NodeProperties::GetEffectInput(use));
+  }
+
+  void CheckInt32Constant(int32_t expected, Node* result) {
+    CHECK_EQ(IrOpcode::kInt32Constant, result->opcode());
+    CHECK_EQ(expected, ValueOf<int32_t>(result->op()));
+  }
+
+  void CheckNumberConstant(double expected, Node* result) {
+    CHECK_EQ(IrOpcode::kNumberConstant, result->opcode());
+    CHECK_EQ(expected, ValueOf<double>(result->op()));
+  }
+
+  void CheckNaN(Node* result) {
+    CHECK_EQ(IrOpcode::kNumberConstant, result->opcode());
+    double value = ValueOf<double>(result->op());
+    CHECK(std::isnan(value));
+  }
+
+  void CheckTrue(Node* result) {
+    CheckHandle(isolate->factory()->true_value(), result);
+  }
+
+  void CheckFalse(Node* result) {
+    CheckHandle(isolate->factory()->false_value(), result);
+  }
+
+  void CheckHandle(Handle<Object> expected, Node* result) {
+    CHECK_EQ(IrOpcode::kHeapConstant, result->opcode());
+    Handle<Object> value = ValueOf<Handle<Object> >(result->op());
+    CHECK_EQ(*expected, *value);
+  }
+};
+
+static Type* kStringTypes[] = {Type::InternalizedString(), Type::OtherString(),
+                               Type::String()};
+
+
+static Type* kInt32Types[] = {
+    Type::UnsignedSmall(),   Type::OtherSignedSmall(), Type::OtherUnsigned31(),
+    Type::OtherUnsigned32(), Type::OtherSigned32(),    Type::SignedSmall(),
+    Type::Signed32(),        Type::Unsigned32(),       Type::Integral32()};
+
+
+static Type* kNumberTypes[] = {
+    Type::UnsignedSmall(),   Type::OtherSignedSmall(), Type::OtherUnsigned31(),
+    Type::OtherUnsigned32(), Type::OtherSigned32(),    Type::SignedSmall(),
+    Type::Signed32(),        Type::Unsigned32(),       Type::Integral32(),
+    Type::MinusZero(),       Type::NaN(),              Type::OtherNumber(),
+    Type::Number()};
+
+
+static Type* kJSTypes[] = {Type::Undefined(), Type::Null(),   Type::Boolean(),
+                           Type::Number(),    Type::String(), Type::Object()};
+
+
+static Type* I32Type(bool is_signed) {
+  return is_signed ? Type::Signed32() : Type::Unsigned32();
+}
+
+
+static IrOpcode::Value NumberToI32(bool is_signed) {
+  return is_signed ? IrOpcode::kNumberToInt32 : IrOpcode::kNumberToUint32;
+}
+
+
+TEST(StringBinops) {
+  JSTypedLoweringTester R;
+
+  for (size_t i = 0; i < ARRAY_SIZE(kStringTypes); ++i) {
+    Node* p0 = R.Parameter(kStringTypes[i], 0);
+
+    for (size_t j = 0; j < ARRAY_SIZE(kStringTypes); ++j) {
+      Node* p1 = R.Parameter(kStringTypes[j], 1);
+
+      Node* add = R.Binop(R.javascript.Add(), p0, p1);
+      Node* r = R.reduce(add);
+
+      R.CheckPureBinop(IrOpcode::kStringAdd, r);
+      CHECK_EQ(p0, r->InputAt(0));
+      CHECK_EQ(p1, r->InputAt(1));
+    }
+  }
+}
+
+
+TEST(AddNumber1) {
+  JSTypedLoweringTester R;
+  for (size_t i = 0; i < ARRAY_SIZE(kNumberTypes); ++i) {
+    Node* p0 = R.Parameter(kNumberTypes[i], 0);
+    Node* p1 = R.Parameter(kNumberTypes[i], 1);
+    Node* add = R.Binop(R.javascript.Add(), p0, p1);
+    Node* r = R.reduce(add);
+
+    R.CheckPureBinop(IrOpcode::kNumberAdd, r);
+    CHECK_EQ(p0, r->InputAt(0));
+    CHECK_EQ(p1, r->InputAt(1));
+  }
+}
+
+
+TEST(NumberBinops) {
+  JSTypedLoweringTester R;
+  Operator* ops[] = {
+      R.javascript.Add(),      R.simplified.NumberAdd(),
+      R.javascript.Subtract(), R.simplified.NumberSubtract(),
+      R.javascript.Multiply(), R.simplified.NumberMultiply(),
+      R.javascript.Divide(),   R.simplified.NumberDivide(),
+      R.javascript.Modulus(),  R.simplified.NumberModulus(),
+  };
+
+  for (size_t i = 0; i < ARRAY_SIZE(kNumberTypes); ++i) {
+    Node* p0 = R.Parameter(kNumberTypes[i], 0);
+
+    for (size_t j = 0; j < ARRAY_SIZE(kNumberTypes); ++j) {
+      Node* p1 = R.Parameter(kNumberTypes[j], 1);
+
+      for (size_t k = 0; k < ARRAY_SIZE(ops); k += 2) {
+        Node* add = R.Binop(ops[k], p0, p1);
+        Node* r = R.reduce(add);
+
+        R.CheckPureBinop(ops[k + 1], r);
+        CHECK_EQ(p0, r->InputAt(0));
+        CHECK_EQ(p1, r->InputAt(1));
+      }
+    }
+  }
+}
+
+
+static void CheckToI32(Node* old_input, Node* new_input, bool is_signed) {
+  Type* old_type = NodeProperties::GetBounds(old_input).upper;
+  Type* expected_type = I32Type(is_signed);
+  if (old_type->Is(expected_type)) {
+    CHECK_EQ(old_input, new_input);
+  } else if (new_input->opcode() == IrOpcode::kNumberConstant) {
+    CHECK(NodeProperties::GetBounds(new_input).upper->Is(expected_type));
+    double v = ValueOf<double>(new_input->op());
+    double e = static_cast<double>(is_signed ? FastD2I(v) : FastD2UI(v));
+    CHECK_EQ(e, v);
+  } else {
+    CHECK_EQ(NumberToI32(is_signed), new_input->opcode());
+  }
+}
+
+
+// A helper class for testing lowering of bitwise shift operators.
+class JSBitwiseShiftTypedLoweringTester : public JSTypedLoweringTester {
+ public:
+  static const int kNumberOps = 6;
+  Operator* ops[kNumberOps];
+  bool signedness[kNumberOps];
+
+  JSBitwiseShiftTypedLoweringTester() {
+    Operator* o[] = {javascript.ShiftLeft(),         machine.Word32Shl(),
+                     javascript.ShiftRight(),        machine.Word32Sar(),
+                     javascript.ShiftRightLogical(), machine.Word32Shr()};
+    memcpy(ops, o, sizeof(o));
+
+    // Expected signedness of left and right conversions above.
+    bool s[] = {true, false, true, false, false, false};
+    memcpy(signedness, s, sizeof(s));
+  }
+};
+
+
+TEST(Int32BitwiseShifts) {
+  JSBitwiseShiftTypedLoweringTester R;
+
+  Type* types[] = {
+      Type::SignedSmall(), Type::UnsignedSmall(), Type::OtherSigned32(),
+      Type::Unsigned32(),  Type::Signed32(),      Type::MinusZero(),
+      Type::NaN(),         Type::OtherNumber(),   Type::Undefined(),
+      Type::Null(),        Type::Boolean(),       Type::Number(),
+      Type::String(),      Type::Object()};
+
+  for (size_t i = 0; i < ARRAY_SIZE(types); ++i) {
+    Node* p0 = R.Parameter(types[i], 0);
+
+    for (size_t j = 0; j < ARRAY_SIZE(types); ++j) {
+      Node* p1 = R.Parameter(types[j], 1);
+
+      for (int k = 0; k < R.kNumberOps; k += 2) {
+        Node* add = R.Binop(R.ops[k], p0, p1);
+        Node* r = R.reduce(add);
+
+        R.CheckPureBinop(R.ops[k + 1], r);
+        Node* r0 = r->InputAt(0);
+        Node* r1 = r->InputAt(1);
+
+        CheckToI32(p0, r0, R.signedness[k]);
+
+        R.CheckPureBinop(IrOpcode::kWord32And, r1);
+        CheckToI32(p1, r1->InputAt(0), R.signedness[k + 1]);
+        R.CheckInt32Constant(0x1F, r1->InputAt(1));
+      }
+    }
+  }
+}
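+
+// A hypothetical reference sketch (an annotation, not part of the original
+// tests) of the shift-count masking asserted above: JavaScript shifts use
+// only the low five bits of the count, which is what the Word32And against
+// 0x1F implements.
+static inline int32_t ReferenceShiftLeftJS(int32_t value, uint32_t count) {
+  // Shift the unsigned representation to keep the sketch free of
+  // implementation-defined behavior for negative values.
+  return static_cast<int32_t>(static_cast<uint32_t>(value) << (count & 0x1F));
+}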
+
+
+// A helper class for testing lowering of bitwise operators.
+class JSBitwiseTypedLoweringTester : public JSTypedLoweringTester {
+ public:
+  static const int kNumberOps = 6;
+  Operator* ops[kNumberOps];
+  bool signedness[kNumberOps];
+
+  JSBitwiseTypedLoweringTester() {
+    Operator* o[] = {javascript.BitwiseOr(),  machine.Word32Or(),
+                     javascript.BitwiseXor(), machine.Word32Xor(),
+                     javascript.BitwiseAnd(), machine.Word32And()};
+    memcpy(ops, o, sizeof(o));
+
+    // Expected signedness of left and right conversions above.
+    bool s[] = {true, true, true, true, true, true};
+    memcpy(signedness, s, sizeof(s));
+  }
+};
+
+
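+// For |, ^ and & both operands are converted as signed (every entry in
+// {signedness} above is true), so the expected lowering is, as a sketch:
+//   x OP y  =>  Word32Op(ToInt32(x), ToInt32(y))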
+TEST(Int32BitwiseBinops) {
+  JSBitwiseTypedLoweringTester R;
+
+  Type* types[] = {
+      Type::SignedSmall(), Type::UnsignedSmall(), Type::OtherSigned32(),
+      Type::Unsigned32(),  Type::Signed32(),      Type::MinusZero(),
+      Type::NaN(),         Type::OtherNumber(),   Type::Undefined(),
+      Type::Null(),        Type::Boolean(),       Type::Number(),
+      Type::String(),      Type::Object()};
+
+  for (size_t i = 0; i < ARRAY_SIZE(types); ++i) {
+    Node* p0 = R.Parameter(types[i], 0);
+
+    for (size_t j = 0; j < ARRAY_SIZE(types); ++j) {
+      Node* p1 = R.Parameter(types[j], 1);
+
+      for (int k = 0; k < R.kNumberOps; k += 2) {
+        Node* add = R.Binop(R.ops[k], p0, p1);
+        Node* r = R.reduce(add);
+
+        R.CheckPureBinop(R.ops[k + 1], r);
+
+        CheckToI32(p0, r->InputAt(0), R.signedness[k]);
+        CheckToI32(p1, r->InputAt(1), R.signedness[k + 1]);
+      }
+    }
+  }
+}
+
+
+TEST(JSToNumber1) {
+  JSTypedLoweringTester R;
+  Operator* ton = R.javascript.ToNumber();
+
+  for (size_t i = 0; i < ARRAY_SIZE(kNumberTypes); i++) {  // ToNumber(number)
+    Node* r = R.ReduceUnop(ton, kNumberTypes[i]);
+    CHECK_EQ(IrOpcode::kParameter, r->opcode());
+  }
+
+  {  // ToNumber(undefined)
+    Node* r = R.ReduceUnop(ton, Type::Undefined());
+    R.CheckNaN(r);
+  }
+
+  {  // ToNumber(null)
+    Node* r = R.ReduceUnop(ton, Type::Null());
+    R.CheckNumberConstant(0.0, r);
+  }
+}
+
+
+TEST(JSToNumber_replacement) {
+  JSTypedLoweringTester R;
+
+  Type* types[] = {Type::Null(), Type::Undefined(), Type::Number()};
+
+  for (size_t i = 0; i < ARRAY_SIZE(types); i++) {
+    Node* n = R.Parameter(types[i]);
+    Node* c = R.graph.NewNode(R.javascript.ToNumber(), n, R.context(),
+                              R.start(), R.start());
+    Node* effect_use = R.UseForEffect(c);
+    Node* add = R.graph.NewNode(R.simplified.ReferenceEqual(Type::Any()), n, c);
+
+    R.CheckEffectInput(c, effect_use);
+    Node* r = R.reduce(c);
+
+    if (types[i]->Is(Type::Number())) {
+      CHECK_EQ(n, r);
+    } else {
+      CHECK_EQ(IrOpcode::kNumberConstant, r->opcode());
+    }
+
+    CHECK_EQ(n, add->InputAt(0));
+    CHECK_EQ(r, add->InputAt(1));
+    R.CheckEffectInput(R.start(), effect_use);
+  }
+}
+
+
+TEST(JSToNumberOfConstant) {
+  JSTypedLoweringTester R;
+
+  Operator* ops[] = {R.common.NumberConstant(0), R.common.NumberConstant(-1),
+                     R.common.NumberConstant(0.1), R.common.Int32Constant(1177),
+                     R.common.Float64Constant(0.99)};
+
+  for (size_t i = 0; i < ARRAY_SIZE(ops); i++) {
+    Node* n = R.graph.NewNode(ops[i]);
+    Node* convert = R.Unop(R.javascript.ToNumber(), n);
+    Node* r = R.reduce(convert);
+    // Note that either outcome below is correct. Which one occurs depends
+    // only on whether the types of constants are computed eagerly or only
+    // later by the typing pass.
+    if (NodeProperties::GetBounds(n).upper->Is(Type::Number())) {
+      // If number constants are eagerly typed, then reduction should
+      // remove the ToNumber.
+      CHECK_EQ(n, r);
+    } else {
+      // Otherwise, type-based lowering should only look at the type, and
+      // *not* try to constant fold.
+      CHECK_EQ(convert, r);
+    }
+  }
+}
+
+
+TEST(JSToNumberOfNumberOrOtherPrimitive) {
+  JSTypedLoweringTester R;
+  Type* others[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
+                    Type::String()};
+
+  for (size_t i = 0; i < ARRAY_SIZE(others); i++) {
+    Type* t = Type::Union(Type::Number(), others[i], R.main_zone());
+    Node* r = R.ReduceUnop(R.javascript.ToNumber(), t);
+    CHECK_EQ(IrOpcode::kJSToNumber, r->opcode());
+  }
+}
+
+
+TEST(JSToBoolean) {
+  JSTypedLoweringTester R;
+  Operator* op = R.javascript.ToBoolean();
+
+  {  // ToBoolean(undefined)
+    Node* r = R.ReduceUnop(op, Type::Undefined());
+    R.CheckFalse(r);
+  }
+
+  {  // ToBoolean(null)
+    Node* r = R.ReduceUnop(op, Type::Null());
+    R.CheckFalse(r);
+  }
+
+  {  // ToBoolean(boolean)
+    Node* r = R.ReduceUnop(op, Type::Boolean());
+    CHECK_EQ(IrOpcode::kParameter, r->opcode());
+  }
+
+  {  // ToBoolean(number)
+    Node* r = R.ReduceUnop(op, Type::Number());
+    CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
+    Node* i = r->InputAt(0);
+    CHECK_EQ(IrOpcode::kNumberEqual, i->opcode());
+    // ToBoolean(number) => BooleanNot(NumberEqual(x, #0))
+  }
+
+  {  // ToBoolean(string)
+    Node* r = R.ReduceUnop(op, Type::String());
+    // TODO(titzer): test will break with better js-typed-lowering
+    CHECK_EQ(IrOpcode::kJSToBoolean, r->opcode());
+  }
+
+  {  // ToBoolean(detectable object)
+    Node* r = R.ReduceUnop(op, Type::DetectableObject());
+    R.CheckTrue(r);
+  }
+
+  {  // ToBoolean(undetectable)
+    Node* r = R.ReduceUnop(op, Type::Undetectable());
+    R.CheckFalse(r);
+  }
+
+  {  // ToBoolean(object)
+    Node* r = R.ReduceUnop(op, Type::Object());
+    CHECK_EQ(IrOpcode::kJSToBoolean, r->opcode());
+  }
+}
+
+
+TEST(JSToBoolean_replacement) {
+  JSTypedLoweringTester R;
+
+  Type* types[] = {Type::Null(), Type::Undefined(), Type::Boolean(),
+                   Type::DetectableObject(), Type::Undetectable()};
+
+  for (size_t i = 0; i < ARRAY_SIZE(types); i++) {
+    Node* n = R.Parameter(types[i]);
+    Node* c = R.graph.NewNode(R.javascript.ToBoolean(), n, R.context(),
+                              R.start(), R.start());
+    Node* effect_use = R.UseForEffect(c);
+    Node* add = R.graph.NewNode(R.simplified.ReferenceEqual(Type::Any()), n, c);
+
+    R.CheckEffectInput(c, effect_use);
+    Node* r = R.reduce(c);
+
+    if (types[i]->Is(Type::Boolean())) {
+      CHECK_EQ(n, r);
+    } else {
+      CHECK_EQ(IrOpcode::kHeapConstant, r->opcode());
+    }
+
+    CHECK_EQ(n, add->InputAt(0));
+    CHECK_EQ(r, add->InputAt(1));
+    R.CheckEffectInput(R.start(), effect_use);
+  }
+}
+
+
+TEST(JSToString1) {
+  JSTypedLoweringTester R;
+
+  for (size_t i = 0; i < ARRAY_SIZE(kStringTypes); i++) {
+    Node* r = R.ReduceUnop(R.javascript.ToString(), kStringTypes[i]);
+    CHECK_EQ(IrOpcode::kParameter, r->opcode());
+  }
+
+  Operator* op = R.javascript.ToString();
+
+  {  // ToString(undefined) => "undefined"
+    Node* r = R.ReduceUnop(op, Type::Undefined());
+    R.CheckHandle(R.isolate->factory()->undefined_string(), r);
+  }
+
+  {  // ToString(null) => "null"
+    Node* r = R.ReduceUnop(op, Type::Null());
+    R.CheckHandle(R.isolate->factory()->null_string(), r);
+  }
+
+  {  // ToString(boolean)
+    Node* r = R.ReduceUnop(op, Type::Boolean());
+    // TODO(titzer): could be a branch
+    CHECK_EQ(IrOpcode::kJSToString, r->opcode());
+  }
+
+  {  // ToString(number)
+    Node* r = R.ReduceUnop(op, Type::Number());
+    // TODO(titzer): could remove effects
+    CHECK_EQ(IrOpcode::kJSToString, r->opcode());
+  }
+
+  {  // ToString(string)
+    Node* r = R.ReduceUnop(op, Type::String());
+    CHECK_EQ(IrOpcode::kParameter, r->opcode());  // No-op
+  }
+
+  {  // ToString(object)
+    Node* r = R.ReduceUnop(op, Type::Object());
+    CHECK_EQ(IrOpcode::kJSToString, r->opcode());  // No reduction.
+  }
+}
+
+
+TEST(JSToString_replacement) {
+  JSTypedLoweringTester R;
+
+  Type* types[] = {Type::Null(), Type::Undefined(), Type::String()};
+
+  for (size_t i = 0; i < ARRAY_SIZE(types); i++) {
+    Node* n = R.Parameter(types[i]);
+    Node* c = R.graph.NewNode(R.javascript.ToString(), n, R.context(),
+                              R.start(), R.start());
+    Node* effect_use = R.UseForEffect(c);
+    Node* add = R.graph.NewNode(R.simplified.ReferenceEqual(Type::Any()), n, c);
+
+    R.CheckEffectInput(c, effect_use);
+    Node* r = R.reduce(c);
+
+    if (types[i]->Is(Type::String())) {
+      CHECK_EQ(n, r);
+    } else {
+      CHECK_EQ(IrOpcode::kHeapConstant, r->opcode());
+    }
+
+    CHECK_EQ(n, add->InputAt(0));
+    CHECK_EQ(r, add->InputAt(1));
+    R.CheckEffectInput(R.start(), effect_use);
+  }
+}
+
+
+TEST(StringComparison) {
+  JSTypedLoweringTester R;
+
+  Operator* ops[] = {
+      R.javascript.LessThan(),           R.simplified.StringLessThan(),
+      R.javascript.LessThanOrEqual(),    R.simplified.StringLessThanOrEqual(),
+      R.javascript.GreaterThan(),        R.simplified.StringLessThan(),
+      R.javascript.GreaterThanOrEqual(), R.simplified.StringLessThanOrEqual()};
+
+  for (size_t i = 0; i < ARRAY_SIZE(kStringTypes); i++) {
+    Node* p0 = R.Parameter(kStringTypes[i], 0);
+    for (size_t j = 0; j < ARRAY_SIZE(kStringTypes); j++) {
+      Node* p1 = R.Parameter(kStringTypes[j], 1);
+
+      for (size_t k = 0; k < ARRAY_SIZE(ops); k += 2) {
+        Node* cmp = R.Binop(ops[k], p0, p1);
+        Node* r = R.reduce(cmp);
+
+        R.CheckPureBinop(ops[k + 1], r);
+        if (k >= 4) {
+          // GreaterThan and GreaterThanOrEqual commute the inputs
+          // and use the LessThan and LessThanOrEqual operators.
+          CHECK_EQ(p1, r->InputAt(0));
+          CHECK_EQ(p0, r->InputAt(1));
+        } else {
+          CHECK_EQ(p0, r->InputAt(0));
+          CHECK_EQ(p1, r->InputAt(1));
+        }
+      }
+    }
+  }
+}
+
+
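+// Checks that {converted} is {val} itself when {val} is already a Number,
+// and otherwise a JSToNumber applied to {val} (or an already-folded
+// NumberConstant).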
+static void CheckIsConvertedToNumber(Node* val, Node* converted) {
+  if (NodeProperties::GetBounds(val).upper->Is(Type::Number())) {
+    CHECK_EQ(val, converted);
+  } else {
+    if (converted->opcode() == IrOpcode::kNumberConstant) return;
+    CHECK_EQ(IrOpcode::kJSToNumber, converted->opcode());
+    CHECK_EQ(val, converted->InputAt(0));
+  }
+}
+
+
+TEST(NumberComparison) {
+  JSTypedLoweringTester R;
+
+  Operator* ops[] = {
+      R.javascript.LessThan(),           R.simplified.NumberLessThan(),
+      R.javascript.LessThanOrEqual(),    R.simplified.NumberLessThanOrEqual(),
+      R.javascript.GreaterThan(),        R.simplified.NumberLessThan(),
+      R.javascript.GreaterThanOrEqual(), R.simplified.NumberLessThanOrEqual()};
+
+  for (size_t i = 0; i < ARRAY_SIZE(kJSTypes); i++) {
+    Type* t0 = kJSTypes[i];
+    if (t0->Is(Type::String())) continue;  // skip Type::String
+    Node* p0 = R.Parameter(t0, 0);
+
+    for (size_t j = 0; j < ARRAY_SIZE(kJSTypes); j++) {
+      Type* t1 = kJSTypes[j];
+      if (t1->Is(Type::String())) continue;  // skip Type::String
+      Node* p1 = R.Parameter(t1, 1);
+
+      for (size_t k = 0; k < ARRAY_SIZE(ops); k += 2) {
+        Node* cmp = R.Binop(ops[k], p0, p1);
+        Node* r = R.reduce(cmp);
+
+        R.CheckPureBinop(ops[k + 1], r);
+        if (k >= 4) {
+          // GreaterThan and GreaterThanOrEqual commute the inputs
+          // and use the LessThan and LessThanOrEqual operators.
+          CheckIsConvertedToNumber(p1, r->InputAt(0));
+          CheckIsConvertedToNumber(p0, r->InputAt(1));
+        } else {
+          CheckIsConvertedToNumber(p0, r->InputAt(0));
+          CheckIsConvertedToNumber(p1, r->InputAt(1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(MixedComparison1) {
+  JSTypedLoweringTester R;
+
+  Type* types[] = {Type::Number(), Type::String(),
+                   Type::Union(Type::Number(), Type::String(), R.main_zone())};
+
+  for (size_t i = 0; i < ARRAY_SIZE(types); i++) {
+    Node* p0 = R.Parameter(types[i], 0);
+
+    for (size_t j = 0; j < ARRAY_SIZE(types); j++) {
+      Node* p1 = R.Parameter(types[j], 1);
+      {
+        Node* cmp = R.Binop(R.javascript.LessThan(), p0, p1);
+        Node* r = R.reduce(cmp);
+
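+        // The comparison is lowered only when at most one side can be a
+        // string; if both sides might be strings, no reduction is expected.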
+        if (!types[i]->Maybe(Type::String()) ||
+            !types[j]->Maybe(Type::String())) {
+          if (types[i]->Is(Type::String()) && types[j]->Is(Type::String())) {
+            R.CheckPureBinop(R.simplified.StringLessThan(), r);
+          } else {
+            R.CheckPureBinop(R.simplified.NumberLessThan(), r);
+          }
+        } else {
+          CHECK_EQ(cmp, r);  // No reduction of mixed types.
+        }
+      }
+    }
+  }
+}
+
+
+TEST(ObjectComparison) {
+  JSTypedLoweringTester R;
+
+  Node* p0 = R.Parameter(Type::Object(), 0);
+  Node* p1 = R.Parameter(Type::Object(), 1);
+
+  Node* cmp = R.Binop(R.javascript.LessThan(), p0, p1);
+  Node* effect_use = R.UseForEffect(cmp);
+
+  R.CheckEffectInput(R.start(), cmp);
+  R.CheckEffectInput(cmp, effect_use);
+
+  Node* r = R.reduce(cmp);
+
+  R.CheckPureBinop(R.simplified.NumberLessThan(), r);
+
+  Node* i0 = r->InputAt(0);
+  Node* i1 = r->InputAt(1);
+
+  CHECK_NE(p0, i0);
+  CHECK_NE(p1, i1);
+  CHECK_EQ(IrOpcode::kJSToNumber, i0->opcode());
+  CHECK_EQ(IrOpcode::kJSToNumber, i1->opcode());
+
+  // Check effect chain is correct.
+  R.CheckEffectInput(R.start(), i0);
+  R.CheckEffectInput(i0, i1);
+  R.CheckEffectInput(i1, effect_use);
+}
+
+
+TEST(UnaryNot) {
+  JSTypedLoweringTester R;
+  Operator* opnot = R.javascript.UnaryNot();
+
+  for (size_t i = 0; i < ARRAY_SIZE(kJSTypes); i++) {
+    Node* r = R.ReduceUnop(opnot, kJSTypes[i]);
+    // TODO(titzer): test will break if/when js-typed-lowering constant folds.
+    CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
+  }
+}
+
+
+TEST(RemoveToNumberEffects) {
+  JSTypedLoweringTester R;
+
+  Node* effect_use = NULL;
+  for (int i = 0; i < 10; i++) {
+    Node* p0 = R.Parameter(Type::Number());
+    Node* ton = R.Unop(R.javascript.ToNumber(), p0);
+    effect_use = NULL;
+
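+    // Each case below gives {ton} a different kind of effect and/or value
+    // use before it is reduced.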
+    switch (i) {
+      case 0:
+        effect_use = R.graph.NewNode(R.javascript.ToNumber(), p0, R.context(),
+                                     ton, R.start());
+        break;
+      case 1:
+        effect_use = R.graph.NewNode(R.javascript.ToNumber(), ton, R.context(),
+                                     ton, R.start());
+        break;
+      case 2:
+        effect_use = R.graph.NewNode(R.common.EffectPhi(1), ton, R.start());
+        break;
+      case 3:
+        effect_use = R.graph.NewNode(R.javascript.Add(), ton, ton, R.context(),
+                                     ton, R.start());
+        break;
+      case 4:
+        effect_use = R.graph.NewNode(R.javascript.Add(), p0, p0, R.context(),
+                                     ton, R.start());
+        break;
+      case 5:
+        effect_use = R.graph.NewNode(R.common.Return(), p0, ton, R.start());
+        break;
+      case 6:
+        effect_use = R.graph.NewNode(R.common.Return(), ton, ton, R.start());
+    }
+
+    R.CheckEffectInput(R.start(), ton);
+    if (effect_use != NULL) R.CheckEffectInput(ton, effect_use);
+
+    Node* r = R.reduce(ton);
+    CHECK_EQ(p0, r);
+    CHECK_NE(R.start(), r);
+
+    if (effect_use != NULL) {
+      R.CheckEffectInput(R.start(), effect_use);
+      // Check that value uses of ToNumber() do not go to start().
+      for (int k = 0; k < effect_use->op()->InputCount(); k++) {
+        CHECK_NE(R.start(), effect_use->InputAt(k));
+      }
+    }
+  }
+
+  CHECK_EQ(NULL, effect_use);  // Loop should have run past all cases above.
+}
+
+
+// Helper class for testing the reduction of a single binop.
+class BinopEffectsTester {
+ public:
+  explicit BinopEffectsTester(Operator* op, Type* t0, Type* t1)
+      : R(),
+        p0(R.Parameter(t0, 0)),
+        p1(R.Parameter(t1, 1)),
+        binop(R.Binop(op, p0, p1)),
+        effect_use(R.graph.NewNode(R.common.EffectPhi(1), binop, R.start())) {
+    // Effects should be ordered start -> binop -> effect_use
+    R.CheckEffectInput(R.start(), binop);
+    R.CheckEffectInput(binop, effect_use);
+    result = R.reduce(binop);
+  }
+
+  JSTypedLoweringTester R;
+  Node* p0;
+  Node* p1;
+  Node* binop;
+  Node* effect_use;
+  Node* result;
+
+  void CheckEffectsRemoved() { R.CheckEffectInput(R.start(), effect_use); }
+
+  void CheckEffectOrdering(Node* n0) {
+    R.CheckEffectInput(R.start(), n0);
+    R.CheckEffectInput(n0, effect_use);
+  }
+
+  void CheckEffectOrdering(Node* n0, Node* n1) {
+    R.CheckEffectInput(R.start(), n0);
+    R.CheckEffectInput(n0, n1);
+    R.CheckEffectInput(n1, effect_use);
+  }
+
+  Node* CheckConvertedInput(IrOpcode::Value opcode, int which, bool effects) {
+    return CheckConverted(opcode, result->InputAt(which), effects);
+  }
+
+  Node* CheckConverted(IrOpcode::Value opcode, Node* node, bool effects) {
+    CHECK_EQ(opcode, node->opcode());
+    if (effects) {
+      CHECK_LT(0, NodeProperties::GetEffectInputCount(node));
+    } else {
+      CHECK_EQ(0, NodeProperties::GetEffectInputCount(node));
+    }
+    return node;
+  }
+
+  Node* CheckNoOp(int which) {
+    CHECK_EQ(which == 0 ? p0 : p1, result->InputAt(which));
+    return result->InputAt(which);
+  }
+};
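+
+// Typical use of BinopEffectsTester (a sketch of the pattern in the tests
+// below): construct it for one JS-level operator and two input types, then
+// assert on {result} and on how the effect chain was rewritten:
+//
+//   BinopEffectsTester B(R.javascript.Add(), Type::Number(), Type::Object());
+//   Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
+//   B.CheckEffectOrdering(i1);  // Effects: start -> i1 -> effect_use.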
+
+
+// Helper function for strict and non-strict equality reductions.
+void CheckEqualityReduction(JSTypedLoweringTester* R, bool strict, Node* l,
+                            Node* r, IrOpcode::Value expected) {
+  for (int j = 0; j < 2; j++) {
+    Node* p0 = j == 0 ? l : r;
+    Node* p1 = j == 1 ? l : r;
+
+    {
+      Node* eq = strict ? R->graph.NewNode(R->javascript.StrictEqual(), p0, p1)
+                        : R->Binop(R->javascript.Equal(), p0, p1);
+      Node* r = R->reduce(eq);
+      R->CheckPureBinop(expected, r);
+    }
+
+    {
+      Node* ne = strict
+                     ? R->graph.NewNode(R->javascript.StrictNotEqual(), p0, p1)
+                     : R->Binop(R->javascript.NotEqual(), p0, p1);
+      Node* n = R->reduce(ne);
+      CHECK_EQ(IrOpcode::kBooleanNot, n->opcode());
+      Node* r = n->InputAt(0);
+      R->CheckPureBinop(expected, r);
+    }
+  }
+}
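+
+// For example, with two number-typed parameters this checks both directions:
+//   p0 == p1  =>  NumberEqual(p0, p1)
+//   p0 != p1  =>  BooleanNot(NumberEqual(p0, p1))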
+
+
+TEST(EqualityForNumbers) {
+  JSTypedLoweringTester R;
+
+  Type* simple_number_types[] = {Type::UnsignedSmall(), Type::SignedSmall(),
+                                 Type::Signed32(), Type::Unsigned32(),
+                                 Type::Number()};
+
+
+  for (size_t i = 0; i < ARRAY_SIZE(simple_number_types); ++i) {
+    Node* p0 = R.Parameter(simple_number_types[i], 0);
+
+    for (size_t j = 0; j < ARRAY_SIZE(simple_number_types); ++j) {
+      Node* p1 = R.Parameter(simple_number_types[j], 1);
+
+      CheckEqualityReduction(&R, true, p0, p1, IrOpcode::kNumberEqual);
+      CheckEqualityReduction(&R, false, p0, p1, IrOpcode::kNumberEqual);
+    }
+  }
+}
+
+
+TEST(StrictEqualityForRefEqualTypes) {
+  JSTypedLoweringTester R;
+
+  Type* types[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
+                   Type::Object(), Type::Receiver()};
+
+  Node* p0 = R.Parameter(Type::Any());
+  for (size_t i = 0; i < ARRAY_SIZE(types); i++) {
+    Node* p1 = R.Parameter(types[i]);
+    CheckEqualityReduction(&R, true, p0, p1, IrOpcode::kReferenceEqual);
+  }
+  // TODO(titzer): Equal(RefEqualTypes)
+}
+
+
+TEST(StringEquality) {
+  JSTypedLoweringTester R;
+  Node* p0 = R.Parameter(Type::String());
+  Node* p1 = R.Parameter(Type::String());
+
+  CheckEqualityReduction(&R, true, p0, p1, IrOpcode::kStringEqual);
+  CheckEqualityReduction(&R, false, p0, p1, IrOpcode::kStringEqual);
+}
+
+
+TEST(RemovePureNumberBinopEffects) {
+  JSTypedLoweringTester R;
+
+  Operator* ops[] = {
+      R.javascript.Equal(),           R.simplified.NumberEqual(),
+      R.javascript.Add(),             R.simplified.NumberAdd(),
+      R.javascript.Subtract(),        R.simplified.NumberSubtract(),
+      R.javascript.Multiply(),        R.simplified.NumberMultiply(),
+      R.javascript.Divide(),          R.simplified.NumberDivide(),
+      R.javascript.Modulus(),         R.simplified.NumberModulus(),
+      R.javascript.LessThan(),        R.simplified.NumberLessThan(),
+      R.javascript.LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
+  };
+
+  for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Number(), Type::Number());
+    CHECK_EQ(ops[j + 1]->opcode(), B.result->op()->opcode());
+
+    B.R.CheckPureBinop(B.result->opcode(), B.result);
+
+    B.CheckNoOp(0);
+    B.CheckNoOp(1);
+
+    B.CheckEffectsRemoved();
+  }
+}
+
+
+TEST(OrderNumberBinopEffects1) {
+  JSTypedLoweringTester R;
+
+  Operator* ops[] = {
+      R.javascript.Subtract(), R.simplified.NumberSubtract(),
+      R.javascript.Multiply(), R.simplified.NumberMultiply(),
+      R.javascript.Divide(),   R.simplified.NumberDivide(),
+      R.javascript.Modulus(),  R.simplified.NumberModulus(),
+  };
+
+  for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Object(), Type::String());
+    CHECK_EQ(ops[j + 1]->opcode(), B.result->op()->opcode());
+
+    Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
+    Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
+
+    CHECK_EQ(B.p0, i0->InputAt(0));
+    CHECK_EQ(B.p1, i1->InputAt(0));
+
+    // Effects should be ordered start -> i0 -> i1 -> effect_use
+    B.CheckEffectOrdering(i0, i1);
+  }
+}
+
+
+TEST(OrderNumberBinopEffects2) {
+  JSTypedLoweringTester R;
+
+  Operator* ops[] = {
+      R.javascript.Add(),      R.simplified.NumberAdd(),
+      R.javascript.Subtract(), R.simplified.NumberSubtract(),
+      R.javascript.Multiply(), R.simplified.NumberMultiply(),
+      R.javascript.Divide(),   R.simplified.NumberDivide(),
+      R.javascript.Modulus(),  R.simplified.NumberModulus(),
+  };
+
+  for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Number(), Type::Object());
+
+    Node* i0 = B.CheckNoOp(0);
+    Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
+
+    CHECK_EQ(B.p0, i0);
+    CHECK_EQ(B.p1, i1->InputAt(0));
+
+    // Effects should be ordered start -> i1 -> effect_use
+    B.CheckEffectOrdering(i1);
+  }
+
+  for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Object(), Type::Number());
+
+    Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
+    Node* i1 = B.CheckNoOp(1);
+
+    CHECK_EQ(B.p0, i0->InputAt(0));
+    CHECK_EQ(B.p1, i1);
+
+    // Effects should be ordered start -> i0 -> effect_use
+    B.CheckEffectOrdering(i0);
+  }
+}
+
+
+TEST(OrderCompareEffects) {
+  JSTypedLoweringTester R;
+
+  Operator* ops[] = {
+      R.javascript.GreaterThan(), R.simplified.NumberLessThan(),
+      R.javascript.GreaterThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
+  };
+
+  for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Object(), Type::String());
+    CHECK_EQ(ops[j + 1]->opcode(), B.result->op()->opcode());
+
+    Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
+    Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
+
+    // Inputs should be commuted.
+    CHECK_EQ(B.p1, i0->InputAt(0));
+    CHECK_EQ(B.p0, i1->InputAt(0));
+
+    // But effects should be ordered start -> i1 -> i0 -> effect_use
+    B.CheckEffectOrdering(i1, i0);
+  }
+
+  for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Number(), Type::Object());
+
+    Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
+    Node* i1 = B.result->InputAt(1);
+
+    CHECK_EQ(B.p1, i0->InputAt(0));  // Should be commuted.
+    CHECK_EQ(B.p0, i1);
+
+    // Effects should be ordered start -> i0 -> effect_use
+    B.CheckEffectOrdering(i0);
+  }
+
+  for (size_t j = 0; j < ARRAY_SIZE(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Object(), Type::Number());
+
+    Node* i0 = B.result->InputAt(0);
+    Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
+
+    CHECK_EQ(B.p1, i0);  // Should be commuted.
+    CHECK_EQ(B.p0, i1->InputAt(0));
+
+    // Effects should be ordered start -> i1 -> effect_use
+    B.CheckEffectOrdering(i1);
+  }
+}
+
+
+TEST(Int32BinopEffects) {
+  JSBitwiseTypedLoweringTester R;
+
+  for (int j = 0; j < R.kNumberOps; j += 2) {
+    bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
+    BinopEffectsTester B(R.ops[j], I32Type(signed_left), I32Type(signed_right));
+    CHECK_EQ(R.ops[j + 1]->opcode(), B.result->op()->opcode());
+
+    B.R.CheckPureBinop(B.result->opcode(), B.result);
+
+    B.CheckNoOp(0);
+    B.CheckNoOp(1);
+
+    B.CheckEffectsRemoved();
+  }
+
+  for (int j = 0; j < R.kNumberOps; j += 2) {
+    bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
+    BinopEffectsTester B(R.ops[j], Type::Number(), Type::Number());
+    CHECK_EQ(R.ops[j + 1]->opcode(), B.result->op()->opcode());
+
+    B.R.CheckPureBinop(B.result->opcode(), B.result);
+
+    B.CheckConvertedInput(NumberToI32(signed_left), 0, false);
+    B.CheckConvertedInput(NumberToI32(signed_right), 1, false);
+
+    B.CheckEffectsRemoved();
+  }
+
+  for (int j = 0; j < R.kNumberOps; j += 2) {
+    bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
+    BinopEffectsTester B(R.ops[j], Type::Number(), Type::Object());
+
+    B.R.CheckPureBinop(B.result->opcode(), B.result);
+
+    Node* i0 = B.CheckConvertedInput(NumberToI32(signed_left), 0, false);
+    Node* i1 = B.CheckConvertedInput(NumberToI32(signed_right), 1, false);
+
+    CHECK_EQ(B.p0, i0->InputAt(0));
+    Node* ii1 = B.CheckConverted(IrOpcode::kJSToNumber, i1->InputAt(0), true);
+
+    CHECK_EQ(B.p1, ii1->InputAt(0));
+
+    B.CheckEffectOrdering(ii1);
+  }
+
+  for (int j = 0; j < R.kNumberOps; j += 2) {
+    bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
+    BinopEffectsTester B(R.ops[j], Type::Object(), Type::Number());
+
+    B.R.CheckPureBinop(B.result->opcode(), B.result);
+
+    Node* i0 = B.CheckConvertedInput(NumberToI32(signed_left), 0, false);
+    Node* i1 = B.CheckConvertedInput(NumberToI32(signed_right), 1, false);
+
+    Node* ii0 = B.CheckConverted(IrOpcode::kJSToNumber, i0->InputAt(0), true);
+    CHECK_EQ(B.p1, i1->InputAt(0));
+
+    CHECK_EQ(B.p0, ii0->InputAt(0));
+
+    B.CheckEffectOrdering(ii0);
+  }
+
+  for (int j = 0; j < R.kNumberOps; j += 2) {
+    bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
+    BinopEffectsTester B(R.ops[j], Type::Object(), Type::Object());
+
+    B.R.CheckPureBinop(B.result->opcode(), B.result);
+
+    Node* i0 = B.CheckConvertedInput(NumberToI32(signed_left), 0, false);
+    Node* i1 = B.CheckConvertedInput(NumberToI32(signed_right), 1, false);
+
+    Node* ii0 = B.CheckConverted(IrOpcode::kJSToNumber, i0->InputAt(0), true);
+    Node* ii1 = B.CheckConverted(IrOpcode::kJSToNumber, i1->InputAt(0), true);
+
+    CHECK_EQ(B.p0, ii0->InputAt(0));
+    CHECK_EQ(B.p1, ii1->InputAt(0));
+
+    B.CheckEffectOrdering(ii0, ii1);
+  }
+}
+
+
+TEST(UnaryNotEffects) {
+  JSTypedLoweringTester R;
+  Operator* opnot = R.javascript.UnaryNot();
+
+  for (size_t i = 0; i < ARRAY_SIZE(kJSTypes); i++) {
+    Node* p0 = R.Parameter(kJSTypes[i], 0);
+    Node* orig = R.Unop(opnot, p0);
+    Node* effect_use = R.UseForEffect(orig);
+    Node* value_use = R.graph.NewNode(R.common.Return(), orig);
+    Node* r = R.reduce(orig);
+    // TODO(titzer): test will break if/when js-typed-lowering constant folds.
+    CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
+
+    CHECK_EQ(r, value_use->InputAt(0));
+
+    if (r->InputAt(0) == orig && orig->opcode() == IrOpcode::kJSToBoolean) {
+      // The original node was turned into a ToBoolean, which has an effect.
+      R.CheckEffectInput(R.start(), orig);
+      R.CheckEffectInput(orig, effect_use);
+    } else {
+      // effect should have been removed from this node.
+      R.CheckEffectInput(R.start(), effect_use);
+    }
+  }
+}
+
+
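+// When a NumberAdd of int32-typed inputs is used only by an int32 bitwise
+// (or shift) operator, it should be narrowed in place to Int32Add; the
+// narrowing is observed via the node's opcode and its new type bounds.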
+TEST(Int32AddNarrowing) {
+  {
+    JSBitwiseTypedLoweringTester R;
+
+    for (int o = 0; o < R.kNumberOps; o += 2) {
+      for (size_t i = 0; i < ARRAY_SIZE(kInt32Types); i++) {
+        Node* n0 = R.Parameter(kInt32Types[i]);
+        for (size_t j = 0; j < ARRAY_SIZE(kInt32Types); j++) {
+          Node* n1 = R.Parameter(kInt32Types[j]);
+          Node* one = R.graph.NewNode(R.common.NumberConstant(1));
+
+          for (int l = 0; l < 2; l++) {
+            Node* add_node = R.Binop(R.simplified.NumberAdd(), n0, n1);
+            Node* or_node =
+                R.Binop(R.ops[o], l ? add_node : one, l ? one : add_node);
+            Node* r = R.reduce(or_node);
+
+            CHECK_EQ(R.ops[o + 1]->opcode(), r->op()->opcode());
+            CHECK_EQ(IrOpcode::kInt32Add, add_node->opcode());
+            bool is_signed = l ? R.signedness[o] : R.signedness[o + 1];
+
+            Type* add_type = NodeProperties::GetBounds(add_node).upper;
+            CHECK(add_type->Is(I32Type(is_signed)));
+          }
+        }
+      }
+    }
+  }
+  {
+    JSBitwiseShiftTypedLoweringTester R;
+
+    for (int o = 0; o < R.kNumberOps; o += 2) {
+      for (size_t i = 0; i < ARRAY_SIZE(kInt32Types); i++) {
+        Node* n0 = R.Parameter(kInt32Types[i]);
+        for (size_t j = 0; j < ARRAY_SIZE(kInt32Types); j++) {
+          Node* n1 = R.Parameter(kInt32Types[j]);
+          Node* one = R.graph.NewNode(R.common.NumberConstant(1));
+
+          for (int l = 0; l < 2; l++) {
+            Node* add_node = R.Binop(R.simplified.NumberAdd(), n0, n1);
+            Node* or_node =
+                R.Binop(R.ops[o], l ? add_node : one, l ? one : add_node);
+            Node* r = R.reduce(or_node);
+
+            CHECK_EQ(R.ops[o + 1]->opcode(), r->op()->opcode());
+            CHECK_EQ(IrOpcode::kInt32Add, add_node->opcode());
+            bool is_signed = l ? R.signedness[o] : R.signedness[o + 1];
+
+            Type* add_type = NodeProperties::GetBounds(add_node).upper;
+            CHECK(add_type->Is(I32Type(is_signed)));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(Int32AddNarrowingNotOwned) {
+  JSBitwiseTypedLoweringTester R;
+
+  for (int o = 0; o < R.kNumberOps; o += 2) {
+    Node* n0 = R.Parameter(I32Type(R.signedness[o]));
+    Node* n1 = R.Parameter(I32Type(R.signedness[o + 1]));
+    Node* one = R.graph.NewNode(R.common.NumberConstant(1));
+
+    Node* add_node = R.Binop(R.simplified.NumberAdd(), n0, n1);
+    Node* or_node = R.Binop(R.ops[o], add_node, one);
+    Node* other_use = R.Binop(R.simplified.NumberAdd(), add_node, one);
+    Node* r = R.reduce(or_node);
+    CHECK_EQ(R.ops[o + 1]->opcode(), r->op()->opcode());
+    // Should not be narrowed to Int32Add because the NumberAdd has another
+    // use.
+    CHECK_EQ(IrOpcode::kNumberAdd, add_node->opcode());
+    // Conversion to int32 should be done.
+    CheckToI32(add_node, r->InputAt(0), R.signedness[o]);
+    CheckToI32(one, r->InputAt(1), R.signedness[o + 1]);
+    // The other use should also not be touched.
+    CHECK_EQ(add_node, other_use->InputAt(0));
+    CHECK_EQ(one, other_use->InputAt(1));
+  }
+}
+
+
+TEST(Int32Comparisons) {
+  JSTypedLoweringTester R;
+
+  struct Entry {
+    Operator* js_op;
+    Operator* uint_op;
+    Operator* int_op;
+    Operator* num_op;
+    bool commute;
+  };
+
+  Entry ops[] = {
+      {R.javascript.LessThan(), R.machine.Uint32LessThan(),
+       R.machine.Int32LessThan(), R.simplified.NumberLessThan(), false},
+      {R.javascript.LessThanOrEqual(), R.machine.Uint32LessThanOrEqual(),
+       R.machine.Int32LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
+       false},
+      {R.javascript.GreaterThan(), R.machine.Uint32LessThan(),
+       R.machine.Int32LessThan(), R.simplified.NumberLessThan(), true},
+      {R.javascript.GreaterThanOrEqual(), R.machine.Uint32LessThanOrEqual(),
+       R.machine.Int32LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
+       true}};
+
+  for (size_t o = 0; o < ARRAY_SIZE(ops); o++) {
+    for (size_t i = 0; i < ARRAY_SIZE(kNumberTypes); i++) {
+      Type* t0 = kNumberTypes[i];
+      Node* p0 = R.Parameter(t0, 0);
+
+      for (size_t j = 0; j < ARRAY_SIZE(kNumberTypes); j++) {
+        Type* t1 = kNumberTypes[j];
+        Node* p1 = R.Parameter(t1, 1);
+
+        Node* cmp = R.Binop(ops[o].js_op, p0, p1);
+        Node* r = R.reduce(cmp);
+
+        Operator* expected;
+        if (t0->Is(Type::Unsigned32()) && t1->Is(Type::Unsigned32())) {
+          expected = ops[o].uint_op;
+        } else if (t0->Is(Type::Signed32()) && t1->Is(Type::Signed32())) {
+          expected = ops[o].int_op;
+        } else {
+          expected = ops[o].num_op;
+        }
+        R.CheckPureBinop(expected, r);
+        if (ops[o].commute) {
+          CHECK_EQ(p1, r->InputAt(0));
+          CHECK_EQ(p0, r->InputAt(1));
+        } else {
+          CHECK_EQ(p0, r->InputAt(0));
+          CHECK_EQ(p1, r->InputAt(1));
+        }
+      }
+    }
+  }
+}
diff --git a/test/cctest/compiler/test-linkage.cc b/test/cctest/compiler/test-linkage.cc
new file mode 100644 (file)
index 0000000..6d9453f
--- /dev/null
@@ -0,0 +1,113 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/compiler.h"
+#include "src/zone.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/schedule.h"
+#include "test/cctest/cctest.h"
+
+#if V8_TURBOFAN_TARGET
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+static SimpleOperator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
+                                     0, 0, "dummy");
+
+// So we can get a real JS function.
+static Handle<JSFunction> Compile(const char* source) {
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<String> source_code = isolate->factory()
+                                   ->NewStringFromUtf8(CStrVector(source))
+                                   .ToHandleChecked();
+  Handle<SharedFunctionInfo> shared_function = Compiler::CompileScript(
+      source_code, Handle<String>(), 0, 0, false,
+      Handle<Context>(isolate->native_context()), NULL, NULL,
+      v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE);
+  return isolate->factory()->NewFunctionFromSharedFunctionInfo(
+      shared_function, isolate->native_context());
+}
+
+
+TEST(TestLinkageCreate) {
+  InitializedHandleScope handles;
+  Handle<JSFunction> function = Compile("a + b");
+  CompilationInfoWithZone info(function);
+  Linkage linkage(&info);
+}
+
+
+TEST(TestLinkageJSFunctionIncoming) {
+  InitializedHandleScope handles;
+
+  const char* sources[] = {"(function() { })", "(function(a) { })",
+                           "(function(a,b) { })", "(function(a,b,c) { })"};
+
+  for (int i = 0; i < static_cast<int>(ARRAY_SIZE(sources)); i++) {
+    i::HandleScope handles(CcTest::i_isolate());
+    Handle<JSFunction> function = v8::Utils::OpenHandle(
+        *v8::Handle<v8::Function>::Cast(CompileRun(sources[i])));
+    CompilationInfoWithZone info(function);
+    Linkage linkage(&info);
+
+    CallDescriptor* descriptor = linkage.GetIncomingDescriptor();
+    CHECK_NE(NULL, descriptor);
+
+    CHECK_EQ(1 + i, descriptor->ParameterCount());
+    CHECK_EQ(1, descriptor->ReturnCount());
+    CHECK_EQ(Operator::kNoProperties, descriptor->properties());
+    CHECK_EQ(true, descriptor->IsJSFunctionCall());
+  }
+}
+
+
+TEST(TestLinkageCodeStubIncoming) {
+  Isolate* isolate = CcTest::InitIsolateOnce();
+  CompilationInfoWithZone info(static_cast<HydrogenCodeStub*>(NULL), isolate);
+  Linkage linkage(&info);
+  // TODO(titzer): test linkage creation with a bona fide code stub;
+  // this just checks current behavior.
+  CHECK_EQ(NULL, linkage.GetIncomingDescriptor());
+}
+
+
+TEST(TestLinkageJSCall) {
+  HandleAndZoneScope handles;
+  Handle<JSFunction> function = Compile("a + c");
+  CompilationInfoWithZone info(function);
+  Linkage linkage(&info);
+
+  for (int i = 0; i < 32; i++) {
+    CallDescriptor* descriptor = linkage.GetJSCallDescriptor(i);
+    CHECK_NE(NULL, descriptor);
+    CHECK_EQ(i, descriptor->ParameterCount());
+    CHECK_EQ(1, descriptor->ReturnCount());
+    CHECK_EQ(Operator::kNoProperties, descriptor->properties());
+    CHECK_EQ(true, descriptor->IsJSFunctionCall());
+  }
+}
+
+
+TEST(TestLinkageRuntimeCall) {
+  // TODO(titzer): test linkage creation for outgoing runtime calls.
+}
+
+
+TEST(TestLinkageStubCall) {
+  // TODO(titzer): test linkage creation for outgoing stub calls.
+}
+
+
+#endif  // V8_TURBOFAN_TARGET
diff --git a/test/cctest/compiler/test-machine-operator-reducer.cc b/test/cctest/compiler/test-machine-operator-reducer.cc
new file mode 100644 (file)
index 0000000..6a82f5a
--- /dev/null
@@ -0,0 +1,776 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/cctest.h"
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/machine-operator-reducer.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
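+// Note: the {volatile} qualifiers on the value parameters below are
+// presumably there to keep the C++ compiler from constant-folding the
+// expected values, so checks compare against values computed at runtime.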
+template <typename T>
+Operator* NewConstantOperator(CommonOperatorBuilder* common, volatile T value);
+
+template <>
+Operator* NewConstantOperator<int32_t>(CommonOperatorBuilder* common,
+                                       volatile int32_t value) {
+  return common->Int32Constant(value);
+}
+
+template <>
+Operator* NewConstantOperator<double>(CommonOperatorBuilder* common,
+                                      volatile double value) {
+  return common->Float64Constant(value);
+}
+
+
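+// Helper for testing MachineOperatorReducer: set {binop} to the operator
+// under test, then use the Check* methods to assert on single reduction
+// steps.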
+class ReducerTester : public HandleAndZoneScope {
+ public:
+  ReducerTester()
+      : isolate(main_isolate()),
+        binop(NULL),
+        unop(NULL),
+        machine(main_zone()),
+        common(main_zone()),
+        graph(main_zone()),
+        maxuint32(Constant<int32_t>(kMaxUInt32)) {}
+
+  Isolate* isolate;
+  Operator* binop;
+  Operator* unop;
+  MachineOperatorBuilder machine;
+  CommonOperatorBuilder common;
+  Graph graph;
+  Node* maxuint32;
+
+  template <typename T>
+  Node* Constant(volatile T value) {
+    return graph.NewNode(NewConstantOperator<T>(&common, value));
+  }
+
+  // Check that the reduction of this binop applied to constants {a} and {b}
+  // yields the {expect} value.
+  template <typename T>
+  void CheckFoldBinop(volatile T expect, volatile T a, volatile T b) {
+    CheckFoldBinop<T>(expect, Constant<T>(a), Constant<T>(b));
+  }
+
+  // Check that the reduction of this binop applied to {a} and {b} yields
+  // the {expect} value.
+  template <typename T>
+  void CheckFoldBinop(volatile T expect, Node* a, Node* b) {
+    CHECK_NE(NULL, binop);
+    Node* n = graph.NewNode(binop, a, b);
+    MachineOperatorReducer reducer(&graph);
+    Reduction reduction = reducer.Reduce(n);
+    CHECK(reduction.Changed());
+    CHECK_NE(n, reduction.replacement());
+    CHECK_EQ(expect, ValueOf<T>(reduction.replacement()->op()));
+  }
+
+  // Check that the reduction of this binop applied to {a} and {b} yields
+  // the {expect} node.
+  void CheckBinop(Node* expect, Node* a, Node* b) {
+    CHECK_NE(NULL, binop);
+    Node* n = graph.NewNode(binop, a, b);
+    MachineOperatorReducer reducer(&graph);
+    Reduction reduction = reducer.Reduce(n);
+    CHECK(reduction.Changed());
+    CHECK_EQ(expect, reduction.replacement());
+  }
+
+  // Check that the reduction of this binop applied to {left} and {right} yields
+  // this binop applied to {left_expect} and {right_expect}.
+  void CheckFoldBinop(Node* left_expect, Node* right_expect, Node* left,
+                      Node* right) {
+    CHECK_NE(NULL, binop);
+    Node* n = graph.NewNode(binop, left, right);
+    MachineOperatorReducer reducer(&graph);
+    Reduction reduction = reducer.Reduce(n);
+    CHECK(reduction.Changed());
+    CHECK_EQ(binop, reduction.replacement()->op());
+    CHECK_EQ(left_expect, reduction.replacement()->InputAt(0));
+    CHECK_EQ(right_expect, reduction.replacement()->InputAt(1));
+  }
+
+  // Check that the reduction of this binop applied to {left} and {right} yields
+  // the {op_expect} applied to {left_expect} and {right_expect}.
+  template <typename T>
+  void CheckFoldBinop(volatile T left_expect, Operator* op_expect,
+                      Node* right_expect, Node* left, Node* right) {
+    CHECK_NE(NULL, binop);
+    Node* n = graph.NewNode(binop, left, right);
+    MachineOperatorReducer reducer(&graph);
+    Reduction r = reducer.Reduce(n);
+    CHECK(r.Changed());
+    CHECK_EQ(op_expect->opcode(), r.replacement()->op()->opcode());
+    CHECK_EQ(left_expect, ValueOf<T>(r.replacement()->InputAt(0)->op()));
+    CHECK_EQ(right_expect, r.replacement()->InputAt(1));
+  }
+
+  // Check that the reduction of this binop applied to {left} and {right} yields
+  // the {op_expect} applied to {left_expect} and {right_expect}.
+  template <typename T>
+  void CheckFoldBinop(Node* left_expect, Operator* op_expect,
+                      volatile T right_expect, Node* left, Node* right) {
+    CHECK_NE(NULL, binop);
+    Node* n = graph.NewNode(binop, left, right);
+    MachineOperatorReducer reducer(&graph);
+    Reduction r = reducer.Reduce(n);
+    CHECK(r.Changed());
+    CHECK_EQ(op_expect->opcode(), r.replacement()->op()->opcode());
+    CHECK_EQ(left_expect, r.replacement()->InputAt(0));
+    CHECK_EQ(right_expect, ValueOf<T>(r.replacement()->InputAt(1)->op()));
+  }
+
+  // Check that if the given constant appears on the left, the reducer will
+  // swap it to be on the right.
+  template <typename T>
+  void CheckPutConstantOnRight(volatile T constant) {
+    // TODO(titzer): CHECK(binop->HasProperty(Operator::kCommutative));
+    Node* p = Parameter();
+    Node* k = Constant<T>(constant);
+    {
+      Node* n = graph.NewNode(binop, k, p);
+      MachineOperatorReducer reducer(&graph);
+      Reduction reduction = reducer.Reduce(n);
+      CHECK(!reduction.Changed() || reduction.replacement() == n);
+      CHECK_EQ(p, n->InputAt(0));
+      CHECK_EQ(k, n->InputAt(1));
+    }
+    {
+      Node* n = graph.NewNode(binop, p, k);
+      MachineOperatorReducer reducer(&graph);
+      Reduction reduction = reducer.Reduce(n);
+      CHECK(!reduction.Changed());
+      CHECK_EQ(p, n->InputAt(0));
+      CHECK_EQ(k, n->InputAt(1));
+    }
+  }
+
+  // Check that if the given constant appears on the left, the reducer will
+  // *NOT* swap it to be on the right.
+  template <typename T>
+  void CheckDontPutConstantOnRight(volatile T constant) {
+    CHECK(!binop->HasProperty(Operator::kCommutative));
+    Node* p = Parameter();
+    Node* k = Constant<T>(constant);
+    Node* n = graph.NewNode(binop, k, p);
+    MachineOperatorReducer reducer(&graph);
+    Reduction reduction = reducer.Reduce(n);
+    CHECK(!reduction.Changed());
+    CHECK_EQ(k, n->InputAt(0));
+    CHECK_EQ(p, n->InputAt(1));
+  }
+
+  Node* Parameter(int32_t index = 0) {
+    return graph.NewNode(common.Parameter(index));
+  }
+};
+
+
+TEST(ReduceWord32And) {
+  ReducerTester R;
+  R.binop = R.machine.Word32And();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x & y, x, y);
+    }
+  }
+
+  R.CheckPutConstantOnRight(33);
+  R.CheckPutConstantOnRight(44000);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+  Node* minus_1 = R.Constant<int32_t>(-1);
+
+  R.CheckBinop(zero, x, zero);  // x  & 0  => 0
+  R.CheckBinop(zero, zero, x);  // 0  & x  => 0
+  R.CheckBinop(x, x, minus_1);  // x  & -1 => x
+  R.CheckBinop(x, minus_1, x);  // -1 & x  => x
+  R.CheckBinop(x, x, x);        // x  & x  => x
+}
+
+
+TEST(ReduceWord32Or) {
+  ReducerTester R;
+  R.binop = R.machine.Word32Or();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x | y, x, y);
+    }
+  }
+
+  R.CheckPutConstantOnRight(36);
+  R.CheckPutConstantOnRight(44001);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+  Node* minus_1 = R.Constant<int32_t>(-1);
+
+  R.CheckBinop(x, x, zero);           // x  | 0  => x
+  R.CheckBinop(x, zero, x);           // 0  | x  => x
+  R.CheckBinop(minus_1, x, minus_1);  // x  | -1 => -1
+  R.CheckBinop(minus_1, minus_1, x);  // -1 | x  => -1
+  R.CheckBinop(x, x, x);              // x  | x  => x
+}
+
+
+TEST(ReduceWord32Xor) {
+  ReducerTester R;
+  R.binop = R.machine.Word32Xor();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x ^ y, x, y);
+    }
+  }
+
+  R.CheckPutConstantOnRight(39);
+  R.CheckPutConstantOnRight(4403);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckBinop(x, x, zero);            // x ^ 0  => x
+  R.CheckBinop(x, zero, x);            // 0 ^ x  => x
+  R.CheckFoldBinop<int32_t>(0, x, x);  // x ^ x  => 0
+}
+
+
+TEST(ReduceWord32Shl) {
+  ReducerTester R;
+  R.binop = R.machine.Word32Shl();
+
+  // TODO(titzer): out of range shifts
+  FOR_INT32_INPUTS(i) {
+    for (int y = 0; y < 32; y++) {
+      int32_t x = *i;
+      R.CheckFoldBinop<int32_t>(x << y, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(44);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckBinop(x, x, zero);  // x << 0  => x
+}
+
+
+TEST(ReduceWord32Shr) {
+  ReducerTester R;
+  R.binop = R.machine.Word32Shr();
+
+  // TODO(titzer): test out of range shifts
+  FOR_UINT32_INPUTS(i) {
+    for (uint32_t y = 0; y < 32; y++) {
+      uint32_t x = *i;
+      R.CheckFoldBinop<int32_t>(x >> y, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(44);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckBinop(x, x, zero);  // x >>> 0  => x
+}
+
+
+TEST(ReduceWord32Sar) {
+  ReducerTester R;
+  R.binop = R.machine.Word32Sar();
+
+  // TODO(titzer): test out of range shifts
+  FOR_INT32_INPUTS(i) {
+    for (int32_t y = 0; y < 32; y++) {
+      int32_t x = *i;
+      R.CheckFoldBinop<int32_t>(x >> y, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(44);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckBinop(x, x, zero);  // x >> 0  => x
+}
+
+
+TEST(ReduceWord32Equal) {
+  ReducerTester R;
+  R.binop = R.machine.Word32Equal();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x == y ? 1 : 0, x, y);
+    }
+  }
+
+  R.CheckPutConstantOnRight(48);
+  R.CheckPutConstantOnRight(-48);
+
+  Node* x = R.Parameter(0);
+  Node* y = R.Parameter(1);
+  Node* zero = R.Constant<int32_t>(0);
+  Node* sub = R.graph.NewNode(R.machine.Int32Sub(), x, y);
+
+  R.CheckFoldBinop<int32_t>(1, x, x);  // x == x  => 1
+  R.CheckFoldBinop(x, y, sub, zero);   // x - y == 0  => x == y
+  R.CheckFoldBinop(x, y, zero, sub);   // 0 == x - y  => x == y
+}
+
+
+TEST(ReduceInt32Add) {
+  ReducerTester R;
+  R.binop = R.machine.Int32Add();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x + y, x, y);  // TODO(titzer): signed overflow
+    }
+  }
+
+  R.CheckPutConstantOnRight(41);
+  R.CheckPutConstantOnRight(4407);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckBinop(x, x, zero);  // x + 0  => x
+  R.CheckBinop(x, zero, x);  // 0 + x  => x
+}
+
+
+TEST(ReduceInt32Sub) {
+  ReducerTester R;
+  R.binop = R.machine.Int32Sub();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x - y, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(412);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckBinop(x, x, zero);  // x - 0  => x
+}
+
+
+TEST(ReduceInt32Mul) {
+  ReducerTester R;
+  R.binop = R.machine.Int32Mul();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x * y, x, y);  // TODO(titzer): signed overflow
+    }
+  }
+
+  R.CheckPutConstantOnRight(4111);
+  R.CheckPutConstantOnRight(-4407);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+  Node* one = R.Constant<int32_t>(1);
+  Node* minus_one = R.Constant<int32_t>(-1);
+
+  R.CheckBinop(zero, x, zero);  // x * 0  => 0
+  R.CheckBinop(zero, zero, x);  // 0 * x  => 0
+  R.CheckBinop(x, x, one);      // x * 1  => x
+  R.CheckBinop(x, one, x);      // 1 * x  => x
+  R.CheckFoldBinop<int32_t>(0, R.machine.Int32Sub(), x, minus_one,
+                            x);  // -1 * x  => 0 - x
+  R.CheckFoldBinop<int32_t>(0, R.machine.Int32Sub(), x, x,
+                            minus_one);  // x * -1  => 0 - x
+
+  for (int32_t n = 1; n < 31; ++n) {
+    Node* multiplier = R.Constant<int32_t>(1 << n);
+    R.CheckFoldBinop<int32_t>(x, R.machine.Word32Shl(), n, x,
+                              multiplier);  // x * 2^n => x << n
+    R.CheckFoldBinop<int32_t>(x, R.machine.Word32Shl(), n, multiplier,
+                              x);  // 2^n * x => x << n
+  }
+}
+
+
+TEST(ReduceInt32Div) {
+  ReducerTester R;
+  R.binop = R.machine.Int32Div();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      if (y == 0) continue;              // TODO(titzer): test / 0
+      int32_t r = y == -1 ? -x : x / y;  // INT_MIN / -1 may explode in C
+      R.CheckFoldBinop<int32_t>(r, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(41111);
+  R.CheckDontPutConstantOnRight(-44071);
+
+  Node* x = R.Parameter();
+  Node* one = R.Constant<int32_t>(1);
+  Node* minus_one = R.Constant<int32_t>(-1);
+
+  R.CheckBinop(x, x, one);  // x / 1  => x
+  // TODO(titzer): 0 / x  => 0 if x != 0
+  // TODO(titzer): x / 2^n => x >> n and round
+  R.CheckFoldBinop<int32_t>(0, R.machine.Int32Sub(), x, x,
+                            minus_one);  // x / -1  => 0 - x
+}
+
+
+TEST(ReduceInt32UDiv) {
+  ReducerTester R;
+  R.binop = R.machine.Int32UDiv();
+
+  FOR_UINT32_INPUTS(pl) {
+    FOR_UINT32_INPUTS(pr) {
+      uint32_t x = *pl, y = *pr;
+      if (y == 0) continue;  // TODO(titzer): test / 0
+      R.CheckFoldBinop<int32_t>(x / y, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(41311);
+  R.CheckDontPutConstantOnRight(-44371);
+
+  Node* x = R.Parameter();
+  Node* one = R.Constant<int32_t>(1);
+
+  R.CheckBinop(x, x, one);  // x / 1  => x
+  // TODO(titzer): 0 / x  => 0 if x != 0
+
+  for (uint32_t n = 1; n < 32; ++n) {
+    Node* divisor = R.Constant<int32_t>(1u << n);
+    R.CheckFoldBinop<int32_t>(x, R.machine.Word32Shr(), n, x,
+                              divisor);  // x / 2^n => x >> n
+  }
+}
+
+
+TEST(ReduceInt32Mod) {
+  ReducerTester R;
+  R.binop = R.machine.Int32Mod();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      if (y == 0) continue;             // TODO(titzer): test % 0
+      int32_t r = y == -1 ? 0 : x % y;  // INT_MIN % -1 may explode in C
+      R.CheckFoldBinop<int32_t>(r, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(413);
+  R.CheckDontPutConstantOnRight(-4401);
+
+  Node* x = R.Parameter();
+  Node* one = R.Constant<int32_t>(1);
+
+  R.CheckFoldBinop<int32_t>(0, x, one);  // x % 1  => 0
+  // TODO(titzer): x % 2^n => x & (2^n - 1) and round
+}
+
+
+TEST(ReduceInt32UMod) {
+  ReducerTester R;
+  R.binop = R.machine.Int32UMod();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      uint32_t x = *pl, y = *pr;
+      if (y == 0) continue;  // TODO(titzer): test x % 0
+      R.CheckFoldBinop<int32_t>(x % y, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(417);
+  R.CheckDontPutConstantOnRight(-4371);
+
+  Node* x = R.Parameter();
+  Node* one = R.Constant<int32_t>(1);
+
+  R.CheckFoldBinop<int32_t>(0, x, one);  // x % 1  => 0
+
+  for (uint32_t n = 1; n < 32; ++n) {
+    Node* divisor = R.Constant<int32_t>(1u << n);
+    R.CheckFoldBinop<int32_t>(x, R.machine.Word32And(), (1u << n) - 1, x,
+                              divisor);  // x % 2^n => x & 2^n-1
+  }
+}
+
+
+TEST(ReduceInt32LessThan) {
+  ReducerTester R;
+  R.binop = R.machine.Int32LessThan();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x < y ? 1 : 0, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(41399);
+  R.CheckDontPutConstantOnRight(-440197);
+
+  Node* x = R.Parameter(0);
+  Node* y = R.Parameter(1);
+  Node* zero = R.Constant<int32_t>(0);
+  Node* sub = R.graph.NewNode(R.machine.Int32Sub(), x, y);
+
+  R.CheckFoldBinop<int32_t>(0, x, x);  // x < x  => 0
+  R.CheckFoldBinop(x, y, sub, zero);   // x - y < 0 => x < y
+  R.CheckFoldBinop(y, x, zero, sub);   // 0 < x - y => y < x
+}
+
+
+TEST(ReduceInt32LessThanOrEqual) {
+  ReducerTester R;
+  R.binop = R.machine.Int32LessThanOrEqual();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x <= y ? 1 : 0, x, y);
+    }
+  }
+
+  FOR_INT32_INPUTS(i) { R.CheckDontPutConstantOnRight<int32_t>(*i); }
+
+  Node* x = R.Parameter(0);
+  Node* y = R.Parameter(1);
+  Node* zero = R.Constant<int32_t>(0);
+  Node* sub = R.graph.NewNode(R.machine.Int32Sub(), x, y);
+
+  R.CheckFoldBinop<int32_t>(1, x, x);  // x <= x => 1
+  R.CheckFoldBinop(x, y, sub, zero);   // x - y <= 0 => x <= y
+  R.CheckFoldBinop(y, x, zero, sub);   // 0 <= x - y => y <= x
+}
+
+
+TEST(ReduceUint32LessThan) {
+  ReducerTester R;
+  R.binop = R.machine.Uint32LessThan();
+
+  FOR_UINT32_INPUTS(pl) {
+    FOR_UINT32_INPUTS(pr) {
+      uint32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x < y ? 1 : 0, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(41399);
+  R.CheckDontPutConstantOnRight(-440197);
+
+  Node* x = R.Parameter();
+  Node* max = R.maxuint32;
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckFoldBinop<int32_t>(0, max, x);   // M < x  => 0
+  R.CheckFoldBinop<int32_t>(0, x, zero);  // x < 0  => 0
+  R.CheckFoldBinop<int32_t>(0, x, x);     // x < x  => 0
+}
+
+
+TEST(ReduceUint32LessThanOrEqual) {
+  ReducerTester R;
+  R.binop = R.machine.Uint32LessThanOrEqual();
+
+  FOR_UINT32_INPUTS(pl) {
+    FOR_UINT32_INPUTS(pr) {
+      uint32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x <= y ? 1 : 0, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(41399);
+  R.CheckDontPutConstantOnRight(-440197);
+
+  Node* x = R.Parameter();
+  Node* max = R.maxuint32;
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckFoldBinop<int32_t>(1, x, max);   // x <= M  => 1
+  R.CheckFoldBinop<int32_t>(1, zero, x);  // 0 <= x  => 1
+  R.CheckFoldBinop<int32_t>(1, x, x);     // x <= x  => 1
+}
+
+
+TEST(ReduceLoadStore) {
+  ReducerTester R;
+
+  Node* base = R.Constant<int32_t>(11);
+  Node* index = R.Constant<int32_t>(4);
+  Node* load = R.graph.NewNode(R.machine.Load(kMachineWord32), base, index);
+
+  {
+    MachineOperatorReducer reducer(&R.graph);
+    Reduction reduction = reducer.Reduce(load);
+    CHECK(!reduction.Changed());  // loads should not be reduced.
+  }
+
+  {
+    Node* store =
+        R.graph.NewNode(R.machine.Store(kMachineWord32), base, index, load);
+    MachineOperatorReducer reducer(&R.graph);
+    Reduction reduction = reducer.Reduce(store);
+    CHECK(!reduction.Changed());  // stores should not be reduced.
+  }
+}
+
+
+static void CheckNans(ReducerTester* R) {
+  Node* x = R->Parameter();
+  std::vector<double> nans = ValueHelper::nan_vector();
+  for (std::vector<double>::const_iterator pl = nans.begin(); pl != nans.end();
+       ++pl) {
+    for (std::vector<double>::const_iterator pr = nans.begin();
+         pr != nans.end(); ++pr) {
+      Node* nan1 = R->Constant<double>(*pl);
+      Node* nan2 = R->Constant<double>(*pr);
+      R->CheckBinop(nan1, x, nan1);     // x op NaN => NaN
+      R->CheckBinop(nan1, nan1, x);     // NaN op x => NaN
+      R->CheckBinop(nan1, nan2, nan1);  // NaN op NaN => NaN
+    }
+  }
+}
+
+
+TEST(ReduceFloat64Add) {
+  ReducerTester R;
+  R.binop = R.machine.Float64Add();
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double x = *pl, y = *pr;
+      R.CheckFoldBinop<double>(x + y, x, y);
+    }
+  }
+
+  FOR_FLOAT64_INPUTS(i) { R.CheckPutConstantOnRight(*i); }
+  // TODO(titzer): CheckNans(&R);
+}
+
+
+TEST(ReduceFloat64Sub) {
+  ReducerTester R;
+  R.binop = R.machine.Float64Sub();
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double x = *pl, y = *pr;
+      R.CheckFoldBinop<double>(x - y, x, y);
+    }
+  }
+  // TODO(titzer): CheckNans(&R);
+}
+
+
+TEST(ReduceFloat64Mul) {
+  ReducerTester R;
+  R.binop = R.machine.Float64Mul();
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double x = *pl, y = *pr;
+      R.CheckFoldBinop<double>(x * y, x, y);
+    }
+  }
+
+  double inf = V8_INFINITY;
+  R.CheckPutConstantOnRight(-inf);
+  R.CheckPutConstantOnRight(-0.1);
+  R.CheckPutConstantOnRight(0.1);
+  R.CheckPutConstantOnRight(inf);
+
+  Node* x = R.Parameter();
+  Node* one = R.Constant<double>(1.0);
+
+  R.CheckBinop(x, x, one);  // x * 1.0 => x
+  R.CheckBinop(x, one, x);  // 1.0 * x => x
+
+  CheckNans(&R);
+}
+
+
+TEST(ReduceFloat64Div) {
+  ReducerTester R;
+  R.binop = R.machine.Float64Div();
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double x = *pl, y = *pr;
+      R.CheckFoldBinop<double>(x / y, x, y);
+    }
+  }
+
+  Node* x = R.Parameter();
+  Node* one = R.Constant<double>(1.0);
+
+  R.CheckBinop(x, x, one);  // x / 1.0 => x
+
+  CheckNans(&R);
+}
+
+
+TEST(ReduceFloat64Mod) {
+  ReducerTester R;
+  R.binop = R.machine.Float64Mod();
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double x = *pl, y = *pr;
+      R.CheckFoldBinop<double>(modulo(x, y), x, y);
+    }
+  }
+
+  CheckNans(&R);
+}
+
+
+// TODO(titzer): test MachineOperatorReducer for Word64And
+// TODO(titzer): test MachineOperatorReducer for Word64Or
+// TODO(titzer): test MachineOperatorReducer for Word64Xor
+// TODO(titzer): test MachineOperatorReducer for Word64Shl
+// TODO(titzer): test MachineOperatorReducer for Word64Shr
+// TODO(titzer): test MachineOperatorReducer for Word64Sar
+// TODO(titzer): test MachineOperatorReducer for Word64Equal
+// TODO(titzer): test MachineOperatorReducer for Word64Not
+// TODO(titzer): test MachineOperatorReducer for Int64Add
+// TODO(titzer): test MachineOperatorReducer for Int64Sub
+// TODO(titzer): test MachineOperatorReducer for Int64Mul
+// TODO(titzer): test MachineOperatorReducer for Int64UMul
+// TODO(titzer): test MachineOperatorReducer for Int64Div
+// TODO(titzer): test MachineOperatorReducer for Int64UDiv
+// TODO(titzer): test MachineOperatorReducer for Int64Mod
+// TODO(titzer): test MachineOperatorReducer for Int64UMod
+// TODO(titzer): test MachineOperatorReducer for Int64Neg
+// TODO(titzer): test MachineOperatorReducer for ConvertInt32ToFloat64
+// TODO(titzer): test MachineOperatorReducer for ConvertFloat64ToInt32
+// TODO(titzer): test MachineOperatorReducer for Float64Compare
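+
+// Below is a minimal sketch of what the first of these could look like,
+// modeled on the Uint32 tests above. It is commented out because it assumes
+// two things this patch does not yet exercise: that MachineOperatorReducer
+// folds Word64And over constant operands, and that
+// ReducerTester::CheckFoldBinop instantiates for int64_t.
+//
+// TEST(ReduceWord64And) {
+//   ReducerTester R;
+//   R.binop = R.machine.Word64And();
+//
+//   const uint64_t inputs[] = {0, 1, 0xFFFFFFFFFFFFFFFFULL,
+//                              0x8000000000000000ULL};
+//   for (size_t i = 0; i < ARRAY_SIZE(inputs); i++) {
+//     for (size_t j = 0; j < ARRAY_SIZE(inputs); j++) {
+//       uint64_t x = inputs[i], y = inputs[j];
+//       // (x & y) should fold to a single constant node.
+//       R.CheckFoldBinop<int64_t>(x & y, x, y);
+//     }
+//   }
+// }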
diff --git a/test/cctest/compiler/test-node-algorithm.cc b/test/cctest/compiler/test-node-algorithm.cc
new file mode 100644
index 0000000..ac8fbb9
--- /dev/null
+++ b/test/cctest/compiler/test-node-algorithm.cc
@@ -0,0 +1,330 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "src/v8.h"
+
+#include "graph-tester.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/operator.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
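+// All nodes in these tests share a single no-op operator; only the shape of
+// the graph matters to the traversal algorithms under test.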
+static SimpleOperator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
+                                     0, 0, "dummy");
+
+class PreNodeVisitor : public NullNodeVisitor {
+ public:
+  GenericGraphVisit::Control Pre(Node* node) {
+    printf("NODE ID: %d\n", node->id());
+    nodes_.push_back(node);
+    return GenericGraphVisit::CONTINUE;
+  }
+  std::vector<Node*> nodes_;
+};
+
+
+class PostNodeVisitor : public NullNodeVisitor {
+ public:
+  GenericGraphVisit::Control Post(Node* node) {
+    printf("NODE ID: %d\n", node->id());
+    nodes_.push_back(node);
+    return GenericGraphVisit::CONTINUE;
+  }
+  std::vector<Node*> nodes_;
+};
+
+
+TEST(TestUseNodeVisitEmpty) {
+  GraphWithStartNodeTester graph;
+
+  PreNodeVisitor node_visitor;
+  graph.VisitNodeUsesFromStart(&node_visitor);
+
+  CHECK_EQ(1, node_visitor.nodes_.size());
+}
+
+
+TEST(TestUseNodePreOrderVisitSimple) {
+  GraphWithStartNodeTester graph;
+  Node* n2 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n3 = graph.NewNode(&dummy_operator, n2);
+  Node* n4 = graph.NewNode(&dummy_operator, n2, n3);
+  Node* n5 = graph.NewNode(&dummy_operator, n4, n2);
+  graph.SetEnd(n5);
+
+  PreNodeVisitor node_visitor;
+  graph.VisitNodeUsesFromStart(&node_visitor);
+
+  CHECK_EQ(5, node_visitor.nodes_.size());
+  CHECK(graph.start()->id() == node_visitor.nodes_[0]->id());
+  CHECK(n2->id() == node_visitor.nodes_[1]->id());
+  CHECK(n3->id() == node_visitor.nodes_[2]->id());
+  CHECK(n4->id() == node_visitor.nodes_[3]->id());
+  CHECK(n5->id() == node_visitor.nodes_[4]->id());
+}
+
+
+TEST(TestInputNodePreOrderVisitSimple) {
+  GraphWithStartNodeTester graph;
+  Node* n2 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n3 = graph.NewNode(&dummy_operator, n2);
+  Node* n4 = graph.NewNode(&dummy_operator, n2, n3);
+  Node* n5 = graph.NewNode(&dummy_operator, n4, n2);
+  graph.SetEnd(n5);
+
+  PreNodeVisitor node_visitor;
+  graph.VisitNodeInputsFromEnd(&node_visitor);
+  CHECK_EQ(5, node_visitor.nodes_.size());
+  CHECK(n5->id() == node_visitor.nodes_[0]->id());
+  CHECK(n4->id() == node_visitor.nodes_[1]->id());
+  CHECK(n2->id() == node_visitor.nodes_[2]->id());
+  CHECK(graph.start()->id() == node_visitor.nodes_[3]->id());
+  CHECK(n3->id() == node_visitor.nodes_[4]->id());
+}
+
+
+TEST(TestUseNodePostOrderVisitSimple) {
+  GraphWithStartNodeTester graph;
+  Node* n2 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n3 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n4 = graph.NewNode(&dummy_operator, n2);
+  Node* n5 = graph.NewNode(&dummy_operator, n2);
+  Node* n6 = graph.NewNode(&dummy_operator, n2);
+  Node* n7 = graph.NewNode(&dummy_operator, n3);
+  Node* end_dependencies[4] = {n4, n5, n6, n7};
+  Node* n8 = graph.NewNode(&dummy_operator, 4, end_dependencies);
+  graph.SetEnd(n8);
+
+  PostNodeVisitor node_visitor;
+  graph.VisitNodeUsesFromStart(&node_visitor);
+
+  CHECK_EQ(8, node_visitor.nodes_.size());
+  CHECK(graph.end()->id() == node_visitor.nodes_[0]->id());
+  CHECK(n4->id() == node_visitor.nodes_[1]->id());
+  CHECK(n5->id() == node_visitor.nodes_[2]->id());
+  CHECK(n6->id() == node_visitor.nodes_[3]->id());
+  CHECK(n2->id() == node_visitor.nodes_[4]->id());
+  CHECK(n7->id() == node_visitor.nodes_[5]->id());
+  CHECK(n3->id() == node_visitor.nodes_[6]->id());
+  CHECK(graph.start()->id() == node_visitor.nodes_[7]->id());
+}
+
+
+TEST(TestUseNodePostOrderVisitLong) {
+  GraphWithStartNodeTester graph;
+  Node* n2 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n3 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n4 = graph.NewNode(&dummy_operator, n2);
+  Node* n5 = graph.NewNode(&dummy_operator, n2);
+  Node* n6 = graph.NewNode(&dummy_operator, n3);
+  Node* n7 = graph.NewNode(&dummy_operator, n3);
+  Node* n8 = graph.NewNode(&dummy_operator, n5);
+  Node* n9 = graph.NewNode(&dummy_operator, n5);
+  Node* n10 = graph.NewNode(&dummy_operator, n9);
+  Node* n11 = graph.NewNode(&dummy_operator, n9);
+  Node* end_dependencies[6] = {n4, n8, n10, n11, n6, n7};
+  Node* n12 = graph.NewNode(&dummy_operator, 6, end_dependencies);
+  graph.SetEnd(n12);
+
+  PostNodeVisitor node_visitor;
+  graph.VisitNodeUsesFromStart(&node_visitor);
+
+  CHECK_EQ(12, node_visitor.nodes_.size());
+  CHECK(graph.end()->id() == node_visitor.nodes_[0]->id());
+  CHECK(n4->id() == node_visitor.nodes_[1]->id());
+  CHECK(n8->id() == node_visitor.nodes_[2]->id());
+  CHECK(n10->id() == node_visitor.nodes_[3]->id());
+  CHECK(n11->id() == node_visitor.nodes_[4]->id());
+  CHECK(n9->id() == node_visitor.nodes_[5]->id());
+  CHECK(n5->id() == node_visitor.nodes_[6]->id());
+  CHECK(n2->id() == node_visitor.nodes_[7]->id());
+  CHECK(n6->id() == node_visitor.nodes_[8]->id());
+  CHECK(n7->id() == node_visitor.nodes_[9]->id());
+  CHECK(n3->id() == node_visitor.nodes_[10]->id());
+  CHECK(graph.start()->id() == node_visitor.nodes_[11]->id());
+}
+
+
+TEST(TestUseNodePreOrderVisitCycle) {
+  GraphWithStartNodeTester graph;
+  Node* n0 = graph.start_node();
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n1);
+  n0->AppendInput(graph.main_zone(), n2);
+  graph.SetStart(n0);
+  graph.SetEnd(n2);
+
+  PreNodeVisitor node_visitor;
+  graph.VisitNodeUsesFromStart(&node_visitor);
+
+  CHECK_EQ(3, node_visitor.nodes_.size());
+  CHECK(n0->id() == node_visitor.nodes_[0]->id());
+  CHECK(n1->id() == node_visitor.nodes_[1]->id());
+  CHECK(n2->id() == node_visitor.nodes_[2]->id());
+}
+
+
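+// Exercises the traversal controls: Pre() lets node 0 be REENTERed until six
+// entries have been recorded (SKIP afterwards), DEFERs node 1 until at least
+// four entries exist, and REENTERs everything else; Post() additionally
+// requests reentry of node 4. The expectations below trace the exact order.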
+struct ReenterNodeVisitor : NullNodeVisitor {
+  GenericGraphVisit::Control Pre(Node* node) {
+    printf("[%d] PRE NODE: %d\n", static_cast<int>(nodes_.size()), node->id());
+    nodes_.push_back(node->id());
+    int size = static_cast<int>(nodes_.size());
+    switch (node->id()) {
+      case 0:
+        return size < 6 ? GenericGraphVisit::REENTER : GenericGraphVisit::SKIP;
+      case 1:
+        return size < 4 ? GenericGraphVisit::DEFER
+                        : GenericGraphVisit::CONTINUE;
+      default:
+        return GenericGraphVisit::REENTER;
+    }
+  }
+
+  GenericGraphVisit::Control Post(Node* node) {
+    printf("[%d] POST NODE: %d\n", static_cast<int>(nodes_.size()), node->id());
+    nodes_.push_back(-node->id());
+    return node->id() == 4 ? GenericGraphVisit::REENTER
+                           : GenericGraphVisit::CONTINUE;
+  }
+
+  void PreEdge(Node* from, int index, Node* to) {
+    printf("[%d] PRE EDGE: %d-%d\n", static_cast<int>(edges_.size()),
+           from->id(), to->id());
+    edges_.push_back(std::make_pair(from->id(), to->id()));
+  }
+
+  void PostEdge(Node* from, int index, Node* to) {
+    printf("[%d] POST EDGE: %d-%d\n", static_cast<int>(edges_.size()),
+           from->id(), to->id());
+    edges_.push_back(std::make_pair(-from->id(), -to->id()));
+  }
+
+  std::vector<int> nodes_;
+  std::vector<std::pair<int, int> > edges_;
+};
+
+
+TEST(TestUseNodeReenterVisit) {
+  GraphWithStartNodeTester graph;
+  Node* n0 = graph.start_node();
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator, n2);
+  Node* n4 = graph.NewNode(&dummy_operator, n0);
+  Node* n5 = graph.NewNode(&dummy_operator, n4);
+  n0->AppendInput(graph.main_zone(), n3);
+  graph.SetStart(n0);
+  graph.SetEnd(n5);
+
+  ReenterNodeVisitor visitor;
+  graph.VisitNodeUsesFromStart(&visitor);
+
+  CHECK_EQ(22, visitor.nodes_.size());
+  CHECK_EQ(24, visitor.edges_.size());
+
+  CHECK(n0->id() == visitor.nodes_[0]);
+  CHECK(n0->id() == visitor.edges_[0].first);
+  CHECK(n1->id() == visitor.edges_[0].second);
+  CHECK(n1->id() == visitor.nodes_[1]);
+  // N1 is deferred.
+  CHECK(-n1->id() == visitor.edges_[1].second);
+  CHECK(-n0->id() == visitor.edges_[1].first);
+  CHECK(n0->id() == visitor.edges_[2].first);
+  CHECK(n2->id() == visitor.edges_[2].second);
+  CHECK(n2->id() == visitor.nodes_[2]);
+  CHECK(n2->id() == visitor.edges_[3].first);
+  CHECK(n3->id() == visitor.edges_[3].second);
+  CHECK(n3->id() == visitor.nodes_[3]);
+  // Circle back to N0, which we may reenter for now.
+  CHECK(n3->id() == visitor.edges_[4].first);
+  CHECK(n0->id() == visitor.edges_[4].second);
+  CHECK(n0->id() == visitor.nodes_[4]);
+  CHECK(n0->id() == visitor.edges_[5].first);
+  CHECK(n1->id() == visitor.edges_[5].second);
+  CHECK(n1->id() == visitor.nodes_[5]);
+  // This time N1 is no longer deferred.
+  CHECK(-n1->id() == visitor.nodes_[6]);
+  CHECK(-n1->id() == visitor.edges_[6].second);
+  CHECK(-n0->id() == visitor.edges_[6].first);
+  CHECK(n0->id() == visitor.edges_[7].first);
+  CHECK(n2->id() == visitor.edges_[7].second);
+  CHECK(n2->id() == visitor.nodes_[7]);
+  CHECK(n2->id() == visitor.edges_[8].first);
+  CHECK(n3->id() == visitor.edges_[8].second);
+  CHECK(n3->id() == visitor.nodes_[8]);
+  CHECK(n3->id() == visitor.edges_[9].first);
+  CHECK(n0->id() == visitor.edges_[9].second);
+  CHECK(n0->id() == visitor.nodes_[9]);
+  // This time we break at N0 and skip it.
+  CHECK(-n0->id() == visitor.edges_[10].second);
+  CHECK(-n3->id() == visitor.edges_[10].first);
+  CHECK(-n3->id() == visitor.nodes_[10]);
+  CHECK(-n3->id() == visitor.edges_[11].second);
+  CHECK(-n2->id() == visitor.edges_[11].first);
+  CHECK(-n2->id() == visitor.nodes_[11]);
+  CHECK(-n2->id() == visitor.edges_[12].second);
+  CHECK(-n0->id() == visitor.edges_[12].first);
+  CHECK(n0->id() == visitor.edges_[13].first);
+  CHECK(n4->id() == visitor.edges_[13].second);
+  CHECK(n4->id() == visitor.nodes_[12]);
+  CHECK(n4->id() == visitor.edges_[14].first);
+  CHECK(n5->id() == visitor.edges_[14].second);
+  CHECK(n5->id() == visitor.nodes_[13]);
+  CHECK(-n5->id() == visitor.nodes_[14]);
+  CHECK(-n5->id() == visitor.edges_[15].second);
+  CHECK(-n4->id() == visitor.edges_[15].first);
+  CHECK(-n4->id() == visitor.nodes_[15]);
+  CHECK(-n4->id() == visitor.edges_[16].second);
+  CHECK(-n0->id() == visitor.edges_[16].first);
+  CHECK(-n0->id() == visitor.nodes_[16]);
+  CHECK(-n0->id() == visitor.edges_[17].second);
+  CHECK(-n3->id() == visitor.edges_[17].first);
+  CHECK(-n3->id() == visitor.nodes_[17]);
+  CHECK(-n3->id() == visitor.edges_[18].second);
+  CHECK(-n2->id() == visitor.edges_[18].first);
+  CHECK(-n2->id() == visitor.nodes_[18]);
+  CHECK(-n2->id() == visitor.edges_[19].second);
+  CHECK(-n0->id() == visitor.edges_[19].first);
+  // N4 may be reentered.
+  CHECK(n0->id() == visitor.edges_[20].first);
+  CHECK(n4->id() == visitor.edges_[20].second);
+  CHECK(n4->id() == visitor.nodes_[19]);
+  CHECK(n4->id() == visitor.edges_[21].first);
+  CHECK(n5->id() == visitor.edges_[21].second);
+  CHECK(-n5->id() == visitor.edges_[22].second);
+  CHECK(-n4->id() == visitor.edges_[22].first);
+  CHECK(-n4->id() == visitor.nodes_[20]);
+  CHECK(-n4->id() == visitor.edges_[23].second);
+  CHECK(-n0->id() == visitor.edges_[23].first);
+  CHECK(-n0->id() == visitor.nodes_[21]);
+}
+
+
+TEST(TestPrintNodeGraphToNodeGraphviz) {
+  GraphWithStartNodeTester graph;
+  Node* n2 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n3 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n4 = graph.NewNode(&dummy_operator, n2);
+  Node* n5 = graph.NewNode(&dummy_operator, n2);
+  Node* n6 = graph.NewNode(&dummy_operator, n3);
+  Node* n7 = graph.NewNode(&dummy_operator, n3);
+  Node* n8 = graph.NewNode(&dummy_operator, n5);
+  Node* n9 = graph.NewNode(&dummy_operator, n5);
+  Node* n10 = graph.NewNode(&dummy_operator, n9);
+  Node* n11 = graph.NewNode(&dummy_operator, n9);
+  Node* end_dependencies[6] = {n4, n8, n10, n11, n6, n7};
+  Node* n12 = graph.NewNode(&dummy_operator, 6, end_dependencies);
+  graph.SetEnd(n12);
+
+  OFStream os(stdout);
+  os << AsDOT(graph);
+}
diff --git a/test/cctest/compiler/test-node-cache.cc b/test/cctest/compiler/test-node-cache.cc
new file mode 100644
index 0000000..23909a5
--- /dev/null
+++ b/test/cctest/compiler/test-node-cache.cc
@@ -0,0 +1,160 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "graph-tester.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/node-cache.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
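+// NodeCache::Find returns the slot for a key: a non-NULL Node** whose
+// pointee starts out NULL and is filled in by assigning through it. Repeated
+// lookups of the same key return the same slot, though entries may be
+// evicted, which is why the _hits tests only require a minimum hit count.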
+TEST(Int32Constant_back_to_back) {
+  GraphTester graph;
+  Int32NodeCache cache;
+
+  for (int i = -2000000000; i < 2000000000; i += 3315177) {
+    Node** pos = cache.Find(graph.zone(), i);
+    CHECK_NE(NULL, pos);
+    for (int j = 0; j < 3; j++) {
+      Node** npos = cache.Find(graph.zone(), i);
+      CHECK_EQ(pos, npos);
+    }
+  }
+}
+
+
+TEST(Int32Constant_five) {
+  GraphTester graph;
+  Int32NodeCache cache;
+  CommonOperatorBuilder common(graph.zone());
+
+  int32_t constants[] = {static_cast<int32_t>(0x80000000), -77, 0, 1, -1};
+
+  Node* nodes[ARRAY_SIZE(constants)];
+
+  for (size_t i = 0; i < ARRAY_SIZE(constants); i++) {
+    int32_t k = constants[i];
+    Node* node = graph.NewNode(common.Int32Constant(k));
+    *cache.Find(graph.zone(), k) = nodes[i] = node;
+  }
+
+  for (size_t i = 0; i < ARRAY_SIZE(constants); i++) {
+    int32_t k = constants[i];
+    CHECK_EQ(nodes[i], *cache.Find(graph.zone(), k));
+  }
+}
+
+
+TEST(Int32Constant_hits) {
+  GraphTester graph;
+  Int32NodeCache cache;
+  const int32_t kSize = 1500;
+  Node** nodes = graph.zone()->NewArray<Node*>(kSize);
+  CommonOperatorBuilder common(graph.zone());
+
+  for (int i = 0; i < kSize; i++) {
+    int32_t v = i * -55;
+    nodes[i] = graph.NewNode(common.Int32Constant(v));
+    *cache.Find(graph.zone(), v) = nodes[i];
+  }
+
+  int hits = 0;
+  for (int i = 0; i < kSize; i++) {
+    int32_t v = i * -55;
+    Node** pos = cache.Find(graph.zone(), v);
+    if (*pos != NULL) {
+      CHECK_EQ(nodes[i], *pos);
+      hits++;
+    }
+  }
+  CHECK_LT(4, hits);
+}
+
+
+TEST(Int64Constant_back_to_back) {
+  GraphTester graph;
+  Int64NodeCache cache;
+
+  for (int64_t i = -2000000000; i < 2000000000; i += 3315177) {
+    Node** pos = cache.Find(graph.zone(), i);
+    CHECK_NE(NULL, pos);
+    for (int j = 0; j < 3; j++) {
+      Node** npos = cache.Find(graph.zone(), i);
+      CHECK_EQ(pos, npos);
+    }
+  }
+}
+
+
+TEST(Int64Constant_hits) {
+  GraphTester graph;
+  Int64NodeCache cache;
+  const int32_t kSize = 1500;
+  Node** nodes = graph.zone()->NewArray<Node*>(kSize);
+  CommonOperatorBuilder common(graph.zone());
+
+  for (int i = 0; i < kSize; i++) {
+    int64_t v = static_cast<int64_t>(i) * static_cast<int64_t>(5003001);
+    nodes[i] = graph.NewNode(common.Int32Constant(i));
+    *cache.Find(graph.zone(), v) = nodes[i];
+  }
+
+  int hits = 0;
+  for (int i = 0; i < kSize; i++) {
+    int64_t v = static_cast<int64_t>(i) * static_cast<int64_t>(5003001);
+    Node** pos = cache.Find(graph.zone(), v);
+    if (*pos != NULL) {
+      CHECK_EQ(nodes[i], *pos);
+      hits++;
+    }
+  }
+  CHECK_LT(4, hits);
+}
+
+
+TEST(PtrConstant_back_to_back) {
+  GraphTester graph;
+  PtrNodeCache cache;
+  int32_t buffer[50];
+
+  for (int32_t* p = buffer;
+       (p - buffer) < static_cast<ptrdiff_t>(ARRAY_SIZE(buffer)); p++) {
+    Node** pos = cache.Find(graph.zone(), p);
+    CHECK_NE(NULL, pos);
+    for (int j = 0; j < 3; j++) {
+      Node** npos = cache.Find(graph.zone(), p);
+      CHECK_EQ(pos, npos);
+    }
+  }
+}
+
+
+TEST(PtrConstant_hits) {
+  GraphTester graph;
+  PtrNodeCache cache;
+  const int32_t kSize = 50;
+  int32_t buffer[kSize];
+  Node* nodes[kSize];
+  CommonOperatorBuilder common(graph.zone());
+
+  for (size_t i = 0; i < ARRAY_SIZE(buffer); i++) {
+    int k = static_cast<int>(i);
+    int32_t* p = &buffer[i];
+    nodes[i] = graph.NewNode(common.Int32Constant(k));
+    *cache.Find(graph.zone(), p) = nodes[i];
+  }
+
+  int hits = 0;
+  for (size_t i = 0; i < ARRAY_SIZE(buffer); i++) {
+    int32_t* p = &buffer[i];
+    Node** pos = cache.Find(graph.zone(), p);
+    if (*pos != NULL) {
+      CHECK_EQ(nodes[i], *pos);
+      hits++;
+    }
+  }
+  CHECK_LT(4, hits);
+}
diff --git a/test/cctest/compiler/test-node.cc b/test/cctest/compiler/test-node.cc
new file mode 100644
index 0000000..5411755
--- /dev/null
+++ b/test/cctest/compiler/test-node.cc
@@ -0,0 +1,813 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "graph-tester.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+static SimpleOperator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
+                                     0, 0, "dummy");
+
+TEST(NodeAllocation) {
+  GraphTester graph;
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator);
+  CHECK(n2->id() != n1->id());
+}
+
+
+TEST(NodeWithOpcode) {
+  GraphTester graph;
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator);
+  CHECK(n1->op() == &dummy_operator);
+  CHECK(n2->op() == &dummy_operator);
+}
+
+
+TEST(NodeInputs1) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  CHECK_EQ(1, n2->InputCount());
+  CHECK(n0 == n2->InputAt(0));
+}
+
+
+TEST(NodeInputs2) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+  CHECK_EQ(2, n2->InputCount());
+  CHECK(n0 == n2->InputAt(0));
+  CHECK(n1 == n2->InputAt(1));
+}
+
+
+TEST(NodeInputs3) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator, n0, n1, n1);
+  CHECK_EQ(3, n2->InputCount());
+  CHECK(n0 == n2->InputAt(0));
+  CHECK(n1 == n2->InputAt(1));
+  CHECK(n1 == n2->InputAt(2));
+}
+
+
+TEST(NodeInputIteratorEmpty) {
+  GraphTester graph;
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node::Inputs::iterator i(n1->inputs().begin());
+  int input_count = 0;
+  for (; i != n1->inputs().end(); ++i) {
+    input_count++;
+  }
+  CHECK_EQ(0, input_count);
+}
+
+
+TEST(NodeInputIteratorOne) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node::Inputs::iterator i(n1->inputs().begin());
+  CHECK_EQ(1, n1->InputCount());
+  CHECK_EQ(n0, *i);
+  ++i;
+  CHECK(n1->inputs().end() == i);
+}
+
+
+TEST(NodeUseIteratorEmpty) {
+  GraphTester graph;
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node::Uses::iterator i(n1->uses().begin());
+  int use_count = 0;
+  for (; i != n1->uses().end(); ++i) {
+    Node::Edge edge(i.edge());
+    USE(edge);
+    use_count++;
+  }
+  CHECK_EQ(0, use_count);
+}
+
+
+TEST(NodeUseIteratorOne) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node::Uses::iterator i(n0->uses().begin());
+  CHECK_EQ(n1, *i);
+  ++i;
+  CHECK(n0->uses().end() == i);
+}
+
+
+TEST(NodeUseIteratorReplaceNoUses) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n3 = graph.NewNode(&dummy_operator);
+  n0->ReplaceUses(n3);
+  CHECK(n0->uses().begin() == n0->uses().end());
+}
+
+
+TEST(NodeUseIteratorReplaceUses) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator);
+  Node::Uses::iterator i1(n0->uses().begin());
+  CHECK_EQ(n1, *i1);
+  ++i1;
+  CHECK_EQ(n2, *i1);
+  n0->ReplaceUses(n3);
+  Node::Uses::iterator i2(n3->uses().begin());
+  CHECK_EQ(n1, *i2);
+  ++i2;
+  CHECK_EQ(n2, *i2);
+  Node::Inputs::iterator i3(n1->inputs().begin());
+  CHECK_EQ(n3, *i3);
+  ++i3;
+  CHECK(n1->inputs().end() == i3);
+  Node::Inputs::iterator i4(n2->inputs().begin());
+  CHECK_EQ(n3, *i4);
+  ++i4;
+  CHECK(n2->inputs().end() == i4);
+}
+
+
+TEST(NodeUseIteratorReplaceUsesSelf) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator);
+
+  n1->ReplaceInput(0, n1);  // Create a self-reference.
+
+  Node::Uses::iterator i1(n1->uses().begin());
+  CHECK_EQ(n1, *i1);
+
+  n1->ReplaceUses(n3);
+
+  CHECK(n1->uses().begin() == n1->uses().end());
+
+  Node::Uses::iterator i2(n3->uses().begin());
+  CHECK_EQ(n1, *i2);
+  ++i2;
+  CHECK(n3->uses().end() == i2);
+}
+
+
+TEST(ReplaceInput) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator);
+  Node* n3 = graph.NewNode(&dummy_operator, n0, n1, n2);
+  Node::Inputs::iterator i1(n3->inputs().begin());
+  CHECK(n0 == *i1);
+  CHECK_EQ(n0, n3->InputAt(0));
+  ++i1;
+  CHECK_EQ(n1, *i1);
+  CHECK_EQ(n1, n3->InputAt(1));
+  ++i1;
+  CHECK_EQ(n2, *i1);
+  CHECK_EQ(n2, n3->InputAt(2));
+  ++i1;
+  CHECK(i1 == n3->inputs().end());
+
+  Node::Uses::iterator i2(n1->uses().begin());
+  CHECK_EQ(n3, *i2);
+  ++i2;
+  CHECK(i2 == n1->uses().end());
+
+  Node* n4 = graph.NewNode(&dummy_operator);
+  Node::Uses::iterator i3(n4->uses().begin());
+  CHECK(i3 == n4->uses().end());
+
+  n3->ReplaceInput(1, n4);
+
+  Node::Uses::iterator i4(n1->uses().begin());
+  CHECK(i4 == n1->uses().end());
+
+  Node::Uses::iterator i5(n4->uses().begin());
+  CHECK_EQ(n3, *i5);
+  ++i5;
+  CHECK(i5 == n4->uses().end());
+
+  Node::Inputs::iterator i6(n3->inputs().begin());
+  CHECK(n0 == *i6);
+  CHECK_EQ(n0, n3->InputAt(0));
+  ++i6;
+  CHECK_EQ(n4, *i6);
+  CHECK_EQ(n4, n3->InputAt(1));
+  ++i6;
+  CHECK_EQ(n2, *i6);
+  CHECK_EQ(n2, n3->InputAt(2));
+  ++i6;
+  CHECK(i6 == n3->inputs().end());
+}
+
+
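+// A node is "owned by" another node exactly when that node is its sole user;
+// the checks below track how ownership appears and disappears as uses are
+// added and retargeted.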
+TEST(OwnedBy) {
+  GraphTester graph;
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+
+    CHECK(!n0->OwnedBy(n1));
+    CHECK(!n1->OwnedBy(n0));
+
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    CHECK(n0->OwnedBy(n2));
+    CHECK(!n2->OwnedBy(n0));
+
+    Node* n3 = graph.NewNode(&dummy_operator, n0);
+    CHECK(!n0->OwnedBy(n2));
+    CHECK(!n0->OwnedBy(n3));
+    CHECK(!n2->OwnedBy(n0));
+    CHECK(!n3->OwnedBy(n0));
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator, n0);
+    CHECK(n0->OwnedBy(n1));
+    CHECK(!n1->OwnedBy(n0));
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    CHECK(!n0->OwnedBy(n1));
+    CHECK(!n0->OwnedBy(n2));
+    CHECK(!n1->OwnedBy(n0));
+    CHECK(!n1->OwnedBy(n2));
+    CHECK(!n2->OwnedBy(n0));
+    CHECK(!n2->OwnedBy(n1));
+
+    Node* n3 = graph.NewNode(&dummy_operator);
+    n2->ReplaceInput(0, n3);
+
+    CHECK(n0->OwnedBy(n1));
+    CHECK(!n1->OwnedBy(n0));
+    CHECK(!n0->OwnedBy(n2));
+    CHECK(!n1->OwnedBy(n2));
+    CHECK(!n2->OwnedBy(n0));
+    CHECK(!n2->OwnedBy(n1));
+    CHECK(n3->OwnedBy(n2));
+    CHECK(!n2->OwnedBy(n3));
+  }
+}
+
+
+TEST(Uses) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  CHECK_EQ(1, n0->UseCount());
+  printf("A: %d vs %d\n", n0->UseAt(0)->id(), n1->id());
+  CHECK(n0->UseAt(0) == n1);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  CHECK_EQ(2, n0->UseCount());
+  printf("B: %d vs %d\n", n0->UseAt(1)->id(), n2->id());
+  CHECK(n0->UseAt(1) == n2);
+  Node* n3 = graph.NewNode(&dummy_operator, n0);
+  CHECK_EQ(3, n0->UseCount());
+  CHECK(n0->UseAt(2) == n3);
+}
+
+
+TEST(Inputs) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator, n0, n1, n2);
+  CHECK_EQ(3, n3->InputCount());
+  CHECK(n3->InputAt(0) == n0);
+  CHECK(n3->InputAt(1) == n1);
+  CHECK(n3->InputAt(2) == n2);
+  Node* n4 = graph.NewNode(&dummy_operator, n0, n1, n2);
+  n3->AppendInput(graph.zone(), n4);
+  CHECK_EQ(4, n3->InputCount());
+  CHECK(n3->InputAt(0) == n0);
+  CHECK(n3->InputAt(1) == n1);
+  CHECK(n3->InputAt(2) == n2);
+  CHECK(n3->InputAt(3) == n4);
+  Node* n5 = graph.NewNode(&dummy_operator, n4);
+  n3->AppendInput(graph.zone(), n4);
+  CHECK_EQ(5, n3->InputCount());
+  CHECK(n3->InputAt(0) == n0);
+  CHECK(n3->InputAt(1) == n1);
+  CHECK(n3->InputAt(2) == n2);
+  CHECK(n3->InputAt(3) == n4);
+  CHECK(n3->InputAt(4) == n4);
+
+  // Make sure uses have been hooked up correctly.
+  Node::Uses uses(n4->uses());
+  Node::Uses::iterator current = uses.begin();
+  CHECK(current != uses.end());
+  CHECK(*current == n3);
+  ++current;
+  CHECK(current != uses.end());
+  CHECK(*current == n5);
+  ++current;
+  CHECK(current != uses.end());
+  CHECK(*current == n3);
+  ++current;
+  CHECK(current == uses.end());
+}
+
+
+TEST(AppendInputsAndIterator) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+
+  Node::Inputs inputs(n2->inputs());
+  Node::Inputs::iterator current = inputs.begin();
+  CHECK(current != inputs.end());
+  CHECK(*current == n0);
+  ++current;
+  CHECK(current != inputs.end());
+  CHECK(*current == n1);
+  ++current;
+  CHECK(current == inputs.end());
+
+  Node* n3 = graph.NewNode(&dummy_operator);
+  n2->AppendInput(graph.zone(), n3);
+  inputs = n2->inputs();
+  current = inputs.begin();
+  CHECK(current != inputs.end());
+  CHECK(*current == n0);
+  CHECK_EQ(0, current.index());
+  ++current;
+  CHECK(current != inputs.end());
+  CHECK(*current == n1);
+  CHECK_EQ(1, current.index());
+  ++current;
+  CHECK(current != inputs.end());
+  CHECK(*current == n3);
+  CHECK_EQ(2, current.index());
+  ++current;
+  CHECK(current == inputs.end());
+}
+
+
+TEST(NullInputsSimple) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+  CHECK_EQ(2, n2->InputCount());
+
+  CHECK(n0 == n2->InputAt(0));
+  CHECK(n1 == n2->InputAt(1));
+  CHECK_EQ(2, n0->UseCount());
+  n2->ReplaceInput(0, NULL);
+  CHECK(NULL == n2->InputAt(0));
+  CHECK(n1 == n2->InputAt(1));
+  CHECK_EQ(1, n0->UseCount());
+}
+
+
+TEST(NullInputsAppended) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator, n0);
+  n3->AppendInput(graph.zone(), n1);
+  n3->AppendInput(graph.zone(), n2);
+  CHECK_EQ(3, n3->InputCount());
+
+  CHECK(n0 == n3->InputAt(0));
+  CHECK(n1 == n3->InputAt(1));
+  CHECK(n2 == n3->InputAt(2));
+  CHECK_EQ(1, n1->UseCount());
+  n3->ReplaceInput(1, NULL);
+  CHECK(n0 == n3->InputAt(0));
+  CHECK(NULL == n3->InputAt(1));
+  CHECK(n2 == n3->InputAt(2));
+  CHECK_EQ(0, n1->UseCount());
+}
+
+
+TEST(ReplaceUsesFromAppendedInputs) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator);
+  n2->AppendInput(graph.zone(), n1);
+  n2->AppendInput(graph.zone(), n0);
+  CHECK_EQ(0, n3->UseCount());
+  CHECK_EQ(3, n0->UseCount());
+  n0->ReplaceUses(n3);
+  CHECK_EQ(0, n0->UseCount());
+  CHECK_EQ(3, n3->UseCount());
+
+  Node::Uses uses(n3->uses());
+  Node::Uses::iterator current = uses.begin();
+  CHECK(current != uses.end());
+  CHECK(*current == n1);
+  ++current;
+  CHECK(current != uses.end());
+  CHECK(*current == n2);
+  ++current;
+  CHECK(current != uses.end());
+  CHECK(*current == n2);
+  ++current;
+  CHECK(current == uses.end());
+}
+
+
+template <bool result>
+struct FixedPredicate {
+  bool operator()(const Node* node) const { return result; }
+};
+
+
+TEST(ReplaceUsesIfWithFixedPredicate) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator);
+
+  CHECK_EQ(0, n2->UseCount());
+  n2->ReplaceUsesIf(FixedPredicate<true>(), n1);
+  CHECK_EQ(0, n2->UseCount());
+  n2->ReplaceUsesIf(FixedPredicate<false>(), n1);
+  CHECK_EQ(0, n2->UseCount());
+
+  CHECK_EQ(0, n3->UseCount());
+  n3->ReplaceUsesIf(FixedPredicate<true>(), n1);
+  CHECK_EQ(0, n3->UseCount());
+  n3->ReplaceUsesIf(FixedPredicate<false>(), n1);
+  CHECK_EQ(0, n3->UseCount());
+
+  CHECK_EQ(2, n0->UseCount());
+  CHECK_EQ(0, n1->UseCount());
+  n0->ReplaceUsesIf(FixedPredicate<false>(), n1);
+  CHECK_EQ(2, n0->UseCount());
+  CHECK_EQ(0, n1->UseCount());
+  n0->ReplaceUsesIf(FixedPredicate<true>(), n1);
+  CHECK_EQ(0, n0->UseCount());
+  CHECK_EQ(2, n1->UseCount());
+
+  n1->AppendInput(graph.zone(), n1);
+  CHECK_EQ(3, n1->UseCount());
+  n1->AppendInput(graph.zone(), n3);
+  CHECK_EQ(1, n3->UseCount());
+  n3->ReplaceUsesIf(FixedPredicate<true>(), n1);
+  CHECK_EQ(4, n1->UseCount());
+  CHECK_EQ(0, n3->UseCount());
+  n1->ReplaceUsesIf(FixedPredicate<false>(), n3);
+  CHECK_EQ(4, n1->UseCount());
+  CHECK_EQ(0, n3->UseCount());
+}
+
+
+TEST(ReplaceUsesIfWithEqualTo) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+
+  CHECK_EQ(0, n2->UseCount());
+  n2->ReplaceUsesIf(std::bind1st(std::equal_to<Node*>(), n1), n0);
+  CHECK_EQ(0, n2->UseCount());
+
+  CHECK_EQ(2, n0->UseCount());
+  CHECK_EQ(1, n1->UseCount());
+  n1->ReplaceUsesIf(std::bind1st(std::equal_to<Node*>(), n0), n0);
+  CHECK_EQ(2, n0->UseCount());
+  CHECK_EQ(1, n1->UseCount());
+  n0->ReplaceUsesIf(std::bind2nd(std::equal_to<Node*>(), n2), n1);
+  CHECK_EQ(1, n0->UseCount());
+  CHECK_EQ(2, n1->UseCount());
+}
+
+
+TEST(ReplaceInputMultipleUses) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  n2->ReplaceInput(0, n1);
+  CHECK_EQ(0, n0->UseCount());
+  CHECK_EQ(1, n1->UseCount());
+
+  Node* n3 = graph.NewNode(&dummy_operator, n0);
+  n3->ReplaceInput(0, n1);
+  CHECK_EQ(0, n0->UseCount());
+  CHECK_EQ(2, n1->UseCount());
+}
+
+
+TEST(TrimInputCountInline) {
+  GraphTester graph;
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator, n0);
+    n1->TrimInputCount(1);
+    CHECK_EQ(1, n1->InputCount());
+    CHECK_EQ(n0, n1->InputAt(0));
+    CHECK_EQ(1, n0->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator, n0);
+    n1->TrimInputCount(0);
+    CHECK_EQ(0, n1->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+    n2->TrimInputCount(2);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(1, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+    n2->TrimInputCount(1);
+    CHECK_EQ(1, n2->InputCount());
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+    n2->TrimInputCount(0);
+    CHECK_EQ(0, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0, n0);
+    n2->TrimInputCount(1);
+    CHECK_EQ(1, n2->InputCount());
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0, n0);
+    n2->TrimInputCount(0);
+    CHECK_EQ(0, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+}
+
+
+TEST(TrimInputCountOutOfLine1) {
+  GraphTester graph;
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    n1->AppendInput(graph.zone(), n0);
+    n1->TrimInputCount(1);
+    CHECK_EQ(1, n1->InputCount());
+    CHECK_EQ(n0, n1->InputAt(0));
+    CHECK_EQ(1, n0->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    n1->AppendInput(graph.zone(), n0);
+    CHECK_EQ(1, n1->InputCount());
+    n1->TrimInputCount(0);
+    CHECK_EQ(0, n1->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator);
+    n2->AppendInput(graph.zone(), n0);
+    n2->AppendInput(graph.zone(), n1);
+    CHECK_EQ(2, n2->InputCount());
+    n2->TrimInputCount(2);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(n0, n2->InputAt(0));
+    CHECK_EQ(n1, n2->InputAt(1));
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(1, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator);
+    n2->AppendInput(graph.zone(), n0);
+    n2->AppendInput(graph.zone(), n1);
+    CHECK_EQ(2, n2->InputCount());
+    n2->TrimInputCount(1);
+    CHECK_EQ(1, n2->InputCount());
+    CHECK_EQ(n0, n2->InputAt(0));
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator);
+    n2->AppendInput(graph.zone(), n0);
+    n2->AppendInput(graph.zone(), n1);
+    CHECK_EQ(2, n2->InputCount());
+    n2->TrimInputCount(0);
+    CHECK_EQ(0, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator);
+    n2->AppendInput(graph.zone(), n0);
+    n2->AppendInput(graph.zone(), n0);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(2, n0->UseCount());
+    n2->TrimInputCount(1);
+    CHECK_EQ(1, n2->InputCount());
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator);
+    n2->AppendInput(graph.zone(), n0);
+    n2->AppendInput(graph.zone(), n0);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(2, n0->UseCount());
+    n2->TrimInputCount(0);
+    CHECK_EQ(0, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+}
+
+
+TEST(TrimInputCountOutOfLine2) {
+  GraphTester graph;
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    n2->AppendInput(graph.zone(), n1);
+    CHECK_EQ(2, n2->InputCount());
+    n2->TrimInputCount(2);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(n0, n2->InputAt(0));
+    CHECK_EQ(n1, n2->InputAt(1));
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(1, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    n2->AppendInput(graph.zone(), n1);
+    CHECK_EQ(2, n2->InputCount());
+    n2->TrimInputCount(1);
+    CHECK_EQ(1, n2->InputCount());
+    CHECK_EQ(n0, n2->InputAt(0));
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    n2->AppendInput(graph.zone(), n1);
+    CHECK_EQ(2, n2->InputCount());
+    n2->TrimInputCount(0);
+    CHECK_EQ(0, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    n2->AppendInput(graph.zone(), n0);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(2, n0->UseCount());
+    n2->TrimInputCount(1);
+    CHECK_EQ(1, n2->InputCount());
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    n2->AppendInput(graph.zone(), n0);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(2, n0->UseCount());
+    n2->TrimInputCount(0);
+    CHECK_EQ(0, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+}
+
+
+TEST(RemoveAllInputs) {
+  GraphTester graph;
+
+  for (int i = 0; i < 2; i++) {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator, n0);
+    Node* n2;
+    if (i == 0) {
+      n2 = graph.NewNode(&dummy_operator, n0, n1);
+    } else {
+      n2 = graph.NewNode(&dummy_operator, n0);
+      n2->AppendInput(graph.zone(), n1);  // With an out-of-line input.
+    }
+
+    n0->RemoveAllInputs();
+    CHECK_EQ(0, n0->InputCount());
+
+    CHECK_EQ(2, n0->UseCount());
+    n1->RemoveAllInputs();
+    CHECK_EQ(1, n1->InputCount());
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(NULL, n1->InputAt(0));
+
+    CHECK_EQ(1, n1->UseCount());
+    n2->RemoveAllInputs();
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(NULL, n2->InputAt(0));
+    CHECK_EQ(NULL, n2->InputAt(1));
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator, n0);
+    n1->ReplaceInput(0, n1);  // Create a self-reference.
+
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(1, n1->UseCount());
+    n1->RemoveAllInputs();
+    CHECK_EQ(1, n1->InputCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(NULL, n1->InputAt(0));
+  }
+}
diff --git a/test/cctest/compiler/test-operator.cc b/test/cctest/compiler/test-operator.cc
new file mode 100644
index 0000000..0bf8cb7
--- /dev/null
+++ b/test/cctest/compiler/test-operator.cc
@@ -0,0 +1,244 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/compiler/operator.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+#define NaN (v8::base::OS::nan_value())
+#define Infinity (std::numeric_limits<double>::infinity())
+
+TEST(TestOperatorMnemonic) {
+  SimpleOperator op1(10, 0, 0, 0, "ThisOne");
+  CHECK_EQ(0, strcmp(op1.mnemonic(), "ThisOne"));
+
+  SimpleOperator op2(11, 0, 0, 0, "ThatOne");
+  CHECK_EQ(0, strcmp(op2.mnemonic(), "ThatOne"));
+
+  Operator1<int> op3(12, 0, 0, 1, "Mnemonic1", 12333);
+  CHECK_EQ(0, strcmp(op3.mnemonic(), "Mnemonic1"));
+
+  Operator1<double> op4(13, 0, 0, 1, "TheOther", 99.9);
+  CHECK_EQ(0, strcmp(op4.mnemonic(), "TheOther"));
+}
+
+
+TEST(TestSimpleOperatorHash) {
+  SimpleOperator op1(17, 0, 0, 0, "Another");
+  CHECK_EQ(17, op1.HashCode());
+
+  SimpleOperator op2(18, 0, 0, 0, "Falsch");
+  CHECK_EQ(18, op2.HashCode());
+}
+
+
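+// Operator identity is determined by the opcode (plus the parameter value
+// for Operator1); input/output counts and mnemonics deliberately do not
+// participate, as the pairs below demonstrate.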
+TEST(TestSimpleOperatorEquals) {
+  SimpleOperator op1a(19, 0, 0, 0, "Another1");
+  SimpleOperator op1b(19, 2, 2, 2, "Another2");
+
+  CHECK(op1a.Equals(&op1a));
+  CHECK(op1a.Equals(&op1b));
+  CHECK(op1b.Equals(&op1a));
+  CHECK(op1b.Equals(&op1b));
+
+  SimpleOperator op2a(20, 0, 0, 0, "Falsch1");
+  SimpleOperator op2b(20, 1, 1, 1, "Falsch2");
+
+  CHECK(op2a.Equals(&op2a));
+  CHECK(op2a.Equals(&op2b));
+  CHECK(op2b.Equals(&op2a));
+  CHECK(op2b.Equals(&op2b));
+
+  CHECK(!op1a.Equals(&op2a));
+  CHECK(!op1a.Equals(&op2b));
+  CHECK(!op1b.Equals(&op2a));
+  CHECK(!op1b.Equals(&op2b));
+
+  CHECK(!op2a.Equals(&op1a));
+  CHECK(!op2a.Equals(&op1b));
+  CHECK(!op2b.Equals(&op1a));
+  CHECK(!op2b.Equals(&op1b));
+}
+
+
+static SmartArrayPointer<const char> OperatorToString(Operator* op) {
+  OStringStream os;
+  os << *op;
+  return SmartArrayPointer<const char>(StrDup(os.c_str()));
+}
+
+
+TEST(TestSimpleOperatorPrint) {
+  SimpleOperator op1a(19, 0, 0, 0, "Another1");
+  SimpleOperator op1b(19, 2, 2, 2, "Another2");
+
+  CHECK_EQ("Another1", OperatorToString(&op1a).get());
+  CHECK_EQ("Another2", OperatorToString(&op1b).get());
+
+  SimpleOperator op2a(20, 0, 0, 0, "Flog1");
+  SimpleOperator op2b(20, 1, 1, 1, "Flog2");
+
+  CHECK_EQ("Flog1", OperatorToString(&op2a).get());
+  CHECK_EQ("Flog2", OperatorToString(&op2b).get());
+}
+
+
+TEST(TestOperator1intHash) {
+  Operator1<int> op1a(23, 0, 0, 0, "Wolfie", 11);
+  Operator1<int> op1b(23, 2, 2, 2, "Doggie", 11);
+
+  CHECK_EQ(op1a.HashCode(), op1b.HashCode());
+
+  Operator1<int> op2a(24, 0, 0, 0, "Arfie", 3);
+  Operator1<int> op2b(24, 0, 0, 0, "Arfie", 4);
+
+  CHECK_NE(op1a.HashCode(), op2a.HashCode());
+  CHECK_NE(op2a.HashCode(), op2b.HashCode());
+}
+
+
+TEST(TestOperator1intEquals) {
+  Operator1<int> op1a(23, 0, 0, 0, "Scratchy", 11);
+  Operator1<int> op1b(23, 2, 2, 2, "Scratchy", 11);
+
+  CHECK(op1a.Equals(&op1a));
+  CHECK(op1a.Equals(&op1b));
+  CHECK(op1b.Equals(&op1a));
+  CHECK(op1b.Equals(&op1b));
+
+  Operator1<int> op2a(24, 0, 0, 0, "Im", 3);
+  Operator1<int> op2b(24, 0, 0, 0, "Im", 4);
+
+  CHECK(op2a.Equals(&op2a));
+  CHECK(!op2a.Equals(&op2b));
+  CHECK(!op2b.Equals(&op2a));
+  CHECK(op2b.Equals(&op2b));
+
+  CHECK(!op1a.Equals(&op2a));
+  CHECK(!op1a.Equals(&op2b));
+  CHECK(!op1b.Equals(&op2a));
+  CHECK(!op1b.Equals(&op2b));
+
+  CHECK(!op2a.Equals(&op1a));
+  CHECK(!op2a.Equals(&op1b));
+  CHECK(!op2b.Equals(&op1a));
+  CHECK(!op2b.Equals(&op1b));
+
+  SimpleOperator op3(25, 0, 0, 0, "Weepy");
+
+  CHECK(!op1a.Equals(&op3));
+  CHECK(!op1b.Equals(&op3));
+  CHECK(!op2a.Equals(&op3));
+  CHECK(!op2b.Equals(&op3));
+
+  CHECK(!op3.Equals(&op1a));
+  CHECK(!op3.Equals(&op1b));
+  CHECK(!op3.Equals(&op2a));
+  CHECK(!op3.Equals(&op2b));
+}
+
+
+TEST(TestOperator1intPrint) {
+  Operator1<int> op1(12, 0, 0, 1, "Op1Test", 0);
+  CHECK_EQ("Op1Test[0]", OperatorToString(&op1).get());
+
+  Operator1<int> op2(12, 0, 0, 1, "Op1Test", 66666666);
+  CHECK_EQ("Op1Test[66666666]", OperatorToString(&op2).get());
+
+  Operator1<int> op3(12, 0, 0, 1, "FooBar", 2347);
+  CHECK_EQ("FooBar[2347]", OperatorToString(&op3).get());
+
+  Operator1<int> op4(12, 0, 0, 1, "BarFoo", -879);
+  CHECK_EQ("BarFoo[-879]", OperatorToString(&op4).get());
+}
+
+
+TEST(TestOperator1doubleHash) {
+  Operator1<double> op1a(23, 0, 0, 0, "Wolfie", 11.77);
+  Operator1<double> op1b(23, 2, 2, 2, "Doggie", 11.77);
+
+  CHECK_EQ(op1a.HashCode(), op1b.HashCode());
+
+  Operator1<double> op2a(24, 0, 0, 0, "Arfie", -6.7);
+  Operator1<double> op2b(24, 0, 0, 0, "Arfie", -6.8);
+
+  CHECK_NE(op1a.HashCode(), op2a.HashCode());
+  CHECK_NE(op2a.HashCode(), op2b.HashCode());
+}
+
+
+TEST(TestOperator1doubleEquals) {
+  Operator1<double> op1a(23, 0, 0, 0, "Scratchy", 11.77);
+  Operator1<double> op1b(23, 2, 2, 2, "Scratchy", 11.77);
+
+  CHECK(op1a.Equals(&op1a));
+  CHECK(op1a.Equals(&op1b));
+  CHECK(op1b.Equals(&op1a));
+  CHECK(op1b.Equals(&op1b));
+
+  Operator1<double> op2a(24, 0, 0, 0, "Im", 3.1);
+  Operator1<double> op2b(24, 0, 0, 0, "Im", 3.2);
+
+  CHECK(op2a.Equals(&op2a));
+  CHECK(!op2a.Equals(&op2b));
+  CHECK(!op2b.Equals(&op2a));
+  CHECK(op2b.Equals(&op2b));
+
+  CHECK(!op1a.Equals(&op2a));
+  CHECK(!op1a.Equals(&op2b));
+  CHECK(!op1b.Equals(&op2a));
+  CHECK(!op1b.Equals(&op2b));
+
+  CHECK(!op2a.Equals(&op1a));
+  CHECK(!op2a.Equals(&op1b));
+  CHECK(!op2b.Equals(&op1a));
+  CHECK(!op2b.Equals(&op1b));
+
+  SimpleOperator op3(25, 0, 0, 0, "Weepy");
+
+  CHECK(!op1a.Equals(&op3));
+  CHECK(!op1b.Equals(&op3));
+  CHECK(!op2a.Equals(&op3));
+  CHECK(!op2b.Equals(&op3));
+
+  CHECK(!op3.Equals(&op1a));
+  CHECK(!op3.Equals(&op1b));
+  CHECK(!op3.Equals(&op2a));
+  CHECK(!op3.Equals(&op2b));
+
+  Operator1<double> op4a(24, 0, 0, 0, "Bashful", NaN);
+  Operator1<double> op4b(24, 0, 0, 0, "Bashful", NaN);
+
+  CHECK(op4a.Equals(&op4a));
+  CHECK(op4a.Equals(&op4b));
+  CHECK(op4b.Equals(&op4a));
+  CHECK(op4b.Equals(&op4b));
+
+  CHECK(!op3.Equals(&op4a));
+  CHECK(!op3.Equals(&op4b));
+  CHECK(!op4a.Equals(&op3));
+  CHECK(!op4b.Equals(&op3));
+}
+
+
+TEST(TestOperator1doublePrint) {
+  Operator1<double> op1(12, 0, 0, 1, "Op1Test", 0);
+  CHECK_EQ("Op1Test[0]", OperatorToString(&op1).get());
+
+  Operator1<double> op2(12, 0, 0, 1, "Op1Test", 7.3);
+  CHECK_EQ("Op1Test[7.3]", OperatorToString(&op2).get());
+
+  Operator1<double> op3(12, 0, 0, 1, "FooBar", 2e+123);
+  CHECK_EQ("FooBar[2e+123]", OperatorToString(&op3).get());
+
+  Operator1<double> op4(12, 0, 0, 1, "BarFoo", Infinity);
+  CHECK_EQ("BarFoo[inf]", OperatorToString(&op4).get());
+
+  Operator1<double> op5(12, 0, 0, 1, "BarFoo", NaN);
+  CHECK_EQ("BarFoo[nan]", OperatorToString(&op5).get());
+}
diff --git a/test/cctest/compiler/test-phi-reducer.cc b/test/cctest/compiler/test-phi-reducer.cc
new file mode 100644
index 0000000..5560040
--- /dev/null
+++ b/test/cctest/compiler/test-phi-reducer.cc
@@ -0,0 +1,223 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/phi-reducer.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
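+// `self` is a placeholder node: SetSelfReferences rewires any input equal to
+// it to point back at the freshly built phi, so tests can express
+// self-referential phis (e.g. loop phis) directly.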
+class PhiReducerTester : HandleAndZoneScope {
+ public:
+  PhiReducerTester()
+      : isolate(main_isolate()),
+        common(main_zone()),
+        graph(main_zone()),
+        self(graph.NewNode(common.Start())),
+        dead(graph.NewNode(common.Dead())) {}
+
+  Isolate* isolate;
+  CommonOperatorBuilder common;
+  Graph graph;
+  Node* self;
+  Node* dead;
+
+  void CheckReduce(Node* expect, Node* phi) {
+    PhiReducer reducer;
+    Reduction reduction = reducer.Reduce(phi);
+    if (expect == phi) {
+      CHECK(!reduction.Changed());
+    } else {
+      CHECK(reduction.Changed());
+      CHECK_EQ(expect, reduction.replacement());
+    }
+  }
+
+  Node* Int32Constant(int32_t val) {
+    return graph.NewNode(common.Int32Constant(val));
+  }
+
+  Node* Float64Constant(double val) {
+    return graph.NewNode(common.Float64Constant(val));
+  }
+
+  Node* Parameter(int32_t index = 0) {
+    return graph.NewNode(common.Parameter(index));
+  }
+
+  Node* Phi(Node* a) {
+    return SetSelfReferences(graph.NewNode(common.Phi(1), a));
+  }
+
+  Node* Phi(Node* a, Node* b) {
+    return SetSelfReferences(graph.NewNode(common.Phi(2), a, b));
+  }
+
+  Node* Phi(Node* a, Node* b, Node* c) {
+    return SetSelfReferences(graph.NewNode(common.Phi(3), a, b, c));
+  }
+
+  Node* Phi(Node* a, Node* b, Node* c, Node* d) {
+    return SetSelfReferences(graph.NewNode(common.Phi(4), a, b, c, d));
+  }
+
+  Node* PhiWithControl(Node* a, Node* control) {
+    return SetSelfReferences(graph.NewNode(common.Phi(1), a, control));
+  }
+
+  Node* PhiWithControl(Node* a, Node* b, Node* control) {
+    return SetSelfReferences(graph.NewNode(common.Phi(2), a, b, control));
+  }
+
+  Node* SetSelfReferences(Node* node) {
+    Node::Inputs inputs = node->inputs();
+    for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+         ++iter) {
+      Node* input = *iter;
+      if (input == self) node->ReplaceInput(iter.index(), node);
+    }
+    return node;
+  }
+};
+
+
+TEST(PhiReduce1) {
+  PhiReducerTester R;
+  Node* zero = R.Int32Constant(0);
+  Node* one = R.Int32Constant(1);
+  Node* oneish = R.Float64Constant(1.1);
+  Node* param = R.Parameter();
+
+  Node* singles[] = {zero, one, oneish, param};
+  for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+    R.CheckReduce(singles[i], R.Phi(singles[i]));
+  }
+}
+
+
+TEST(PhiReduce2) {
+  PhiReducerTester R;
+  Node* zero = R.Int32Constant(0);
+  Node* one = R.Int32Constant(1);
+  Node* oneish = R.Float64Constant(1.1);
+  Node* param = R.Parameter();
+
+  Node* singles[] = {zero, one, oneish, param};
+  for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+    Node* a = singles[i];
+    R.CheckReduce(a, R.Phi(a, a));
+  }
+
+  for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+    Node* a = singles[i];
+    R.CheckReduce(a, R.Phi(R.self, a));
+    R.CheckReduce(a, R.Phi(a, R.self));
+  }
+
+  for (size_t i = 1; i < ARRAY_SIZE(singles); i++) {
+    Node* a = singles[i], *b = singles[0];
+    Node* phi1 = R.Phi(b, a);
+    R.CheckReduce(phi1, phi1);
+
+    Node* phi2 = R.Phi(a, b);
+    R.CheckReduce(phi2, phi2);
+  }
+}
+
+
+TEST(PhiReduce3) {
+  PhiReducerTester R;
+  Node* zero = R.Int32Constant(0);
+  Node* one = R.Int32Constant(1);
+  Node* oneish = R.Float64Constant(1.1);
+  Node* param = R.Parameter();
+
+  Node* singles[] = {zero, one, oneish, param};
+  for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+    Node* a = singles[i];
+    R.CheckReduce(a, R.Phi(a, a, a));
+  }
+
+  for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+    Node* a = singles[i];
+    R.CheckReduce(a, R.Phi(R.self, a, a));
+    R.CheckReduce(a, R.Phi(a, R.self, a));
+    R.CheckReduce(a, R.Phi(a, a, R.self));
+  }
+
+  for (size_t i = 1; i < ARRAY_SIZE(singles); i++) {
+    Node* a = singles[i], *b = singles[0];
+    Node* phi1 = R.Phi(b, a, a);
+    R.CheckReduce(phi1, phi1);
+
+    Node* phi2 = R.Phi(a, b, a);
+    R.CheckReduce(phi2, phi2);
+
+    Node* phi3 = R.Phi(a, a, b);
+    R.CheckReduce(phi3, phi3);
+  }
+}
+
+
+TEST(PhiReduce4) {
+  PhiReducerTester R;
+  Node* zero = R.Int32Constant(0);
+  Node* one = R.Int32Constant(1);
+  Node* oneish = R.Float64Constant(1.1);
+  Node* param = R.Parameter();
+
+  Node* singles[] = {zero, one, oneish, param};
+  for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+    Node* a = singles[i];
+    R.CheckReduce(a, R.Phi(a, a, a, a));
+  }
+
+  for (size_t i = 0; i < ARRAY_SIZE(singles); i++) {
+    Node* a = singles[i];
+    R.CheckReduce(a, R.Phi(R.self, a, a, a));
+    R.CheckReduce(a, R.Phi(a, R.self, a, a));
+    R.CheckReduce(a, R.Phi(a, a, R.self, a));
+    R.CheckReduce(a, R.Phi(a, a, a, R.self));
+
+    R.CheckReduce(a, R.Phi(R.self, R.self, a, a));
+    R.CheckReduce(a, R.Phi(a, R.self, R.self, a));
+    R.CheckReduce(a, R.Phi(a, a, R.self, R.self));
+    R.CheckReduce(a, R.Phi(R.self, a, a, R.self));
+  }
+
+  for (size_t i = 1; i < ARRAY_SIZE(singles); i++) {
+    Node* a = singles[i], *b = singles[0];
+    Node* phi1 = R.Phi(b, a, a, a);
+    R.CheckReduce(phi1, phi1);
+
+    Node* phi2 = R.Phi(a, b, a, a);
+    R.CheckReduce(phi2, phi2);
+
+    Node* phi3 = R.Phi(a, a, b, a);
+    R.CheckReduce(phi3, phi3);
+
+    Node* phi4 = R.Phi(a, a, a, b);
+    R.CheckReduce(phi4, phi4);
+  }
+}
+
+
+TEST(PhiReduceShouldIgnoreControlNodes) {
+  PhiReducerTester R;
+  Node* zero = R.Int32Constant(0);
+  Node* one = R.Int32Constant(1);
+  Node* oneish = R.Float64Constant(1.1);
+  Node* param = R.Parameter();
+
+  Node* singles[] = {zero, one, oneish, param};
+  for (size_t i = 0; i < ARRAY_SIZE(singles); ++i) {
+    R.CheckReduce(singles[i], R.PhiWithControl(singles[i], R.dead));
+    R.CheckReduce(singles[i], R.PhiWithControl(R.self, singles[i], R.dead));
+    R.CheckReduce(singles[i], R.PhiWithControl(singles[i], R.self, R.dead));
+  }
+}
diff --git a/test/cctest/compiler/test-pipeline.cc b/test/cctest/compiler/test-pipeline.cc
new file mode 100644
index 0000000..84ccc28
--- /dev/null
+++ b/test/cctest/compiler/test-pipeline.cc
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler.h"
+#include "src/compiler/pipeline.h"
+#include "src/handles.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/scopes.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(PipelineAdd) {
+  InitializedHandleScope handles;
+  const char* source = "(function(a,b) { return a + b; })";
+  Handle<JSFunction> function = v8::Utils::OpenHandle(
+      *v8::Handle<v8::Function>::Cast(CompileRun(source)));
+  CompilationInfoWithZone info(function);
+
+  CHECK(Parser::Parse(&info));
+  StrictMode strict_mode = info.function()->strict_mode();
+  info.SetStrictMode(strict_mode);
+  CHECK(Rewriter::Rewrite(&info));
+  CHECK(Scope::Analyze(&info));
+  CHECK_NE(NULL, info.scope());
+
+  Pipeline pipeline(&info);
+  Handle<Code> code = pipeline.GenerateCode();
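+  // On TurboFan-supported targets the pipeline must produce code; on other
+  // targets GenerateCode() may bail out, so the result goes unchecked there.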
+#if V8_TURBOFAN_TARGET
+  CHECK(Pipeline::SupportedTarget());
+  CHECK(!code.is_null());
+#else
+  USE(code);
+#endif
+}
diff --git a/test/cctest/compiler/test-representation-change.cc b/test/cctest/compiler/test-representation-change.cc
new file mode 100644
index 0000000..2b63307
--- /dev/null
+++ b/test/cctest/compiler/test-representation-change.cc
@@ -0,0 +1,281 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/graph-builder-tester.h"
+
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/representation-change.h"
+#include "src/compiler/typer.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+namespace v8 {  // for friendliness.
+namespace internal {
+namespace compiler {
+
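+// Test harness around a RepresentationChanger: provides helpers to assert
+// that a conversion yields a particular constant, is a no-op, or is flagged
+// as a type error (via the testing_type_errors_ back door).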
+class RepresentationChangerTester : public HandleAndZoneScope,
+                                    public GraphAndBuilders {
+ public:
+  RepresentationChangerTester()
+      : GraphAndBuilders(main_zone()),
+        typer_(main_zone()),
+        jsgraph_(main_graph_, &main_common_, &typer_),
+        changer_(&jsgraph_, &main_simplified_, &main_machine_, main_isolate()) {
+  }
+
+  Typer typer_;
+  JSGraph jsgraph_;
+  RepresentationChanger changer_;
+
+  Isolate* isolate() { return main_isolate(); }
+  Graph* graph() { return main_graph_; }
+  CommonOperatorBuilder* common() { return &main_common_; }
+  JSGraph* jsgraph() { return &jsgraph_; }
+  RepresentationChanger* changer() { return &changer_; }
+
+  // TODO(titzer): use ValueChecker / ValueUtil
+  void CheckInt32Constant(Node* n, int32_t expected) {
+    ValueMatcher<int32_t> m(n);
+    CHECK(m.HasValue());
+    CHECK_EQ(expected, m.Value());
+  }
+
+  void CheckHeapConstant(Node* n, Object* expected) {
+    ValueMatcher<Handle<Object> > m(n);
+    CHECK(m.HasValue());
+    CHECK_EQ(expected, *m.Value());
+  }
+
+  void CheckNumberConstant(Node* n, double expected) {
+    ValueMatcher<double> m(n);
+    CHECK_EQ(IrOpcode::kNumberConstant, n->opcode());
+    CHECK(m.HasValue());
+    CHECK_EQ(expected, m.Value());
+  }
+
+  Node* Parameter(int index = 0) {
+    return graph()->NewNode(common()->Parameter(index));
+  }
+
+  void CheckTypeError(RepTypeUnion from, RepTypeUnion to) {
+    changer()->testing_type_errors_ = true;
+    changer()->type_error_ = false;
+    Node* n = Parameter(0);
+    Node* c = changer()->GetRepresentationFor(n, from, to);
+    CHECK_EQ(n, c);
+    CHECK(changer()->type_error_);
+  }
+
+  void CheckNop(RepTypeUnion from, RepTypeUnion to) {
+    Node* n = Parameter(0);
+    Node* c = changer()->GetRepresentationFor(n, from, to);
+    CHECK_EQ(n, c);
+  }
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+
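+// The five machine representations exercised below. A RepTypeUnion is a
+// bitwise OR of representation and type bits, so e.g. (rWord32 | tInt32)
+// describes a 32-bit word known to hold a signed integer.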
+static const RepType all_reps[] = {rBit, rWord32, rWord64, rFloat64, rTagged};
+
+
+// TODO(titzer): lift this to ValueHelper
+static const double double_inputs[] = {
+    0.0,   -0.0,    1.0,    -1.0,        0.1,         1.4,    -1.7,
+    2,     5,       6,      982983,      888,         -999.8, 3.1e7,
+    -2e66, 2.3e124, -12e73, V8_INFINITY, -V8_INFINITY};
+
+
+static const int32_t int32_inputs[] = {
+    0,      1,                                -1,
+    2,      5,                                6,
+    982983, 888,                              -999,
+    65535,  static_cast<int32_t>(0xFFFFFFFF), static_cast<int32_t>(0x80000000)};
+
+
+static const uint32_t uint32_inputs[] = {
+    0,      1,   static_cast<uint32_t>(-1),   2,     5,          6,
+    982983, 888, static_cast<uint32_t>(-999), 65535, 0xFFFFFFFF, 0x80000000};
+
+
+TEST(BoolToBit_constant) {
+  RepresentationChangerTester r;
+
+  Node* true_node = r.jsgraph()->TrueConstant();
+  Node* true_bit = r.changer()->GetRepresentationFor(true_node, rTagged, rBit);
+  r.CheckInt32Constant(true_bit, 1);
+
+  Node* false_node = r.jsgraph()->FalseConstant();
+  Node* false_bit =
+      r.changer()->GetRepresentationFor(false_node, rTagged, rBit);
+  r.CheckInt32Constant(false_bit, 0);
+}
+
+
+TEST(BitToBool_constant) {
+  RepresentationChangerTester r;
+
+  for (int i = -5; i < 5; i++) {
+    Node* node = r.jsgraph()->Int32Constant(i);
+    Node* val = r.changer()->GetRepresentationFor(node, rBit, rTagged);
+    r.CheckHeapConstant(val, i == 0 ? r.isolate()->heap()->false_value()
+                                    : r.isolate()->heap()->true_value());
+  }
+}
+
+
+TEST(ToTagged_constant) {
+  RepresentationChangerTester r;
+
+  for (size_t i = 0; i < ARRAY_SIZE(double_inputs); i++) {
+    Node* n = r.jsgraph()->Float64Constant(double_inputs[i]);
+    Node* c = r.changer()->GetRepresentationFor(n, rFloat64, rTagged);
+    r.CheckNumberConstant(c, double_inputs[i]);
+  }
+
+  for (size_t i = 0; i < ARRAY_SIZE(int32_inputs); i++) {
+    Node* n = r.jsgraph()->Int32Constant(int32_inputs[i]);
+    Node* c = r.changer()->GetRepresentationFor(n, rWord32 | tInt32, rTagged);
+    r.CheckNumberConstant(c, static_cast<double>(int32_inputs[i]));
+  }
+
+  for (size_t i = 0; i < ARRAY_SIZE(uint32_inputs); i++) {
+    Node* n = r.jsgraph()->Int32Constant(uint32_inputs[i]);
+    Node* c = r.changer()->GetRepresentationFor(n, rWord32 | tUint32, rTagged);
+    r.CheckNumberConstant(c, static_cast<double>(uint32_inputs[i]));
+  }
+}
+
+
+static void CheckChange(IrOpcode::Value expected, RepTypeUnion from,
+                        RepTypeUnion to) {
+  RepresentationChangerTester r;
+
+  Node* n = r.Parameter();
+  Node* c = r.changer()->GetRepresentationFor(n, from, to);
+
+  CHECK_NE(c, n);
+  CHECK_EQ(expected, c->opcode());
+  CHECK_EQ(n, c->InputAt(0));
+}
+
+
+TEST(SingleChanges) {
+  CheckChange(IrOpcode::kChangeBoolToBit, rTagged, rBit);
+  CheckChange(IrOpcode::kChangeBitToBool, rBit, rTagged);
+
+  CheckChange(IrOpcode::kChangeInt32ToTagged, rWord32 | tInt32, rTagged);
+  CheckChange(IrOpcode::kChangeUint32ToTagged, rWord32 | tUint32, rTagged);
+  CheckChange(IrOpcode::kChangeFloat64ToTagged, rFloat64, rTagged);
+
+  CheckChange(IrOpcode::kChangeTaggedToInt32, rTagged | tInt32, rWord32);
+  CheckChange(IrOpcode::kChangeTaggedToUint32, rTagged | tUint32, rWord32);
+  CheckChange(IrOpcode::kChangeTaggedToFloat64, rTagged, rFloat64);
+
+  // Int32,Uint32 <-> Float64 are actually machine conversions.
+  CheckChange(IrOpcode::kConvertInt32ToFloat64, rWord32 | tInt32, rFloat64);
+  CheckChange(IrOpcode::kConvertUint32ToFloat64, rWord32 | tUint32, rFloat64);
+  CheckChange(IrOpcode::kConvertFloat64ToInt32, rFloat64 | tInt32, rWord32);
+  CheckChange(IrOpcode::kConvertFloat64ToUint32, rFloat64 | tUint32, rWord32);
+}
+
+
+TEST(SignednessInWord32) {
+  RepresentationChangerTester r;
+
+  // TODO(titzer): these are currently type errors because the output type is
+  // not specified. Maybe the RepresentationChanger should assume anything to
+  // or from {rWord32} is {tInt32}, i.e. signed, unless explicitly marked
+  // otherwise?
+  r.CheckTypeError(rTagged, rWord32 | tInt32);
+  r.CheckTypeError(rTagged, rWord32 | tUint32);
+  r.CheckTypeError(rWord32, rFloat64);
+  r.CheckTypeError(rFloat64, rWord32);
+
+  //  CheckChange(IrOpcode::kChangeTaggedToInt32, rTagged, rWord32 | tInt32);
+  //  CheckChange(IrOpcode::kChangeTaggedToUint32, rTagged, rWord32 | tUint32);
+  //  CheckChange(IrOpcode::kConvertInt32ToFloat64, rWord32, rFloat64);
+  //  CheckChange(IrOpcode::kConvertFloat64ToInt32, rFloat64, rWord32);
+}
+
+
+TEST(Nops) {
+  RepresentationChangerTester r;
+
+  // X -> X is always a nop for any single representation X.
+  for (size_t i = 0; i < ARRAY_SIZE(all_reps); i++) {
+    r.CheckNop(all_reps[i], all_reps[i]);
+  }
+
+  // 32-bit or 64-bit words can be used as branch conditions (rBit).
+  r.CheckNop(rWord32, rBit);
+  r.CheckNop(rWord32, rBit | tBool);
+  r.CheckNop(rWord64, rBit);
+  r.CheckNop(rWord64, rBit | tBool);
+
+  // rBit (the result of a comparison) is implicitly usable as a word value.
+  r.CheckNop(rBit, rWord32);
+  r.CheckNop(rBit | tBool, rWord32);
+  r.CheckNop(rBit, rWord64);
+  r.CheckNop(rBit | tBool, rWord64);
+}
+
+
+TEST(TypeErrors) {
+  RepresentationChangerTester r;
+
+  // Floats cannot be implicitly converted to/from comparison conditions.
+  r.CheckTypeError(rFloat64, rBit);
+  r.CheckTypeError(rFloat64, rBit | tBool);
+  r.CheckTypeError(rBit, rFloat64);
+  r.CheckTypeError(rBit | tBool, rFloat64);
+
+  // Word64 is internal and shouldn't be implicitly converted.
+  r.CheckTypeError(rWord64, rTagged | tBool);
+  r.CheckTypeError(rWord64, rTagged);
+  r.CheckTypeError(rTagged, rWord64);
+  r.CheckTypeError(rTagged | tBool, rWord64);
+
+  // Word64 / Word32 shouldn't be implicitly converted.
+  r.CheckTypeError(rWord64, rWord32);
+  r.CheckTypeError(rWord32, rWord64);
+  r.CheckTypeError(rWord64, rWord32 | tInt32);
+  r.CheckTypeError(rWord32 | tInt32, rWord64);
+  r.CheckTypeError(rWord64, rWord32 | tUint32);
+  r.CheckTypeError(rWord32 | tUint32, rWord64);
+
+  for (size_t i = 0; i < ARRAY_SIZE(all_reps); i++) {
+    for (size_t j = 0; j < ARRAY_SIZE(all_reps); j++) {
+      if (i == j) continue;
+      // Only a single from representation is allowed.
+      r.CheckTypeError(all_reps[i] | all_reps[j], rTagged);
+    }
+  }
+}
+
+
+TEST(CompleteMatrix) {
+  // TODO(titzer): test all variants in the matrix.
+  // rB
+  // tBrB
+  // tBrT
+  // rW32
+  // tIrW32
+  // tUrW32
+  // rW64
+  // tIrW64
+  // tUrW64
+  // rF64
+  // tIrF64
+  // tUrF64
+  // tArF64
+  // rT
+  // tArT
+}
diff --git a/test/cctest/compiler/test-run-deopt.cc b/test/cctest/compiler/test-run-deopt.cc
new file mode 100644 (file)
index 0000000..36998d0
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "v8.h"
+
+#include "function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+
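+// Forces a deopt in the middle of an optimized function via natives syntax
+// and checks that execution resumes correctly in unoptimized code, still
+// returning a + b.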
+TEST(TurboSimpleDeopt) {
+  FLAG_allow_natives_syntax = true;
+  FLAG_turbo_deoptimization = true;
+
+  FunctionTester T(
+      "(function f(a) {"
+      "var b = 1;"
+      "if (!%IsOptimized()) return 0;"
+      "%DeoptimizeFunction(f);"
+      "if (%IsOptimized()) return 0;"
+      "return a + b; })");
+
+  T.CheckCall(T.Val(2), T.Val(1));
+}
+
+
+TEST(TurboTrivialDeopt) {
+  FLAG_allow_natives_syntax = true;
+  FLAG_turbo_deoptimization = true;
+
+  FunctionTester T(
+      "(function foo() {"
+      "%DeoptimizeFunction(foo);"
+      "return 1; })");
+
+  T.CheckCall(T.Val(1));
+}
diff --git a/test/cctest/compiler/test-run-intrinsics.cc b/test/cctest/compiler/test-run-intrinsics.cc
new file mode 100644 (file)
index 0000000..a1b5676
--- /dev/null
@@ -0,0 +1,211 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+
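+// Each test below compiles a small closure around a %_ inline intrinsic and
+// checks its observable behavior over a spread of value kinds.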
+TEST(IsSmi) {
+  FunctionTester T("(function(a) { return %_IsSmi(a); })");
+
+  T.CheckTrue(T.Val(1));
+  T.CheckFalse(T.Val(1.1));
+  T.CheckFalse(T.Val(-0.0));
+  T.CheckTrue(T.Val(-2));
+  T.CheckFalse(T.Val(-2.3));
+  T.CheckFalse(T.undefined());
+}
+
+
+TEST(IsNonNegativeSmi) {
+  FunctionTester T("(function(a) { return %_IsNonNegativeSmi(a); })");
+
+  T.CheckTrue(T.Val(1));
+  T.CheckFalse(T.Val(1.1));
+  T.CheckFalse(T.Val(-0.0));
+  T.CheckFalse(T.Val(-2));
+  T.CheckFalse(T.Val(-2.3));
+  T.CheckFalse(T.undefined());
+}
+
+
+TEST(IsMinusZero) {
+  FunctionTester T("(function(a) { return %_IsMinusZero(a); })");
+
+  T.CheckFalse(T.Val(1));
+  T.CheckFalse(T.Val(1.1));
+  T.CheckTrue(T.Val(-0.0));
+  T.CheckFalse(T.Val(-2));
+  T.CheckFalse(T.Val(-2.3));
+  T.CheckFalse(T.undefined());
+}
+
+
+TEST(IsArray) {
+  FunctionTester T("(function(a) { return %_IsArray(a); })");
+
+  T.CheckFalse(T.NewObject("(function() {})"));
+  T.CheckTrue(T.NewObject("([1])"));
+  T.CheckFalse(T.NewObject("({})"));
+  T.CheckFalse(T.NewObject("(/x/)"));
+  T.CheckFalse(T.undefined());
+  T.CheckFalse(T.null());
+  T.CheckFalse(T.Val("x"));
+  T.CheckFalse(T.Val(1));
+}
+
+
+TEST(IsObject) {
+  FunctionTester T("(function(a) { return %_IsObject(a); })");
+
+  T.CheckFalse(T.NewObject("(function() {})"));
+  T.CheckTrue(T.NewObject("([1])"));
+  T.CheckTrue(T.NewObject("({})"));
+  T.CheckTrue(T.NewObject("(/x/)"));
+  T.CheckFalse(T.undefined());
+  T.CheckTrue(T.null());
+  T.CheckFalse(T.Val("x"));
+  T.CheckFalse(T.Val(1));
+}
+
+
+TEST(IsFunction) {
+  FunctionTester T("(function(a) { return %_IsFunction(a); })");
+
+  T.CheckTrue(T.NewObject("(function() {})"));
+  T.CheckFalse(T.NewObject("([1])"));
+  T.CheckFalse(T.NewObject("({})"));
+  T.CheckFalse(T.NewObject("(/x/)"));
+  T.CheckFalse(T.undefined());
+  T.CheckFalse(T.null());
+  T.CheckFalse(T.Val("x"));
+  T.CheckFalse(T.Val(1));
+}
+
+
+TEST(IsRegExp) {
+  FunctionTester T("(function(a) { return %_IsRegExp(a); })");
+
+  T.CheckFalse(T.NewObject("(function() {})"));
+  T.CheckFalse(T.NewObject("([1])"));
+  T.CheckFalse(T.NewObject("({})"));
+  T.CheckTrue(T.NewObject("(/x/)"));
+  T.CheckFalse(T.undefined());
+  T.CheckFalse(T.null());
+  T.CheckFalse(T.Val("x"));
+  T.CheckFalse(T.Val(1));
+}
+
+
+TEST(ClassOf) {
+  FunctionTester T("(function(a) { return %_ClassOf(a); })");
+
+  T.CheckCall(T.Val("Function"), T.NewObject("(function() {})"));
+  T.CheckCall(T.Val("Array"), T.NewObject("([1])"));
+  T.CheckCall(T.Val("Object"), T.NewObject("({})"));
+  T.CheckCall(T.Val("RegExp"), T.NewObject("(/x/)"));
+  T.CheckCall(T.null(), T.undefined());
+  T.CheckCall(T.null(), T.null());
+  T.CheckCall(T.null(), T.Val("x"));
+  T.CheckCall(T.null(), T.Val(1));
+}
+
+
+TEST(ObjectEquals) {
+  FunctionTester T("(function(a,b) { return %_ObjectEquals(a,b); })");
+  CompileRun("var o = {}");
+
+  T.CheckTrue(T.NewObject("(o)"), T.NewObject("(o)"));
+  T.CheckTrue(T.Val("internal"), T.Val("internal"));
+  T.CheckTrue(T.true_value(), T.true_value());
+  T.CheckFalse(T.true_value(), T.false_value());
+  T.CheckFalse(T.NewObject("({})"), T.NewObject("({})"));
+  T.CheckFalse(T.Val("a"), T.Val("b"));
+}
+
+
+TEST(ValueOf) {
+  FunctionTester T("(function(a) { return %_ValueOf(a); })");
+
+  T.CheckCall(T.Val("a"), T.Val("a"));
+  T.CheckCall(T.Val("b"), T.NewObject("(new String('b'))"));
+  T.CheckCall(T.Val(123), T.Val(123));
+  T.CheckCall(T.Val(456), T.NewObject("(new Number(456))"));
+}
+
+
+TEST(SetValueOf) {
+  FunctionTester T("(function(a,b) { return %_SetValueOf(a,b); })");
+
+  T.CheckCall(T.Val("a"), T.NewObject("(new String)"), T.Val("a"));
+  T.CheckCall(T.Val(123), T.NewObject("(new Number)"), T.Val(123));
+  T.CheckCall(T.Val("x"), T.undefined(), T.Val("x"));
+}
+
+
+TEST(StringCharFromCode) {
+  FunctionTester T("(function(a) { return %_StringCharFromCode(a); })");
+
+  T.CheckCall(T.Val("a"), T.Val(97));
+  T.CheckCall(T.Val("\xE2\x9D\x8A"), T.Val(0x274A));
+  T.CheckCall(T.Val(""), T.undefined());
+}
+
+
+TEST(StringCharAt) {
+  FunctionTester T("(function(a,b) { return %_StringCharAt(a,b); })");
+
+  T.CheckCall(T.Val("e"), T.Val("huge fan!"), T.Val(3));
+  T.CheckCall(T.Val("f"), T.Val("\xE2\x9D\x8A fan!"), T.Val(2));
+  T.CheckCall(T.Val(""), T.Val("not a fan!"), T.Val(23));
+}
+
+
+TEST(StringCharCodeAt) {
+  FunctionTester T("(function(a,b) { return %_StringCharCodeAt(a,b); })");
+
+  T.CheckCall(T.Val('e'), T.Val("huge fan!"), T.Val(3));
+  T.CheckCall(T.Val('f'), T.Val("\xE2\x9D\x8A fan!"), T.Val(2));
+  T.CheckCall(T.nan(), T.Val("not a fan!"), T.Val(23));
+}
+
+
+TEST(StringAdd) {
+  FunctionTester T("(function(a,b) { return %_StringAdd(a,b); })");
+
+  T.CheckCall(T.Val("aaabbb"), T.Val("aaa"), T.Val("bbb"));
+  T.CheckCall(T.Val("aaa"), T.Val("aaa"), T.Val(""));
+  T.CheckCall(T.Val("bbb"), T.Val(""), T.Val("bbb"));
+}
+
+
+TEST(StringSubString) {
+  FunctionTester T("(function(a,b) { return %_SubString(a,b,b+3); })");
+
+  T.CheckCall(T.Val("aaa"), T.Val("aaabbb"), T.Val(0.0));
+  T.CheckCall(T.Val("abb"), T.Val("aaabbb"), T.Val(2));
+  T.CheckCall(T.Val("aaa"), T.Val("aaa"), T.Val(0.0));
+}
+
+
+TEST(StringCompare) {
+  FunctionTester T("(function(a,b) { return %_StringCompare(a,b); })");
+
+  T.CheckCall(T.Val(-1), T.Val("aaa"), T.Val("bbb"));
+  T.CheckCall(T.Val(0.0), T.Val("bbb"), T.Val("bbb"));
+  T.CheckCall(T.Val(+1), T.Val("ccc"), T.Val("bbb"));
+}
+
+
+TEST(CallFunction) {
+  FunctionTester T("(function(a,b) { return %_CallFunction(a, 1, 2, 3, b); })");
+  CompileRun("function f(a,b,c) { return a + b + c + this.d; }");
+
+  T.CheckCall(T.Val(129), T.NewObject("({d:123})"), T.NewObject("f"));
+  T.CheckCall(T.Val("6x"), T.NewObject("({d:'x'})"), T.NewObject("f"));
+}
diff --git a/test/cctest/compiler/test-run-jsbranches.cc b/test/cctest/compiler/test-run-jsbranches.cc
new file mode 100644 (file)
index 0000000..2eb4fa6
--- /dev/null
@@ -0,0 +1,262 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(Conditional) {
+  FunctionTester T("(function(a) { return a ? 23 : 42; })");
+
+  T.CheckCall(T.Val(23), T.true_value(), T.undefined());
+  T.CheckCall(T.Val(42), T.false_value(), T.undefined());
+  T.CheckCall(T.Val(42), T.undefined(), T.undefined());
+  T.CheckCall(T.Val(42), T.Val(0.0), T.undefined());
+  T.CheckCall(T.Val(23), T.Val(999), T.undefined());
+  T.CheckCall(T.Val(23), T.Val("x"), T.undefined());
+}
+
+
+TEST(LogicalAnd) {
+  FunctionTester T("(function(a,b) { return a && b; })");
+
+  T.CheckCall(T.true_value(), T.true_value(), T.true_value());
+  T.CheckCall(T.false_value(), T.false_value(), T.true_value());
+  T.CheckCall(T.false_value(), T.true_value(), T.false_value());
+  T.CheckCall(T.false_value(), T.false_value(), T.false_value());
+
+  T.CheckCall(T.Val(999), T.Val(777), T.Val(999));
+  T.CheckCall(T.Val(0.0), T.Val(0.0), T.Val(999));
+  T.CheckCall(T.Val("b"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(LogicalOr) {
+  FunctionTester T("(function(a,b) { return a || b; })");
+
+  T.CheckCall(T.true_value(), T.true_value(), T.true_value());
+  T.CheckCall(T.true_value(), T.false_value(), T.true_value());
+  T.CheckCall(T.true_value(), T.true_value(), T.false_value());
+  T.CheckCall(T.false_value(), T.false_value(), T.false_value());
+
+  T.CheckCall(T.Val(777), T.Val(777), T.Val(999));
+  T.CheckCall(T.Val(999), T.Val(0.0), T.Val(999));
+  T.CheckCall(T.Val("a"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(LogicalEffect) {
+  FunctionTester T("(function(a,b) { a && (b = a); return b; })");
+
+  T.CheckCall(T.true_value(), T.true_value(), T.true_value());
+  T.CheckCall(T.true_value(), T.false_value(), T.true_value());
+  T.CheckCall(T.true_value(), T.true_value(), T.false_value());
+  T.CheckCall(T.false_value(), T.false_value(), T.false_value());
+
+  T.CheckCall(T.Val(777), T.Val(777), T.Val(999));
+  T.CheckCall(T.Val(999), T.Val(0.0), T.Val(999));
+  T.CheckCall(T.Val("a"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(IfStatement) {
+  FunctionTester T("(function(a) { if (a) { return 1; } else { return 2; } })");
+
+  T.CheckCall(T.Val(1), T.true_value(), T.undefined());
+  T.CheckCall(T.Val(2), T.false_value(), T.undefined());
+  T.CheckCall(T.Val(2), T.undefined(), T.undefined());
+  T.CheckCall(T.Val(2), T.Val(0.0), T.undefined());
+  T.CheckCall(T.Val(1), T.Val(999), T.undefined());
+  T.CheckCall(T.Val(1), T.Val("x"), T.undefined());
+}
+
+
+TEST(DoWhileStatement) {
+  FunctionTester T("(function(a,b) { do { a+=23; } while(a < b) return a; })");
+
+  T.CheckCall(T.Val(24), T.Val(1), T.Val(1));
+  T.CheckCall(T.Val(24), T.Val(1), T.Val(23));
+  T.CheckCall(T.Val(47), T.Val(1), T.Val(25));
+  T.CheckCall(T.Val("str23"), T.Val("str"), T.Val("str"));
+}
+
+
+TEST(WhileStatement) {
+  FunctionTester T("(function(a,b) { while(a < b) { a+=23; } return a; })");
+
+  T.CheckCall(T.Val(1), T.Val(1), T.Val(1));
+  T.CheckCall(T.Val(24), T.Val(1), T.Val(23));
+  T.CheckCall(T.Val(47), T.Val(1), T.Val(25));
+  T.CheckCall(T.Val("str"), T.Val("str"), T.Val("str"));
+}
+
+
+TEST(ForStatement) {
+  FunctionTester T("(function(a,b) { for (; a < b; a+=23) {} return a; })");
+
+  T.CheckCall(T.Val(1), T.Val(1), T.Val(1));
+  T.CheckCall(T.Val(24), T.Val(1), T.Val(23));
+  T.CheckCall(T.Val(47), T.Val(1), T.Val(25));
+  T.CheckCall(T.Val("str"), T.Val("str"), T.Val("str"));
+}
+
+
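+// Shared driver for the for-in variants below: each must tolerate a property
+// being deleted mid-iteration and return the last key visited.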
+static void TestForIn(const char* code) {
+  FunctionTester T(code);
+  T.CheckCall(T.undefined(), T.undefined());
+  T.CheckCall(T.undefined(), T.null());
+  T.CheckCall(T.undefined(), T.NewObject("({})"));
+  T.CheckCall(T.undefined(), T.Val(1));
+  T.CheckCall(T.Val("2"), T.Val("str"));
+  T.CheckCall(T.Val("a"), T.NewObject("({'a' : 1})"));
+  T.CheckCall(T.Val("2"), T.NewObject("([1, 2, 3])"));
+  T.CheckCall(T.Val("a"), T.NewObject("({'a' : 1, 'b' : 1})"), T.Val("b"));
+  T.CheckCall(T.Val("1"), T.NewObject("([1, 2, 3])"), T.Val("2"));
+}
+
+
+TEST(ForInStatement) {
+  // Variable assignment.
+  TestForIn(
+      "(function(a, b) {"
+      "var last;"
+      "for (var x in a) {"
+      "  if (b) { delete a[b]; b = undefined; }"
+      "  last = x;"
+      "}"
+      "return last;})");
+  // Indexed assignment.
+  TestForIn(
+      "(function(a, b) {"
+      "var array = [0, 1, undefined];"
+      "for (array[2] in a) {"
+      "  if (b) { delete a[b]; b = undefined; }"
+      "}"
+      "return array[2];})");
+  // Named assignment.
+  TestForIn(
+      "(function(a, b) {"
+      "var obj = {'a' : undefined};"
+      "for (obj.a in a) {"
+      "  if (b) { delete a[b]; b = undefined; }"
+      "}"
+      "return obj.a;})");
+}
+
+
+TEST(SwitchStatement) {
+  const char* src =
+      "(function(a,b) {"
+      "  var r = '-';"
+      "  switch (a) {"
+      "    case 'x'    : r += 'X-';"
+      "    case b + 'b': r += 'B-';"
+      "    default     : r += 'D-';"
+      "    case 'y'    : r += 'Y-';"
+      "  }"
+      "  return r;"
+      "})";
+  FunctionTester T(src);
+
+  T.CheckCall(T.Val("-X-B-D-Y-"), T.Val("x"), T.Val("B"));
+  T.CheckCall(T.Val("-B-D-Y-"), T.Val("Bb"), T.Val("B"));
+  T.CheckCall(T.Val("-D-Y-"), T.Val("z"), T.Val("B"));
+  T.CheckCall(T.Val("-Y-"), T.Val("y"), T.Val("B"));
+
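+  // The case label (b + 'b') re-evaluates o's toString on every call, so c
+  // yields 0, 1, 2 across the three calls and only the second matches "1b".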
+  CompileRun("var c = 0; var o = { toString:function(){return c++} };");
+  T.CheckCall(T.Val("-D-Y-"), T.Val("1b"), T.NewObject("o"));
+  T.CheckCall(T.Val("-B-D-Y-"), T.Val("1b"), T.NewObject("o"));
+  T.CheckCall(T.Val("-D-Y-"), T.Val("1b"), T.NewObject("o"));
+}
+
+
+TEST(BlockBreakStatement) {
+  FunctionTester T("(function(a,b) { L:{ if (a) break L; b=1; } return b; })");
+
+  T.CheckCall(T.Val(7), T.true_value(), T.Val(7));
+  T.CheckCall(T.Val(1), T.false_value(), T.Val(7));
+}
+
+
+TEST(BlockReturnStatement) {
+  FunctionTester T("(function(a,b) { L:{ if (a) b=1; return b; } })");
+
+  T.CheckCall(T.Val(1), T.true_value(), T.Val(7));
+  T.CheckCall(T.Val(7), T.false_value(), T.Val(7));
+}
+
+
+TEST(NestedIfConditional) {
+  FunctionTester T("(function(a,b) { if (a) { b = (b?b:7) + 1; } return b; })");
+
+  T.CheckCall(T.Val(4), T.false_value(), T.Val(4));
+  T.CheckCall(T.Val(6), T.true_value(), T.Val(5));
+  T.CheckCall(T.Val(8), T.true_value(), T.undefined());
+}
+
+
+TEST(NestedIfLogical) {
+  const char* src =
+      "(function(a,b) {"
+      "  if (a || b) { return 1; } else { return 2; }"
+      "})";
+  FunctionTester T(src);
+
+  T.CheckCall(T.Val(1), T.true_value(), T.true_value());
+  T.CheckCall(T.Val(1), T.false_value(), T.true_value());
+  T.CheckCall(T.Val(1), T.true_value(), T.false_value());
+  T.CheckCall(T.Val(2), T.false_value(), T.false_value());
+  T.CheckCall(T.Val(1), T.Val(1.0), T.Val(1.0));
+  T.CheckCall(T.Val(1), T.Val(0.0), T.Val(1.0));
+  T.CheckCall(T.Val(1), T.Val(1.0), T.Val(0.0));
+  T.CheckCall(T.Val(2), T.Val(0.0), T.Val(0.0));
+}
+
+
+TEST(NestedIfElseFor) {
+  const char* src =
+      "(function(a,b) {"
+      "  if (!a) { return b - 3; } else { for (; a < b; a++); }"
+      "  return a;"
+      "})";
+  FunctionTester T(src);
+
+  T.CheckCall(T.Val(1), T.false_value(), T.Val(4));
+  T.CheckCall(T.Val(2), T.true_value(), T.Val(2));
+  T.CheckCall(T.Val(3), T.Val(3), T.Val(1));
+}
+
+
+TEST(NestedWhileWhile) {
+  const char* src =
+      "(function(a) {"
+      "  var i = a; while (false) while(false) return i;"
+      "  return i;"
+      "})";
+  FunctionTester T(src);
+
+  T.CheckCall(T.Val(2.0), T.Val(2.0), T.Val(-1.0));
+  T.CheckCall(T.Val(65.0), T.Val(65.0), T.Val(-1.0));
+}
+
+
+TEST(NestedForIf) {
+  FunctionTester T("(function(a,b) { for (; a > 1; a--) if (b) return 1; })");
+
+  T.CheckCall(T.Val(1), T.Val(3), T.true_value());
+  T.CheckCall(T.undefined(), T.Val(2), T.false_value());
+  T.CheckCall(T.undefined(), T.Val(1), T.null());
+}
+
+
+TEST(NestedForConditional) {
+  FunctionTester T("(function(a,b) { for (; a > 1; a--) return b ? 1 : 2; })");
+
+  T.CheckCall(T.Val(1), T.Val(3), T.true_value());
+  T.CheckCall(T.Val(2), T.Val(2), T.false_value());
+  T.CheckCall(T.undefined(), T.Val(1), T.null());
+}
diff --git a/test/cctest/compiler/test-run-jscalls.cc b/test/cctest/compiler/test-run-jscalls.cc
new file mode 100644 (file)
index 0000000..2ad7e50
--- /dev/null
@@ -0,0 +1,235 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(SimpleCall) {
+  FunctionTester T("(function(foo,a) { return foo(a); })");
+  Handle<JSFunction> foo = T.NewFunction("(function(a) { return a; })");
+
+  T.CheckCall(T.Val(3), foo, T.Val(3));
+  T.CheckCall(T.Val(3.1), foo, T.Val(3.1));
+  T.CheckCall(foo, foo, foo);
+  T.CheckCall(T.Val("Abba"), foo, T.Val("Abba"));
+}
+
+
+TEST(SimpleCall2) {
+  FunctionTester T("(function(foo,a) { return foo(a); })");
+  Handle<JSFunction> foo = T.NewFunction("(function(a) { return a; })");
+  T.Compile(foo);
+
+  T.CheckCall(T.Val(3), foo, T.Val(3));
+  T.CheckCall(T.Val(3.1), foo, T.Val(3.1));
+  T.CheckCall(foo, foo, foo);
+  T.CheckCall(T.Val("Abba"), foo, T.Val("Abba"));
+}
+
+
+TEST(ConstCall) {
+  FunctionTester T("(function(foo,a) { return foo(a,3); })");
+  Handle<JSFunction> foo = T.NewFunction("(function(a,b) { return a + b; })");
+  T.Compile(foo);
+
+  T.CheckCall(T.Val(6), foo, T.Val(3));
+  T.CheckCall(T.Val(6.1), foo, T.Val(3.1));
+  T.CheckCall(T.Val("function (a,b) { return a + b; }3"), foo, foo);
+  T.CheckCall(T.Val("Abba3"), foo, T.Val("Abba"));
+}
+
+
+TEST(ConstCall2) {
+  FunctionTester T("(function(foo,a) { return foo(a,\"3\"); })");
+  Handle<JSFunction> foo = T.NewFunction("(function(a,b) { return a + b; })");
+  T.Compile(foo);
+
+  T.CheckCall(T.Val("33"), foo, T.Val(3));
+  T.CheckCall(T.Val("3.13"), foo, T.Val(3.1));
+  T.CheckCall(T.Val("function (a,b) { return a + b; }3"), foo, foo);
+  T.CheckCall(T.Val("Abba3"), foo, T.Val("Abba"));
+}
+
+
+TEST(PropertyNamedCall) {
+  FunctionTester T("(function(a,b) { return a.foo(b,23); })");
+  CompileRun("function foo(y,z) { return this.x + y + z; }");
+
+  T.CheckCall(T.Val(32), T.NewObject("({ foo:foo, x:4 })"), T.Val(5));
+  T.CheckCall(T.Val("xy23"), T.NewObject("({ foo:foo, x:'x' })"), T.Val("y"));
+  T.CheckCall(T.nan(), T.NewObject("({ foo:foo, y:0 })"), T.Val(3));
+}
+
+
+TEST(PropertyKeyedCall) {
+  FunctionTester T("(function(a,b) { var f = 'foo'; return a[f](b,23); })");
+  CompileRun("function foo(y,z) { return this.x + y + z; }");
+
+  T.CheckCall(T.Val(32), T.NewObject("({ foo:foo, x:4 })"), T.Val(5));
+  T.CheckCall(T.Val("xy23"), T.NewObject("({ foo:foo, x:'x' })"), T.Val("y"));
+  T.CheckCall(T.nan(), T.NewObject("({ foo:foo, y:0 })"), T.Val(3));
+}
+
+
+TEST(GlobalCall) {
+  FunctionTester T("(function(a,b) { return foo(a,b); })");
+  CompileRun("function foo(a,b) { return a + b + this.c; }");
+  CompileRun("var c = 23;");
+
+  T.CheckCall(T.Val(32), T.Val(4), T.Val(5));
+  T.CheckCall(T.Val("xy23"), T.Val("x"), T.Val("y"));
+  T.CheckCall(T.nan(), T.undefined(), T.Val(3));
+}
+
+
+TEST(LookupCall) {
+  FunctionTester T("(function(a,b) { with (a) { return foo(a,b); } })");
+
+  CompileRun("function f1(a,b) { return a.val + b; }");
+  T.CheckCall(T.Val(5), T.NewObject("({ foo:f1, val:2 })"), T.Val(3));
+  T.CheckCall(T.Val("xy"), T.NewObject("({ foo:f1, val:'x' })"), T.Val("y"));
+
+  CompileRun("function f2(a,b) { return this.val + b; }");
+  T.CheckCall(T.Val(9), T.NewObject("({ foo:f2, val:4 })"), T.Val(5));
+  T.CheckCall(T.Val("xy"), T.NewObject("({ foo:f2, val:'x' })"), T.Val("y"));
+}
+
+
+TEST(MismatchCallTooFew) {
+  FunctionTester T("(function(a,b) { return foo(a,b); })");
+  CompileRun("function foo(a,b,c) { return a + b + c; }");
+
+  T.CheckCall(T.nan(), T.Val(23), T.Val(42));
+  T.CheckCall(T.nan(), T.Val(4.2), T.Val(2.3));
+  T.CheckCall(T.Val("abundefined"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(MismatchCallTooMany) {
+  FunctionTester T("(function(a,b) { return foo(a,b); })");
+  CompileRun("function foo(a) { return a; }");
+
+  T.CheckCall(T.Val(23), T.Val(23), T.Val(42));
+  T.CheckCall(T.Val(4.2), T.Val(4.2), T.Val(2.3));
+  T.CheckCall(T.Val("a"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(ConstructorCall) {
+  FunctionTester T("(function(a,b) { return new foo(a,b).value; })");
+  CompileRun("function foo(a,b) { return { value: a + b + this.c }; }");
+  CompileRun("foo.prototype.c = 23;");
+
+  T.CheckCall(T.Val(32), T.Val(4), T.Val(5));
+  T.CheckCall(T.Val("xy23"), T.Val("x"), T.Val("y"));
+  T.CheckCall(T.nan(), T.undefined(), T.Val(3));
+}
+
+
+// TODO(titzer): factor these out into test-runtime-calls.cc
+TEST(RuntimeCallCPP1) {
+  FLAG_allow_natives_syntax = true;
+  FunctionTester T("(function(a) { return %ToBool(a); })");
+
+  T.CheckCall(T.true_value(), T.Val(23), T.undefined());
+  T.CheckCall(T.true_value(), T.Val(4.2), T.undefined());
+  T.CheckCall(T.true_value(), T.Val("str"), T.undefined());
+  T.CheckCall(T.true_value(), T.true_value(), T.undefined());
+  T.CheckCall(T.false_value(), T.false_value(), T.undefined());
+  T.CheckCall(T.false_value(), T.undefined(), T.undefined());
+  T.CheckCall(T.false_value(), T.Val(0.0), T.undefined());
+}
+
+
+TEST(RuntimeCallCPP2) {
+  FLAG_allow_natives_syntax = true;
+  FunctionTester T("(function(a,b) { return %NumberAdd(a, b); })");
+
+  T.CheckCall(T.Val(65), T.Val(42), T.Val(23));
+  T.CheckCall(T.Val(19), T.Val(42), T.Val(-23));
+  T.CheckCall(T.Val(6.5), T.Val(4.2), T.Val(2.3));
+}
+
+
+TEST(RuntimeCallJS) {
+  FLAG_allow_natives_syntax = true;
+  FunctionTester T("(function(a) { return %ToString(a); })");
+
+  T.CheckCall(T.Val("23"), T.Val(23), T.undefined());
+  T.CheckCall(T.Val("4.2"), T.Val(4.2), T.undefined());
+  T.CheckCall(T.Val("str"), T.Val("str"), T.undefined());
+  T.CheckCall(T.Val("true"), T.true_value(), T.undefined());
+  T.CheckCall(T.Val("false"), T.false_value(), T.undefined());
+  T.CheckCall(T.Val("undefined"), T.undefined(), T.undefined());
+}
+
+
+TEST(RuntimeCallInline) {
+  FLAG_allow_natives_syntax = true;
+  FunctionTester T("(function(a) { return %_IsObject(a); })");
+
+  T.CheckCall(T.false_value(), T.Val(23), T.undefined());
+  T.CheckCall(T.false_value(), T.Val(4.2), T.undefined());
+  T.CheckCall(T.false_value(), T.Val("str"), T.undefined());
+  T.CheckCall(T.false_value(), T.true_value(), T.undefined());
+  T.CheckCall(T.false_value(), T.false_value(), T.undefined());
+  T.CheckCall(T.false_value(), T.undefined(), T.undefined());
+  T.CheckCall(T.true_value(), T.NewObject("({})"), T.undefined());
+  T.CheckCall(T.true_value(), T.NewObject("([])"), T.undefined());
+}
+
+
+TEST(RuntimeCallBooleanize) {
+  // TODO(turbofan): %Booleanize will disappear; don't hesitate to remove this
+  // test case. The two-argument case is already covered by the test above.
+  FLAG_allow_natives_syntax = true;
+  FunctionTester T("(function(a,b) { return %Booleanize(a, b); })");
+
+  T.CheckCall(T.true_value(), T.Val(-1), T.Val(Token::LT));
+  T.CheckCall(T.false_value(), T.Val(-1), T.Val(Token::EQ));
+  T.CheckCall(T.false_value(), T.Val(-1), T.Val(Token::GT));
+
+  T.CheckCall(T.false_value(), T.Val(0.0), T.Val(Token::LT));
+  T.CheckCall(T.true_value(), T.Val(0.0), T.Val(Token::EQ));
+  T.CheckCall(T.false_value(), T.Val(0.0), T.Val(Token::GT));
+
+  T.CheckCall(T.false_value(), T.Val(1), T.Val(Token::LT));
+  T.CheckCall(T.false_value(), T.Val(1), T.Val(Token::EQ));
+  T.CheckCall(T.true_value(), T.Val(1), T.Val(Token::GT));
+}
+
+
+TEST(EvalCall) {
+  FunctionTester T("(function(a,b) { return eval(a); })");
+  Handle<JSObject> g(T.function->context()->global_object()->global_proxy());
+
+  T.CheckCall(T.Val(23), T.Val("17 + 6"), T.undefined());
+  T.CheckCall(T.Val("'Y'; a"), T.Val("'Y'; a"), T.Val("b-val"));
+  T.CheckCall(T.Val("b-val"), T.Val("'Y'; b"), T.Val("b-val"));
+  T.CheckCall(g, T.Val("this"), T.undefined());
+  T.CheckCall(g, T.Val("'use strict'; this"), T.undefined());
+
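+  // Once eval is overwritten with an ordinary function, the call is no longer
+  // a direct eval: the argument is not evaluated as code, and the receiver
+  // follows normal call rules (global proxy in sloppy mode, undefined in
+  // strict mode).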
+  CompileRun("eval = function(x) { return x; }");
+  T.CheckCall(T.Val("17 + 6"), T.Val("17 + 6"), T.undefined());
+
+  CompileRun("eval = function(x) { return this; }");
+  T.CheckCall(g, T.Val("17 + 6"), T.undefined());
+
+  CompileRun("eval = function(x) { 'use strict'; return this; }");
+  T.CheckCall(T.undefined(), T.Val("17 + 6"), T.undefined());
+}
+
+
+TEST(ReceiverPatching) {
+  // TODO(turbofan): Note that this test only checks that the function prologue
+  // patches an undefined receiver to the global receiver. If this starts to
+  // fail once we fix the calling protocol, just remove this test.
+  FunctionTester T("(function(a) { return this; })");
+  Handle<JSObject> g(T.function->context()->global_object()->global_proxy());
+  T.CheckCall(g, T.undefined());
+}
diff --git a/test/cctest/compiler/test-run-jsexceptions.cc b/test/cctest/compiler/test-run-jsexceptions.cc
new file mode 100644 (file)
index 0000000..0712ab6
--- /dev/null
@@ -0,0 +1,45 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(Throw) {
+  FunctionTester T("(function(a,b) { if (a) { throw b; } else { return b; }})");
+
+  T.CheckThrows(T.true_value(), T.NewObject("new Error"));
+  T.CheckCall(T.Val(23), T.false_value(), T.Val(23));
+}
+
+
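+// Every source line below is padded to 26 characters (newline included), so
+// the expected start positions are the line's offset plus the column of the
+// throw token.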
+TEST(ThrowSourcePosition) {
+  static const char* src =
+      "(function(a, b) {        \n"
+      "  if (a == 1) throw 1;   \n"
+      "  if (a == 2) {throw 2}  \n"
+      "  if (a == 3) {0;throw 3}\n"
+      "  throw 4;               \n"
+      "})                       ";
+  FunctionTester T(src);
+  v8::Handle<v8::Message> message;
+
+  message = T.CheckThrowsReturnMessage(T.Val(1), T.undefined());
+  CHECK(!message.IsEmpty());
+  CHECK_EQ(2, message->GetLineNumber());
+  CHECK_EQ(40, message->GetStartPosition());
+
+  message = T.CheckThrowsReturnMessage(T.Val(2), T.undefined());
+  CHECK(!message.IsEmpty());
+  CHECK_EQ(3, message->GetLineNumber());
+  CHECK_EQ(67, message->GetStartPosition());
+
+  message = T.CheckThrowsReturnMessage(T.Val(3), T.undefined());
+  CHECK(!message.IsEmpty());
+  CHECK_EQ(4, message->GetLineNumber());
+  CHECK_EQ(95, message->GetStartPosition());
+}
diff --git a/test/cctest/compiler/test-run-jsops.cc b/test/cctest/compiler/test-run-jsops.cc
new file mode 100644 (file)
index 0000000..eb39760
--- /dev/null
@@ -0,0 +1,524 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
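+// The Binop* tests exercise each JS binary operator through the pipeline,
+// mixing smis, doubles, strings and objects to cover the coercion paths.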
+TEST(BinopAdd) {
+  FunctionTester T("(function(a,b) { return a + b; })");
+
+  T.CheckCall(3, 1, 2);
+  T.CheckCall(-11, -2, -9);
+  T.CheckCall(-11, -1.5, -9.5);
+  T.CheckCall(T.Val("AB"), T.Val("A"), T.Val("B"));
+  T.CheckCall(T.Val("A11"), T.Val("A"), T.Val(11));
+  T.CheckCall(T.Val("12B"), T.Val(12), T.Val("B"));
+  T.CheckCall(T.Val("38"), T.Val("3"), T.Val("8"));
+  T.CheckCall(T.Val("31"), T.Val("3"), T.NewObject("([1])"));
+  T.CheckCall(T.Val("3[object Object]"), T.Val("3"), T.NewObject("({})"));
+}
+
+
+TEST(BinopSubtract) {
+  FunctionTester T("(function(a,b) { return a - b; })");
+
+  T.CheckCall(3, 4, 1);
+  T.CheckCall(3.0, 4.5, 1.5);
+  T.CheckCall(T.Val(-9), T.Val("0"), T.Val(9));
+  T.CheckCall(T.Val(-9), T.Val(0.0), T.Val("9"));
+  T.CheckCall(T.Val(1), T.Val("3"), T.Val("2"));
+  T.CheckCall(T.nan(), T.Val("3"), T.Val("B"));
+  T.CheckCall(T.Val(2), T.Val("3"), T.NewObject("([1])"));
+  T.CheckCall(T.nan(), T.Val("3"), T.NewObject("({})"));
+}
+
+
+TEST(BinopMultiply) {
+  FunctionTester T("(function(a,b) { return a * b; })");
+
+  T.CheckCall(6, 3, 2);
+  T.CheckCall(4.5, 2.0, 2.25);
+  T.CheckCall(T.Val(6), T.Val("3"), T.Val(2));
+  T.CheckCall(T.Val(4.5), T.Val(2.0), T.Val("2.25"));
+  T.CheckCall(T.Val(6), T.Val("3"), T.Val("2"));
+  T.CheckCall(T.nan(), T.Val("3"), T.Val("B"));
+  T.CheckCall(T.Val(3), T.Val("3"), T.NewObject("([1])"));
+  T.CheckCall(T.nan(), T.Val("3"), T.NewObject("({})"));
+}
+
+
+TEST(BinopDivide) {
+  FunctionTester T("(function(a,b) { return a / b; })");
+
+  T.CheckCall(2, 8, 4);
+  T.CheckCall(2.1, 8.4, 4);
+  T.CheckCall(V8_INFINITY, 8, 0);
+  T.CheckCall(-V8_INFINITY, -8, 0);
+  T.CheckCall(T.infinity(), T.Val(8), T.Val("0"));
+  T.CheckCall(T.minus_infinity(), T.Val("-8"), T.Val(0.0));
+  T.CheckCall(T.Val(1.5), T.Val("3"), T.Val("2"));
+  T.CheckCall(T.nan(), T.Val("3"), T.Val("B"));
+  T.CheckCall(T.Val(1.5), T.Val("3"), T.NewObject("([2])"));
+  T.CheckCall(T.nan(), T.Val("3"), T.NewObject("({})"));
+}
+
+
+TEST(BinopModulus) {
+  FunctionTester T("(function(a,b) { return a % b; })");
+
+  T.CheckCall(3, 8, 5);
+  T.CheckCall(T.Val(3), T.Val("8"), T.Val(5));
+  T.CheckCall(T.Val(3), T.Val(8), T.Val("5"));
+  T.CheckCall(T.Val(1), T.Val("3"), T.Val("2"));
+  T.CheckCall(T.nan(), T.Val("3"), T.Val("B"));
+  T.CheckCall(T.Val(1), T.Val("3"), T.NewObject("([2])"));
+  T.CheckCall(T.nan(), T.Val("3"), T.NewObject("({})"));
+}
+
+
+TEST(BinopShiftLeft) {
+  FunctionTester T("(function(a,b) { return a << b; })");
+
+  T.CheckCall(4, 2, 1);
+  T.CheckCall(T.Val(4), T.Val("2"), T.Val(1));
+  T.CheckCall(T.Val(4), T.Val(2), T.Val("1"));
+}
+
+
+TEST(BinopShiftRight) {
+  FunctionTester T("(function(a,b) { return a >> b; })");
+
+  T.CheckCall(4, 8, 1);
+  T.CheckCall(-4, -8, 1);
+  T.CheckCall(T.Val(4), T.Val("8"), T.Val(1));
+  T.CheckCall(T.Val(4), T.Val(8), T.Val("1"));
+}
+
+
+TEST(BinopShiftRightLogical) {
+  FunctionTester T("(function(a,b) { return a >>> b; })");
+
+  T.CheckCall(4, 8, 1);
+  T.CheckCall(0x7ffffffc, -8, 1);
+  T.CheckCall(T.Val(4), T.Val("8"), T.Val(1));
+  T.CheckCall(T.Val(4), T.Val(8), T.Val("1"));
+}
+
+
+TEST(BinopAnd) {
+  FunctionTester T("(function(a,b) { return a & b; })");
+
+  T.CheckCall(7, 7, 15);
+  T.CheckCall(7, 15, 7);
+  T.CheckCall(T.Val(7), T.Val("15"), T.Val(7));
+  T.CheckCall(T.Val(7), T.Val(15), T.Val("7"));
+}
+
+
+TEST(BinopOr) {
+  FunctionTester T("(function(a,b) { return a | b; })");
+
+  T.CheckCall(6, 4, 2);
+  T.CheckCall(6, 2, 4);
+  T.CheckCall(T.Val(6), T.Val("2"), T.Val(4));
+  T.CheckCall(T.Val(6), T.Val(2), T.Val("4"));
+}
+
+
+TEST(BinopXor) {
+  FunctionTester T("(function(a,b) { return a ^ b; })");
+
+  T.CheckCall(7, 15, 8);
+  T.CheckCall(7, 8, 15);
+  T.CheckCall(T.Val(7), T.Val("8"), T.Val(15));
+  T.CheckCall(T.Val(7), T.Val(8), T.Val("15"));
+}
+
+
+TEST(BinopStrictEqual) {
+  FunctionTester T("(function(a,b) { return a === b; })");
+
+  T.CheckTrue(7, 7);
+  T.CheckFalse(7, 8);
+  T.CheckTrue(7.1, 7.1);
+  T.CheckFalse(7.1, 8.1);
+
+  T.CheckTrue(T.Val("7.1"), T.Val("7.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("7.1"));
+  T.CheckFalse(T.Val(7), T.undefined());
+  T.CheckFalse(T.undefined(), T.Val(7));
+
+  CompileRun("var o = { desc : 'I am a singleton' }");
+  T.CheckFalse(T.NewObject("([1])"), T.NewObject("([1])"));
+  T.CheckFalse(T.NewObject("({})"), T.NewObject("({})"));
+  T.CheckTrue(T.NewObject("(o)"), T.NewObject("(o)"));
+}
+
+
+TEST(BinopEqual) {
+  FunctionTester T("(function(a,b) { return a == b; })");
+
+  T.CheckTrue(7, 7);
+  T.CheckFalse(7, 8);
+  T.CheckTrue(7.1, 7.1);
+  T.CheckFalse(7.1, 8.1);
+
+  T.CheckTrue(T.Val("7.1"), T.Val("7.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("7.1"));
+
+  CompileRun("var o = { desc : 'I am a singleton' }");
+  T.CheckFalse(T.NewObject("([1])"), T.NewObject("([1])"));
+  T.CheckFalse(T.NewObject("({})"), T.NewObject("({})"));
+  T.CheckTrue(T.NewObject("(o)"), T.NewObject("(o)"));
+}
+
+
+TEST(BinopNotEqual) {
+  FunctionTester T("(function(a,b) { return a != b; })");
+
+  T.CheckFalse(7, 7);
+  T.CheckTrue(7, 8);
+  T.CheckFalse(7.1, 7.1);
+  T.CheckTrue(7.1, 8.1);
+
+  T.CheckFalse(T.Val("7.1"), T.Val("7.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("7.1"));
+
+  CompileRun("var o = { desc : 'I am a singleton' }");
+  T.CheckTrue(T.NewObject("([1])"), T.NewObject("([1])"));
+  T.CheckTrue(T.NewObject("({})"), T.NewObject("({})"));
+  T.CheckFalse(T.NewObject("(o)"), T.NewObject("(o)"));
+}
+
+
+TEST(BinopLessThan) {
+  FunctionTester T("(function(a,b) { return a < b; })");
+
+  T.CheckTrue(7, 8);
+  T.CheckFalse(8, 7);
+  T.CheckTrue(-8.1, -8);
+  T.CheckFalse(-8, -8.1);
+  T.CheckFalse(0.111, 0.111);
+
+  T.CheckFalse(T.Val("7.1"), T.Val("7.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("6.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("7.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("8.1"));
+}
+
+
+TEST(BinopLessThanEqual) {
+  FunctionTester T("(function(a,b) { return a <= b; })");
+
+  T.CheckTrue(7, 8);
+  T.CheckFalse(8, 7);
+  T.CheckTrue(-8.1, -8);
+  T.CheckFalse(-8, -8.1);
+  T.CheckTrue(0.111, 0.111);
+
+  T.CheckTrue(T.Val("7.1"), T.Val("7.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("6.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("7.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("8.1"));
+}
+
+
+TEST(BinopGreaterThan) {
+  FunctionTester T("(function(a,b) { return a > b; })");
+
+  T.CheckFalse(7, 8);
+  T.CheckTrue(8, 7);
+  T.CheckFalse(-8.1, -8);
+  T.CheckTrue(-8, -8.1);
+  T.CheckFalse(0.111, 0.111);
+
+  T.CheckFalse(T.Val("7.1"), T.Val("7.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("6.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("7.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("8.1"));
+}
+
+
+TEST(BinopGreaterThanOrEqual) {
+  FunctionTester T("(function(a,b) { return a >= b; })");
+
+  T.CheckFalse(7, 8);
+  T.CheckTrue(8, 7);
+  T.CheckFalse(-8.1, -8);
+  T.CheckTrue(-8, -8.1);
+  T.CheckTrue(0.111, 0.111);
+
+  T.CheckTrue(T.Val("7.1"), T.Val("7.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("6.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("7.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("8.1"));
+}
+
+
+TEST(BinopIn) {
+  FunctionTester T("(function(a,b) { return a in b; })");
+
+  T.CheckTrue(T.Val("x"), T.NewObject("({x:23})"));
+  T.CheckFalse(T.Val("y"), T.NewObject("({x:42})"));
+  T.CheckFalse(T.Val(123), T.NewObject("({x:65})"));
+  T.CheckTrue(T.Val(1), T.NewObject("([1,2,3])"));
+}
+
+
+TEST(BinopInstanceOf) {
+  FunctionTester T("(function(a,b) { return a instanceof b; })");
+
+  T.CheckTrue(T.NewObject("(new Number(23))"), T.NewObject("Number"));
+  T.CheckFalse(T.NewObject("(new Number(23))"), T.NewObject("String"));
+  T.CheckFalse(T.NewObject("(new String('a'))"), T.NewObject("Number"));
+  T.CheckTrue(T.NewObject("(new String('b'))"), T.NewObject("String"));
+  T.CheckFalse(T.Val(1), T.NewObject("Number"));
+  T.CheckFalse(T.Val("abc"), T.NewObject("String"));
+
+  CompileRun("var bound = (function() {}).bind(undefined)");
+  T.CheckTrue(T.NewObject("(new bound())"), T.NewObject("bound"));
+  T.CheckTrue(T.NewObject("(new bound())"), T.NewObject("Object"));
+  T.CheckFalse(T.NewObject("(new bound())"), T.NewObject("Number"));
+}
+
+
+TEST(UnopNot) {
+  FunctionTester T("(function(a) { return !a; })");
+
+  T.CheckCall(T.true_value(), T.false_value(), T.undefined());
+  T.CheckCall(T.false_value(), T.true_value(), T.undefined());
+  T.CheckCall(T.true_value(), T.Val(0.0), T.undefined());
+  T.CheckCall(T.false_value(), T.Val(123), T.undefined());
+  T.CheckCall(T.false_value(), T.Val("x"), T.undefined());
+  T.CheckCall(T.true_value(), T.undefined(), T.undefined());
+  T.CheckCall(T.true_value(), T.nan(), T.undefined());
+}
+
+
+TEST(UnopCountPost) {
+  FunctionTester T("(function(a) { return a++; })");
+
+  T.CheckCall(T.Val(0.0), T.Val(0.0), T.undefined());
+  T.CheckCall(T.Val(2.3), T.Val(2.3), T.undefined());
+  T.CheckCall(T.Val(123), T.Val(123), T.undefined());
+  T.CheckCall(T.Val(7), T.Val("7"), T.undefined());
+  T.CheckCall(T.nan(), T.Val("x"), T.undefined());
+  T.CheckCall(T.nan(), T.undefined(), T.undefined());
+  T.CheckCall(T.Val(1.0), T.true_value(), T.undefined());
+  T.CheckCall(T.Val(0.0), T.false_value(), T.undefined());
+  T.CheckCall(T.nan(), T.nan(), T.undefined());
+}
+
+
+TEST(UnopCountPre) {
+  FunctionTester T("(function(a) { return ++a; })");
+
+  T.CheckCall(T.Val(1.0), T.Val(0.0), T.undefined());
+  T.CheckCall(T.Val(3.3), T.Val(2.3), T.undefined());
+  T.CheckCall(T.Val(124), T.Val(123), T.undefined());
+  T.CheckCall(T.Val(8), T.Val("7"), T.undefined());
+  T.CheckCall(T.nan(), T.Val("x"), T.undefined());
+  T.CheckCall(T.nan(), T.undefined(), T.undefined());
+  T.CheckCall(T.Val(2.0), T.true_value(), T.undefined());
+  T.CheckCall(T.Val(1.0), T.false_value(), T.undefined());
+  T.CheckCall(T.nan(), T.nan(), T.undefined());
+}
+
+
+TEST(PropertyNamedLoad) {
+  FunctionTester T("(function(a,b) { return a.x; })");
+
+  T.CheckCall(T.Val(23), T.NewObject("({x:23})"), T.undefined());
+  T.CheckCall(T.undefined(), T.NewObject("({y:23})"), T.undefined());
+}
+
+
+TEST(PropertyKeyedLoad) {
+  FunctionTester T("(function(a,b) { return a[b]; })");
+
+  T.CheckCall(T.Val(23), T.NewObject("({x:23})"), T.Val("x"));
+  T.CheckCall(T.Val(42), T.NewObject("([23,42,65])"), T.Val(1));
+  T.CheckCall(T.undefined(), T.NewObject("({x:23})"), T.Val("y"));
+  T.CheckCall(T.undefined(), T.NewObject("([23,42,65])"), T.Val(4));
+}
+
+
+TEST(PropertyNamedStore) {
+  FunctionTester T("(function(a) { a.x = 7; return a.x; })");
+
+  T.CheckCall(T.Val(7), T.NewObject("({})"), T.undefined());
+  T.CheckCall(T.Val(7), T.NewObject("({x:23})"), T.undefined());
+}
+
+
+TEST(PropertyKeyedStore) {
+  FunctionTester T("(function(a,b) { a[b] = 7; return a.x; })");
+
+  T.CheckCall(T.Val(7), T.NewObject("({})"), T.Val("x"));
+  T.CheckCall(T.Val(7), T.NewObject("({x:23})"), T.Val("x"));
+  T.CheckCall(T.Val(9), T.NewObject("({x:9})"), T.Val("y"));
+}
+
+
+TEST(PropertyNamedDelete) {
+  FunctionTester T("(function(a) { return delete a.x; })");
+
+  CompileRun("var o = Object.create({}, { x: { value:23 } });");
+  T.CheckTrue(T.NewObject("({x:42})"), T.undefined());
+  T.CheckTrue(T.NewObject("({})"), T.undefined());
+  T.CheckFalse(T.NewObject("(o)"), T.undefined());
+}
+
+
+TEST(PropertyKeyedDelete) {
+  FunctionTester T("(function(a, b) { return delete a[b]; })");
+
+  CompileRun("function getX() { return 'x'; }");
+  CompileRun("var o = Object.create({}, { x: { value:23 } });");
+  T.CheckTrue(T.NewObject("({x:42})"), T.Val("x"));
+  T.CheckFalse(T.NewObject("(o)"), T.Val("x"));
+  T.CheckFalse(T.NewObject("(o)"), T.NewObject("({toString:getX})"));
+}
+
+
+TEST(GlobalLoad) {
+  FunctionTester T("(function() { return g; })");
+
+  T.CheckThrows(T.undefined(), T.undefined());
+  CompileRun("var g = 23;");
+  T.CheckCall(T.Val(23));
+}
+
+
+TEST(GlobalStoreSloppy) {
+  FunctionTester T("(function(a,b) { g = a + b; return g; })");
+
+  T.CheckCall(T.Val(33), T.Val(22), T.Val(11));
+  CompileRun("delete g");
+  CompileRun("const g = 23");
+  T.CheckCall(T.Val(23), T.Val(55), T.Val(44));
+}
+
+
+TEST(GlobalStoreStrict) {
+  FunctionTester T("(function(a,b) { 'use strict'; g = a + b; return g; })");
+
+  T.CheckThrows(T.Val(22), T.Val(11));
+  CompileRun("var g = 'a global variable';");
+  T.CheckCall(T.Val(33), T.Val(22), T.Val(11));
+}
+
+
+TEST(ContextLoad) {
+  FunctionTester T("(function(a,b) { (function(){a}); return a + b; })");
+
+  T.CheckCall(T.Val(65), T.Val(23), T.Val(42));
+  T.CheckCall(T.Val("ab"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(ContextStore) {
+  FunctionTester T("(function(a,b) { (function(){x}); var x = a; return x; })");
+
+  T.CheckCall(T.Val(23), T.Val(23), T.undefined());
+  T.CheckCall(T.Val("a"), T.Val("a"), T.undefined());
+}
+
+
+TEST(LookupLoad) {
+  FunctionTester T("(function(a,b) { with(a) { return x + b; } })");
+
+  T.CheckCall(T.Val(24), T.NewObject("({x:23})"), T.Val(1));
+  T.CheckCall(T.Val(32), T.NewObject("({x:23, b:9})"), T.Val(2));
+  T.CheckCall(T.Val(45), T.NewObject("({__proto__:{x:42}})"), T.Val(3));
+  T.CheckCall(T.Val(69), T.NewObject("({get x() { return 65; }})"), T.Val(4));
+}
+
+
+TEST(LookupStore) {
+  FunctionTester T("(function(a,b) { var x; with(a) { x = b; } return x; })");
+
+  T.CheckCall(T.undefined(), T.NewObject("({x:23})"), T.Val(1));
+  T.CheckCall(T.Val(2), T.NewObject("({y:23})"), T.Val(2));
+  T.CheckCall(T.Val(23), T.NewObject("({b:23})"), T.Val(3));
+  T.CheckCall(T.undefined(), T.NewObject("({__proto__:{x:42}})"), T.Val(4));
+}
+
+
+TEST(BlockLoadStore) {
+  FLAG_harmony_scoping = true;
+  FunctionTester T("(function(a) { 'use strict'; { let x = a+a; return x; }})");
+
+  T.CheckCall(T.Val(46), T.Val(23));
+  T.CheckCall(T.Val("aa"), T.Val("a"));
+}
+
+
+TEST(BlockLoadStoreNested) {
+  FLAG_harmony_scoping = true;
+  const char* src =
+      "(function(a,b) {"
+      "'use strict';"
+      "{ let x = a, y = a;"
+      "  { let y = b;"
+      "    return x + y;"
+      "  }"
+      "}})";
+  FunctionTester T(src);
+
+  T.CheckCall(T.Val(65), T.Val(23), T.Val(42));
+  T.CheckCall(T.Val("ab"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(ObjectLiteralComputed) {
+  FunctionTester T("(function(a,b) { o = { x:a+b }; return o.x; })");
+
+  T.CheckCall(T.Val(65), T.Val(23), T.Val(42));
+  T.CheckCall(T.Val("ab"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(ObjectLiteralNonString) {
+  FunctionTester T("(function(a,b) { o = { 7:a+b }; return o[7]; })");
+
+  T.CheckCall(T.Val(65), T.Val(23), T.Val(42));
+  T.CheckCall(T.Val("ab"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(ObjectLiteralPrototype) {
+  FunctionTester T("(function(a) { o = { __proto__:a }; return o.x; })");
+
+  T.CheckCall(T.Val(23), T.NewObject("({x:23})"), T.undefined());
+  T.CheckCall(T.undefined(), T.NewObject("({y:42})"), T.undefined());
+}
+
+
+TEST(ObjectLiteralGetter) {
+  FunctionTester T("(function(a) { o = { get x() {return a} }; return o.x; })");
+
+  T.CheckCall(T.Val(23), T.Val(23), T.undefined());
+  T.CheckCall(T.Val("x"), T.Val("x"), T.undefined());
+}
+
+
+TEST(ArrayLiteral) {
+  FunctionTester T("(function(a,b) { o = [1, a + b, 3]; return o[1]; })");
+
+  T.CheckCall(T.Val(65), T.Val(23), T.Val(42));
+  T.CheckCall(T.Val("ab"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(RegExpLiteral) {
+  FunctionTester T("(function(a) { o = /b/; return o.test(a); })");
+
+  T.CheckTrue(T.Val("abc"));
+  T.CheckFalse(T.Val("xyz"));
+}
diff --git a/test/cctest/compiler/test-run-machops.cc b/test/cctest/compiler/test-run-machops.cc
new file mode 100644 (file)
index 0000000..1f71c4a
--- /dev/null
@@ -0,0 +1,3798 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+#include "src/v8.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+#if V8_TURBOFAN_TARGET
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+typedef RawMachineAssembler::Label MLabel;
+
+TEST(RunInt32Add) {
+  RawMachineAssemblerTester<int32_t> m;
+  Node* add = m.Int32Add(m.Int32Constant(0), m.Int32Constant(1));
+  m.Return(add);
+  CHECK_EQ(1, m.Call());
+}
+
+
+static Node* Int32Input(RawMachineAssemblerTester<int32_t>* m, int index) {
+  switch (index) {
+    case 0:
+      return m->Parameter(0);
+    case 1:
+      return m->Parameter(1);
+    case 2:
+      return m->Int32Constant(0);
+    case 3:
+      return m->Int32Constant(1);
+    case 4:
+      return m->Int32Constant(-1);
+    case 5:
+      return m->Int32Constant(0xff);
+    case 6:
+      return m->Int32Constant(0x01234567);
+    case 7:
+      return m->Load(kMachineWord32, m->PointerConstant(NULL));
+    default:
+      return NULL;
+  }
+}
+
+
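+// Pure code-generation smoke test: every 32-bit binop is instantiated for all
+// 8x8 combinations of input shapes; the generated code is never called.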
+TEST(CodeGenInt32Binop) {
+  RawMachineAssemblerTester<void> m;
+
+  Operator* ops[] = {
+      m.machine()->Word32And(),      m.machine()->Word32Or(),
+      m.machine()->Word32Xor(),      m.machine()->Word32Shl(),
+      m.machine()->Word32Shr(),      m.machine()->Word32Sar(),
+      m.machine()->Word32Equal(),    m.machine()->Int32Add(),
+      m.machine()->Int32Sub(),       m.machine()->Int32Mul(),
+      m.machine()->Int32Div(),       m.machine()->Int32UDiv(),
+      m.machine()->Int32Mod(),       m.machine()->Int32UMod(),
+      m.machine()->Int32LessThan(),  m.machine()->Int32LessThanOrEqual(),
+      m.machine()->Uint32LessThan(), m.machine()->Uint32LessThanOrEqual(),
+      NULL};
+
+  for (int i = 0; ops[i] != NULL; i++) {
+    for (int j = 0; j < 8; j++) {
+      for (int k = 0; k < 8; k++) {
+        RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+        Node* a = Int32Input(&m, j);
+        Node* b = Int32Input(&m, k);
+        m.Return(m.NewNode(ops[i], a, b));
+        m.GenerateCode();
+      }
+    }
+  }
+}
+
+
+TEST(RunGoto) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 99999;
+
+  MLabel next;
+  m.Goto(&next);
+  m.Bind(&next);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunGotoMultiple) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 9999977;
+
+  MLabel labels[10];
+  for (size_t i = 0; i < ARRAY_SIZE(labels); i++) {
+    m.Goto(&labels[i]);
+    m.Bind(&labels[i]);
+  }
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunBranch) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 999777;
+
+  MLabel blocka, blockb;
+  m.Branch(m.Int32Constant(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(0 - constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
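+// The RedundantBranch tests feed branches whose true and false targets
+// coincide, directly or via forwarding blocks, and expect codegen to cope.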
+TEST(RunRedundantBranch1) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 944777;
+
+  MLabel blocka;
+  m.Branch(m.Int32Constant(0), &blocka, &blocka);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunRedundantBranch2) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 955777;
+
+  MLabel blocka, blockb;
+  m.Branch(m.Int32Constant(0), &blocka, &blocka);
+  m.Bind(&blockb);
+  m.Goto(&blocka);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunRedundantBranch3) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 966777;
+
+  MLabel blocka, blockb, blockc;
+  m.Branch(m.Int32Constant(0), &blocka, &blockc);
+  m.Bind(&blocka);
+  m.Branch(m.Int32Constant(0), &blockb, &blockb);
+  m.Bind(&blockc);
+  m.Goto(&blockb);
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunDiamond2) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  int constant = 995666;
+
+  MLabel blocka, blockb, end;
+  m.Branch(m.Int32Constant(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Goto(&end);
+  m.Bind(&blockb);
+  m.Goto(&end);
+  m.Bind(&end);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunLoop) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 999555;
+
+  MLabel header, body, exit;
+  m.Goto(&header);
+  m.Bind(&header);
+  m.Branch(m.Int32Constant(0), &body, &exit);
+  m.Bind(&body);
+  m.Goto(&header);
+  m.Bind(&exit);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
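+// Builds a diamond (branch, two empty arms, merge) whose merge point selects
+// between true_node and false_node with a phi, then returns the phi.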
+template <typename R>
+static void BuildDiamondPhi(RawMachineAssemblerTester<R>* m, Node* cond_node,
+                            Node* true_node, Node* false_node) {
+  MLabel blocka, blockb;
+  MLabel* end = m->Exit();
+  m->Branch(cond_node, &blocka, &blockb);
+  m->Bind(&blocka);
+  m->Goto(end);
+  m->Bind(&blockb);
+  m->Goto(end);
+
+  m->Bind(end);
+  Node* phi = m->Phi(true_node, false_node);
+  m->Return(phi);
+}
+
+
+TEST(RunDiamondPhiConst) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  int false_val = 0xFF666;
+  int true_val = 0x00DDD;
+  Node* true_node = m.Int32Constant(true_val);
+  Node* false_node = m.Int32Constant(false_val);
+  BuildDiamondPhi(&m, m.Parameter(0), true_node, false_node);
+  CHECK_EQ(false_val, m.Call(0));
+  CHECK_EQ(true_val, m.Call(1));
+}
+
+
+TEST(RunDiamondPhiNumber) {
+  RawMachineAssemblerTester<Object*> m(kMachineWord32);
+  double false_val = -11.1;
+  double true_val = 200.1;
+  Node* true_node = m.NumberConstant(true_val);
+  Node* false_node = m.NumberConstant(false_val);
+  BuildDiamondPhi(&m, m.Parameter(0), true_node, false_node);
+  m.CheckNumber(false_val, m.Call(0));
+  m.CheckNumber(true_val, m.Call(1));
+}
+
+
+TEST(RunDiamondPhiString) {
+  RawMachineAssemblerTester<Object*> m(kMachineWord32);
+  const char* false_val = "false";
+  const char* true_val = "true";
+  Node* true_node = m.StringConstant(true_val);
+  Node* false_node = m.StringConstant(false_val);
+  BuildDiamondPhi(&m, m.Parameter(0), true_node, false_node);
+  m.CheckString(false_val, m.Call(0));
+  m.CheckString(true_val, m.Call(1));
+}
+
+
+TEST(RunDiamondPhiParam) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                       kMachineWord32);
+  BuildDiamondPhi(&m, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+  int32_t c1 = 0x260cb75a;
+  int32_t c2 = 0xcd3e9c8b;
+  int result = m.Call(0, c1, c2);
+  CHECK_EQ(c2, result);
+  result = m.Call(1, c1, c2);
+  CHECK_EQ(c1, result);
+}
+
+
+TEST(RunLoopPhiConst) {
+  RawMachineAssemblerTester<int32_t> m;
+  int true_val = 0x44000;
+  int false_val = 0x00888;
+
+  Node* cond_node = m.Int32Constant(0);
+  Node* true_node = m.Int32Constant(true_val);
+  Node* false_node = m.Int32Constant(false_val);
+
+  // x = false_val; while(false) { x = true_val; } return x;
+  MLabel body, header;
+  MLabel* end = m.Exit();
+
+  m.Goto(&header);
+  m.Bind(&header);
+  Node* phi = m.Phi(false_node, true_node);
+  m.Branch(cond_node, &body, end);
+  m.Bind(&body);
+  m.Goto(&header);
+  m.Bind(end);
+  m.Return(phi);
+
+  CHECK_EQ(false_val, m.Call());
+}
+
+
+TEST(RunLoopPhiParam) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                       kMachineWord32);
+
+  MLabel blocka, blockb;
+  MLabel* end = m.Exit();
+
+  m.Goto(&blocka);
+
+  m.Bind(&blocka);
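+  // The first input of each phi is the value on loop entry; the second is
+  // the value on the back edge, so the loop body executes at most once.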
+  Node* phi = m.Phi(m.Parameter(1), m.Parameter(2));
+  Node* cond = m.Phi(m.Parameter(0), m.Int32Constant(0));
+  m.Branch(cond, &blockb, end);
+
+  m.Bind(&blockb);
+  m.Goto(&blocka);
+
+  m.Bind(end);
+  m.Return(phi);
+
+  int32_t c1 = 0xa81903b4;
+  int32_t c2 = 0x5a1207da;
+  int result = m.Call(0, c1, c2);
+  CHECK_EQ(c1, result);
+  result = m.Call(1, c1, c2);
+  CHECK_EQ(c2, result);
+}
+
+
+TEST(RunLoopPhiInduction) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  int false_val = 0x10777;
+
+  // x = false_val; while(false) { x++; } return x;
+  MLabel header, body;
+  MLabel* end = m.Exit();
+  Node* false_node = m.Int32Constant(false_val);
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* phi = m.Phi(false_node, false_node);
+  m.Branch(m.Int32Constant(0), &body, end);
+
+  m.Bind(&body);
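+  // The phi was created with a placeholder back-edge input; patch it to the
+  // increment now that the add node exists.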
+  Node* add = m.Int32Add(phi, m.Int32Constant(1));
+  phi->ReplaceInput(1, add);
+  m.Goto(&header);
+
+  m.Bind(end);
+  m.Return(phi);
+
+  CHECK_EQ(false_val, m.Call());
+}
+
+
+TEST(RunLoopIncrement) {
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+
+  // x = 0; while(x ^ param) { x++; } return x;
+  MLabel header, body;
+  MLabel* end = m.Exit();
+  Node* zero = m.Int32Constant(0);
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* phi = m.Phi(zero, zero);
+  m.Branch(m.WordXor(phi, bt.param0), &body, end);
+
+  m.Bind(&body);
+  phi->ReplaceInput(1, m.Int32Add(phi, m.Int32Constant(1)));
+  m.Goto(&header);
+
+  m.Bind(end);
+  bt.AddReturn(phi);
+
+  CHECK_EQ(11, bt.call(11, 0));
+  CHECK_EQ(110, bt.call(110, 0));
+  CHECK_EQ(176, bt.call(176, 0));
+}
+
+
+TEST(RunLoopIncrement2) {
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+
+  // x = 0; while(x < param) { x++; } return x;
+  MLabel header, body;
+  MLabel* end = m.Exit();
+  Node* zero = m.Int32Constant(0);
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* phi = m.Phi(zero, zero);
+  m.Branch(m.Int32LessThan(phi, bt.param0), &body, end);
+
+  m.Bind(&body);
+  phi->ReplaceInput(1, m.Int32Add(phi, m.Int32Constant(1)));
+  m.Goto(&header);
+
+  m.Bind(end);
+  bt.AddReturn(phi);
+
+  CHECK_EQ(11, bt.call(11, 0));
+  CHECK_EQ(110, bt.call(110, 0));
+  CHECK_EQ(176, bt.call(176, 0));
+  CHECK_EQ(0, bt.call(-200, 0));
+}
+
+
+TEST(RunLoopIncrement3) {
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+
+  // x = 0; while(x < param) { x++; } return x;
+  MLabel header, body;
+  MLabel* end = m.Exit();
+  Node* zero = m.Int32Constant(0);
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* phi = m.Phi(zero, zero);
+  m.Branch(m.Uint32LessThan(phi, bt.param0), &body, end);
+
+  m.Bind(&body);
+  phi->ReplaceInput(1, m.Int32Add(phi, m.Int32Constant(1)));
+  m.Goto(&header);
+
+  m.Bind(end);
+  bt.AddReturn(phi);
+
+  CHECK_EQ(11, bt.call(11, 0));
+  CHECK_EQ(110, bt.call(110, 0));
+  CHECK_EQ(176, bt.call(176, 0));
+  CHECK_EQ(200, bt.call(200, 0));
+}
+
+
+TEST(RunLoopDecrement) {
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+
+  // x = param; while(x) { x--; } return x;
+  MLabel header, body;
+  MLabel* end = m.Exit();
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* phi = m.Phi(bt.param0, m.Int32Constant(0));
+  m.Branch(phi, &body, end);
+
+  m.Bind(&body);
+  phi->ReplaceInput(1, m.Int32Sub(phi, m.Int32Constant(1)));
+  m.Goto(&header);
+
+  m.Bind(end);
+  bt.AddReturn(phi);
+
+  CHECK_EQ(0, bt.call(11, 0));
+  CHECK_EQ(0, bt.call(110, 0));
+  CHECK_EQ(0, bt.call(197, 0));
+}
+
+
+TEST(RunLoopIncrementFloat64) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  // x = -3.0; while(x < 10) { x = x + 0.5; } return (int) x;
+  MLabel header, body;
+  MLabel* end = m.Exit();
+  Node* minus_3 = m.Float64Constant(-3.0);
+  Node* ten = m.Float64Constant(10.0);
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* phi = m.Phi(minus_3, ten);
+  m.Branch(m.Float64LessThan(phi, ten), &body, end);
+
+  m.Bind(&body);
+  phi->ReplaceInput(1, m.Float64Add(phi, m.Float64Constant(0.5)));
+  m.Goto(&header);
+
+  m.Bind(end);
+  m.Return(m.ConvertFloat64ToInt32(phi));
+
+  CHECK_EQ(10, m.Call());
+}
+
+
+TEST(RunLoadInt32) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  int32_t p1 = 0;  // loads directly from this location.
+  m.Return(m.LoadFromPointer(&p1, kMachineWord32));
+
+  FOR_INT32_INPUTS(i) {
+    p1 = *i;
+    CHECK_EQ(p1, m.Call());
+  }
+}
+
+
+TEST(RunLoadInt32Offset) {
+  int32_t p1 = 0;  // loads directly from this location.
+
+  int32_t offsets[] = {-2000000, -100, -101, 1,          3,
+                       7,        120,  2000, 2000000000, 0xff};
+
+  for (size_t i = 0; i < ARRAY_SIZE(offsets); i++) {
+    RawMachineAssemblerTester<int32_t> m;
+    int32_t offset = offsets[i];
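+    // Pick the base so that base + offset points exactly at p1.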
+    byte* pointer = reinterpret_cast<byte*>(&p1) - offset;
+    // generate load [#base + #index]
+    m.Return(m.LoadFromPointer(pointer, kMachineWord32, offset));
+
+    FOR_INT32_INPUTS(j) {
+      p1 = *j;
+      CHECK_EQ(p1, m.Call());
+    }
+  }
+}
+
+
+TEST(RunLoadStoreFloat64Offset) {
+  double p1 = 0;  // loads directly from this location.
+  double p2 = 0;  // and stores directly into this location.
+
+  FOR_INT32_INPUTS(i) {
+    int32_t magic = 0x2342aabb + *i * 3;
+    RawMachineAssemblerTester<int32_t> m;
+    int32_t offset = *i;
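+    // Pick the bases so that base + offset points at p1 for the load and at
+    // p2 for the store.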
+    byte* from = reinterpret_cast<byte*>(&p1) - offset;
+    byte* to = reinterpret_cast<byte*>(&p2) - offset;
+    // generate load [#base + #index]
+    Node* load = m.Load(kMachineFloat64, m.PointerConstant(from),
+                        m.Int32Constant(offset));
+    m.Store(kMachineFloat64, m.PointerConstant(to), m.Int32Constant(offset),
+            load);
+    m.Return(m.Int32Constant(magic));
+
+    FOR_FLOAT64_INPUTS(j) {
+      p1 = *j;
+      p2 = *j - 5;
+      CHECK_EQ(magic, m.Call());
+      CHECK_EQ(p1, p2);
+    }
+  }
+}
+
+
+TEST(RunInt32AddP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+
+  bt.AddReturn(m.Int32Add(bt.param0, bt.param1));
+
+  FOR_INT32_INPUTS(i) {
+    FOR_INT32_INPUTS(j) {
+      // Use uint32_t because signed overflow is UB in C.
+      int expected = static_cast<int32_t>(static_cast<uint32_t>(*i) +
+                                          static_cast<uint32_t>(*j));
+      CHECK_EQ(expected, bt.call(*i, *j));
+    }
+  }
+}
+
+
+TEST(RunInt32AddAndWord32SarP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Add(m.Parameter(0),
+                        m.Word32Sar(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *k & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = *i + (*j >> shift);
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Add(m.Word32Sar(m.Parameter(0), m.Parameter(1)),
+                        m.Parameter(2)));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *j & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = (*i >> shift) + *k;
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32AddAndWord32ShlP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Add(m.Parameter(0),
+                        m.Word32Shl(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *k & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = *i + (static_cast<uint32_t>(*j) << shift);
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Add(m.Word32Shl(m.Parameter(0), m.Parameter(1)),
+                        m.Parameter(2)));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *j & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = (static_cast<uint32_t>(*i) << shift) + *k;
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32AddAndWord32ShrP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Add(m.Parameter(0),
+                        m.Word32Shr(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *k & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = *i + (*j >> shift);
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Add(m.Word32Shr(m.Parameter(0), m.Parameter(1)),
+                        m.Parameter(2)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *j & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = (*i >> shift) + *k;
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32AddInBranch) {
+  static const int32_t constant = 987654321;
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32Equal(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i + *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32NotEqual(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i + *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Int32Add(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i + *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32NotEqual(m.Int32Add(m.Int32Constant(*i), m.Parameter(0)),
+                                m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i + *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
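+    // The outer assembler exists only to obtain the shift operators; each
+    // shift operation gets a freshly built graph below.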
+    RawMachineAssemblerTester<void> m;
+    Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
+                         m.machine()->Word32Shr()};
+    for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                           kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Int32Add(m.Parameter(0),
+                                        m.NewNode(shops[n], m.Parameter(1),
+                                                  m.Parameter(2))),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_INPUTS(k) {
+            uint32_t shift = *k & 0x1F;
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = static_cast<uint32_t>(*j) << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = ((*i + right) == 0) ? constant : 0 - constant;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32AddInComparison) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i + *j) == 0;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Constant(0), m.Int32Add(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i + *j) == 0;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Equal(m.Int32Add(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i + *j) == 0;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Equal(m.Int32Add(m.Parameter(0), m.Int32Constant(*i)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*j + *i) == 0;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
+                         m.machine()->Word32Shr()};
+    for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                           kMachineWord32);
+      m.Return(m.Word32Equal(
+          m.Int32Add(m.Parameter(0),
+                     m.NewNode(shops[n], m.Parameter(1), m.Parameter(2))),
+          m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_INPUTS(k) {
+            uint32_t shift = *k & 0x1F;
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = static_cast<uint32_t>(*j) << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = (*i + right) == 0;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32SubP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+
+  bt.AddReturn(m.Int32Sub(bt.param0, bt.param1));
+
+  FOR_UINT32_INPUTS(i) {
+    FOR_UINT32_INPUTS(j) {
+      // Use uint32_t because signed overflow is UB in C.
+      int expected = static_cast<int32_t>(*i - *j);
+      CHECK_EQ(expected, bt.call(*i, *j));
+    }
+  }
+}
+
+
+TEST(RunInt32SubImm) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)));
+      FOR_UINT32_INPUTS(j) {
+        // Use uint32_t because signed overflow is UB in C.
+        int32_t expected = static_cast<int32_t>(*i - *j);
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(*i)));
+      FOR_UINT32_INPUTS(j) {
+        // Use uint32_t because signed overflow is UB in C.
+        int32_t expected = static_cast<int32_t>(*j - *i);
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32SubAndWord32SarP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Sub(m.Parameter(0),
+                        m.Word32Sar(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *k & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = *i - (*j >> shift);
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Sub(m.Word32Sar(m.Parameter(0), m.Parameter(1)),
+                        m.Parameter(2)));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *j & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = (*i >> shift) - *k;
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32SubAndWord32ShlP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Sub(m.Parameter(0),
+                        m.Word32Shl(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *k & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = *i - (static_cast<uint32_t>(*j) << shift);
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Sub(m.Word32Shl(m.Parameter(0), m.Parameter(1)),
+                        m.Parameter(2)));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *j & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = (static_cast<uint32_t>(*i) << shift) - *k;
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32SubAndWord32ShrP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Sub(m.Parameter(0),
+                        m.Word32Shr(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *k & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = *i - (*j >> shift);
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Int32Sub(m.Word32Shr(m.Parameter(0), m.Parameter(1)),
+                        m.Parameter(2)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *j & 0x1F;
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = (*i >> shift) - *k;
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32SubInBranch) {
+  static const int constant = 987654321;
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32Equal(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i - *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32NotEqual(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i - *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i - *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32NotEqual(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)),
+                                m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i - *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
+                         m.machine()->Word32Shr()};
+    for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                           kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Int32Sub(m.Parameter(0),
+                                        m.NewNode(shops[n], m.Parameter(1),
+                                                  m.Parameter(2))),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_INPUTS(k) {
+            uint32_t shift = *k & 0x1F;
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = static_cast<uint32_t>(*j) << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = ((*i - right) == 0) ? constant : 0 - constant;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32SubInComparison) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i - *j) == 0;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Constant(0), m.Int32Sub(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i - *j) == 0;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Equal(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i - *j) == 0;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Equal(m.Int32Sub(m.Parameter(0), m.Int32Constant(*i)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*j - *i) == 0;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
+                         m.machine()->Word32Shr()};
+    for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                           kMachineWord32);
+      m.Return(m.Word32Equal(
+          m.Int32Sub(m.Parameter(0),
+                     m.NewNode(shops[n], m.Parameter(1), m.Parameter(2))),
+          m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_INPUTS(k) {
+            uint32_t shift = *k & 0x1F;
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = static_cast<uint32_t>(*j) << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = (*i - right) == 0;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32MulP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Mul(bt.param0, bt.param1));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        // Use uint32_t because signed overflow is UB in C.
+        int expected = static_cast<int32_t>(static_cast<uint32_t>(*i) *
+                                            static_cast<uint32_t>(*j));
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Mul(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int expected = static_cast<int32_t>(*i * *j);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32MulImm) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Int32Mul(m.Int32Constant(*i), m.Parameter(0)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = static_cast<int32_t>(*i * *j);
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant(*i)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = static_cast<int32_t>(*j * *i);
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32MulAndInt32AddP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(
+        m.Int32Add(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_INT32_INPUTS(k) {
+          int32_t p0 = *i;
+          int32_t p1 = *j;
+          int32_t p2 = *k;
+          // Compute in uint32_t because signed overflow is UB in C.
+          int expected = static_cast<int32_t>(
+              p0 + static_cast<uint32_t>(p1) * static_cast<uint32_t>(p2));
+          CHECK_EQ(expected, m.Call(p0, p1, p2));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(
+        m.Int32Add(m.Int32Mul(m.Parameter(0), m.Parameter(1)), m.Parameter(2)));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_INT32_INPUTS(k) {
+          int32_t p0 = *i;
+          int32_t p1 = *j;
+          int32_t p2 = *k;
+          // Compute in uint32_t because signed overflow is UB in C.
+          int expected = static_cast<int32_t>(
+              static_cast<uint32_t>(p0) * static_cast<uint32_t>(p1) + p2);
+          CHECK_EQ(expected, m.Call(p0, p1, p2));
+        }
+      }
+    }
+  }
+  {
+    FOR_INT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m;
+      Int32BinopTester bt(&m);
+      bt.AddReturn(
+          m.Int32Add(m.Int32Constant(*i), m.Int32Mul(bt.param0, bt.param1)));
+      FOR_INT32_INPUTS(j) {
+        FOR_INT32_INPUTS(k) {
+          int32_t p0 = *j;
+          int32_t p1 = *k;
+          // Compute in uint32_t because signed overflow is UB in C.
+          int expected = static_cast<int32_t>(
+              *i + static_cast<uint32_t>(p0) * static_cast<uint32_t>(p1));
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32MulAndInt32SubP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(
+        m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_INT32_INPUTS(k) {
+          uint32_t p0 = *i;
+          int32_t p1 = *j;
+          int32_t p2 = *k;
+          // Use uint32_t because signed overflow is UB in C.
+          int expected =
+              p0 - static_cast<uint32_t>(p1) * static_cast<uint32_t>(p2);
+          CHECK_EQ(expected, m.Call(p0, p1, p2));
+        }
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m;
+      Int32BinopTester bt(&m);
+      bt.AddReturn(
+          m.Int32Sub(m.Int32Constant(*i), m.Int32Mul(bt.param0, bt.param1)));
+      FOR_INT32_INPUTS(j) {
+        FOR_INT32_INPUTS(k) {
+          int32_t p0 = *j;
+          int32_t p1 = *k;
+          // Use uint32_t because signed overflow is UB in C.
+          int expected =
+              *i - static_cast<uint32_t>(p0) * static_cast<uint32_t>(p1);
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32DivP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Div(bt.param0, bt.param1));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int p0 = *i;
+        int p1 = *j;
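+        // Skip divide-by-zero and INT_MIN / -1, which overflows.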
+        if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
+          int expected = static_cast<int32_t>(p0 / p1);
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Add(bt.param0, m.Int32Div(bt.param0, bt.param1)));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int p0 = *i;
+        int p1 = *j;
+        if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
+          int expected = static_cast<int32_t>(p0 + (p0 / p1));
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32UDivP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32UDiv(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t p0 = *i;
+        uint32_t p1 = *j;
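+        // Unlike signed division, only divide-by-zero needs to be skipped.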
+        if (p1 != 0) {
+          uint32_t expected = static_cast<uint32_t>(p0 / p1);
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Add(bt.param0, m.Int32UDiv(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t p0 = *i;
+        uint32_t p1 = *j;
+        if (p1 != 0) {
+          uint32_t expected = static_cast<uint32_t>(p0 + (p0 / p1));
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32ModP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Mod(bt.param0, bt.param1));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int p0 = *i;
+        int p1 = *j;
+        if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
+          int expected = static_cast<int32_t>(p0 % p1);
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Add(bt.param0, m.Int32Mod(bt.param0, bt.param1)));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int p0 = *i;
+        int p1 = *j;
+        if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
+          int expected = static_cast<int32_t>(p0 + (p0 % p1));
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32UModP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32UMod(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t p0 = *i;
+        uint32_t p1 = *j;
+        if (p1 != 0) {
+          uint32_t expected = static_cast<uint32_t>(p0 % p1);
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Add(bt.param0, m.Int32UMod(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t p0 = *i;
+        uint32_t p1 = *j;
+        if (p1 != 0) {
+          uint32_t expected = static_cast<uint32_t>(p0 + (p0 % p1));
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32And(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i & *j;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32And(bt.param0, m.Word32Not(bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i & ~(*j);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32And(m.Word32Not(bt.param0), bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = ~(*i) & *j;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndAndWord32ShlP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
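+    // Masking the shift count with 0x1f is a no-op for a 32-bit shift, so
+    // instruction selection may fold the Word32And into the shift.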
+    bt.AddReturn(
+        m.Word32Shl(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1f))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i << (*j & 0x1f);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Shl(bt.param0, m.Word32And(m.Int32Constant(0x1f), bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i << (0x1f & *j);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndAndWord32ShrP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Shr(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1f))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i >> (*j & 0x1f);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Shr(bt.param0, m.Word32And(m.Int32Constant(0x1f), bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i >> (0x1f & *j);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndAndWord32SarP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Sar(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1f))));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = *i >> (*j & 0x1f);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Sar(bt.param0, m.Word32And(m.Int32Constant(0x1f), bt.param1)));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = *i >> (0x1f & *j);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndImm) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32And(m.Int32Constant(*i), m.Parameter(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i & *j;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32And(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i & ~(*j);
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndInBranch) {
+  static const int constant = 987654321;
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32Equal(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i & *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32NotEqual(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i & *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Word32And(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i & *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(
+          m.Word32NotEqual(m.Word32And(m.Int32Constant(*i), m.Parameter(0)),
+                           m.Int32Constant(0)),
+          &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i & *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
+                         m.machine()->Word32Shr()};
+    for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                           kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Word32And(m.Parameter(0),
+                                         m.NewNode(shops[n], m.Parameter(1),
+                                                   m.Parameter(2))),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_INPUTS(k) {
+            uint32_t shift = *k & 0x1F;
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = static_cast<uint32_t>(*j) << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = ((*i & right) == 0) ? constant : 0 - constant;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndInComparison) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i & *j) == 0;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Constant(0), m.Word32And(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i & *j) == 0;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Equal(m.Word32And(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i & *j) == 0;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Equal(m.Word32And(m.Parameter(0), m.Int32Constant(*i)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*j & *i) == 0;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32OrP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Or(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i | *j;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Or(bt.param0, m.Word32Not(bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i | ~(*j);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Or(m.Word32Not(bt.param0), bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = ~(*i) | *j;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32OrImm) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i | *j;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Or(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i | ~(*j);
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32OrInBranch) {
+  static const int constant = 987654321;
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32Equal(m.Word32Or(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i | *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32NotEqual(m.Word32Or(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i | *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i | *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32NotEqual(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)),
+                                m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i | *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
+                         m.machine()->Word32Shr()};
+    for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                           kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Word32Or(m.Parameter(0),
+                                        m.NewNode(shops[n], m.Parameter(1),
+                                                  m.Parameter(2))),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_INPUTS(k) {
+            uint32_t shift = *k & 0x1F;
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = static_cast<uint32_t>(*j) << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = ((*i | right) == 0) ? constant : 0 - constant;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32OrInComparison) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Word32Or(bt.param0, bt.param1), m.Int32Constant(0)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i | *j) == 0;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Constant(0), m.Word32Or(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i | *j) == 0;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Equal(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i | *j) == 0;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Equal(m.Word32Or(m.Parameter(0), m.Int32Constant(*i)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*j | *i) == 0;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32XorP) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i ^ *j;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Xor(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i ^ *j;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Xor(bt.param0, m.Word32Not(bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i ^ ~(*j);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Xor(m.Word32Not(bt.param0), bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = ~(*i) ^ *j;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Xor(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i ^ ~(*j);
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32XorInBranch) {
+  static const int constant = 987654321;
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32Equal(m.Word32Xor(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i ^ *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32NotEqual(m.Word32Xor(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i ^ *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i ^ *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(
+          m.Word32NotEqual(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)),
+                           m.Int32Constant(0)),
+          &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i ^ *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
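+    // Branch on (x ^ (y shift z)) == 0 for each shift opcode; this covers
+    // instruction selectors that can fuse a shift into the xor's operand.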
+    RawMachineAssemblerTester<void> m;
+    Operator* shops[] = {m.machine()->Word32Sar(), m.machine()->Word32Shl(),
+                         m.machine()->Word32Shr()};
+    for (size_t n = 0; n < ARRAY_SIZE(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                           kMachineWord32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Word32Xor(m.Parameter(0),
+                                         m.NewNode(shops[n], m.Parameter(1),
+                                                   m.Parameter(2))),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_INPUTS(k) {
+            uint32_t shift = *k & 0x1F;
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = *j << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = ((*i ^ right) == 0) ? constant : 0 - constant;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32ShlP) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      uint32_t shift = *i & 0x1F;
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Shl(m.Parameter(0), m.Int32Constant(shift)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *j << shift;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Shl(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t shift = *j & 0x1F;
+        uint32_t expected = *i << shift;
+        CHECK_EQ(expected, bt.call(*i, shift));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32ShrP) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      uint32_t shift = *i & 0x1F;
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *j >> shift;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Shr(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t shift = *j & 0x1F;
+        uint32_t expected = *i >> shift;
+        CHECK_EQ(expected, bt.call(*i, shift));
+      }
+    }
+    CHECK_EQ(0x00010000, bt.call(0x80000000, 15));
+  }
+}
+
+
+TEST(RunWord32SarP) {
+  {
+    FOR_INT32_INPUTS(i) {
+      int32_t shift = *i & 0x1F;
+      RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+      m.Return(m.Word32Sar(m.Parameter(0), m.Int32Constant(shift)));
+      FOR_INT32_INPUTS(j) {
+        int32_t expected = *j >> shift;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Sar(bt.param0, bt.param1));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int32_t shift = *j & 0x1F;
+        int32_t expected = *i >> shift;
+        CHECK_EQ(expected, bt.call(*i, shift));
+      }
+    }
+    CHECK_EQ(0xFFFF0000, bt.call(0x80000000, 15));
+  }
+}
+
+
+TEST(RunWord32NotP) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  m.Return(m.Word32Not(m.Parameter(0)));
+  FOR_UINT32_INPUTS(i) {
+    int32_t expected = ~(*i);
+    CHECK_EQ(expected, m.Call(*i));
+  }
+}
+
+
+TEST(RunInt32NegP) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  m.Return(m.Int32Neg(m.Parameter(0)));
+  FOR_INT32_INPUTS(i) {
+    int32_t expected = -*i;
+    CHECK_EQ(expected, m.Call(*i));
+  }
+}
+
+
+TEST(RunWord32EqualAndWord32SarP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Word32Equal(m.Parameter(0),
+                           m.Word32Sar(m.Parameter(1), m.Parameter(2))));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *k & 0x1F;
+          int32_t expected = (*i == (*j >> shift));
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Word32Equal(m.Word32Sar(m.Parameter(0), m.Parameter(1)),
+                           m.Parameter(2)));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_INT32_INPUTS(k) {
+          uint32_t shift = *j & 0x1F;
+          int32_t expected = ((*i >> shift) == *k);
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32EqualAndWord32ShlP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Word32Equal(m.Parameter(0),
+                           m.Word32Shl(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *k & 0x1F;
+          int32_t expected = (*i == (*j << shift));
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Word32Equal(m.Word32Shl(m.Parameter(0), m.Parameter(1)),
+                           m.Parameter(2)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *j & 0x1F;
+          int32_t expected = ((*i << shift) == *k);
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32EqualAndWord32ShrP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Word32Equal(m.Parameter(0),
+                           m.Word32Shr(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *k & 0x1F;
+          int32_t expected = (*i == (*j >> shift));
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32,
+                                         kMachineWord32);
+    m.Return(m.Word32Equal(m.Word32Shr(m.Parameter(0), m.Parameter(1)),
+                           m.Parameter(2)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_INPUTS(k) {
+          uint32_t shift = *j & 0x1F;
+          int32_t expected = ((*i >> shift) == *k);
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunDeadNodes) {
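+  // Build one unused node of each kind; code generation must ignore the
+  // dead node and still return the constant.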
+  for (int i = 0; true; i++) {
+    RawMachineAssemblerTester<int32_t> m(i == 5 ? kMachineWord32
+                                                : kMachineLast);
+    int constant = 0x55 + i;
+    switch (i) {
+      case 0:
+        m.Int32Constant(44);
+        break;
+      case 1:
+        m.StringConstant("unused");
+        break;
+      case 2:
+        m.NumberConstant(11.1);
+        break;
+      case 3:
+        m.PointerConstant(&constant);
+        break;
+      case 4:
+        m.LoadFromPointer(&constant, kMachineWord32);
+        break;
+      case 5:
+        m.Parameter(0);
+        break;
+      default:
+        return;
+    }
+    m.Return(m.Int32Constant(constant));
+    if (i != 5) {
+      CHECK_EQ(constant, m.Call());
+    } else {
+      CHECK_EQ(constant, m.Call(0));
+    }
+  }
+}
+
+
+TEST(RunDeadInt32Binops) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  Operator* ops[] = {
+      m.machine()->Word32And(),      m.machine()->Word32Or(),
+      m.machine()->Word32Xor(),      m.machine()->Word32Shl(),
+      m.machine()->Word32Shr(),      m.machine()->Word32Sar(),
+      m.machine()->Word32Equal(),    m.machine()->Int32Add(),
+      m.machine()->Int32Sub(),       m.machine()->Int32Mul(),
+      m.machine()->Int32Div(),       m.machine()->Int32UDiv(),
+      m.machine()->Int32Mod(),       m.machine()->Int32UMod(),
+      m.machine()->Int32LessThan(),  m.machine()->Int32LessThanOrEqual(),
+      m.machine()->Uint32LessThan(), m.machine()->Uint32LessThanOrEqual(),
+      NULL};
+
+  for (int i = 0; ops[i] != NULL; i++) {
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+    int constant = 0x55555 + i;
+    m.NewNode(ops[i], m.Parameter(0), m.Parameter(1));
+    m.Return(m.Int32Constant(constant));
+
+    CHECK_EQ(constant, m.Call(1, 1));
+  }
+}
+
+
+template <typename CType>
+static void RunLoadImmIndex(MachineRepresentation rep) {
+  const int kNumElems = 3;
+  CType buffer[kNumElems];
+
+  // Initialize the buffer with raw data.
+  byte* raw = reinterpret_cast<byte*>(buffer);
+  for (size_t i = 0; i < sizeof(buffer); i++) {
+    raw[i] = (i + sizeof(buffer)) ^ 0xAA;
+  }
+
+  // Test with various large and small offsets.
+  for (int offset = -1; offset <= 200000; offset *= -5) {
+    for (int i = 0; i < kNumElems; i++) {
+      RawMachineAssemblerTester<CType> m;
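+      // base + index must compute &buffer[i]: the base is biased down by
+      // 'offset' elements and the byte index adds the offset back in.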
+      Node* base = m.PointerConstant(buffer - offset);
+      Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
+      m.Return(m.Load(rep, base, index));
+
+      CHECK_EQ(buffer[i], m.Call());
+      printf("XXX\n");
+    }
+  }
+}
+
+
+TEST(RunLoadImmIndex) {
+  RunLoadImmIndex<int8_t>(kMachineWord8);
+  RunLoadImmIndex<int16_t>(kMachineWord16);
+  RunLoadImmIndex<int32_t>(kMachineWord32);
+  RunLoadImmIndex<int32_t*>(kMachineTagged);
+
+  // TODO(titzer): test kMachineFloat64 loads
+  // TODO(titzer): test various indexing modes.
+}
+
+
+template <typename CType>
+static void RunLoadStore(MachineRepresentation rep) {
+  const int kNumElems = 4;
+  CType buffer[kNumElems];
+
+  for (int32_t x = 0; x < kNumElems; x++) {
+    int32_t y = kNumElems - x - 1;
+    // Initialize the buffer with raw data.
+    byte* raw = reinterpret_cast<byte*>(buffer);
+    for (size_t i = 0; i < sizeof(buffer); i++) {
+      raw[i] = (i + sizeof(buffer)) ^ 0xAA;
+    }
+
+    RawMachineAssemblerTester<int32_t> m;
+    int32_t OK = 0x29000 + x;
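+    // Copy buffer[x] into buffer[y] with a load/store pair; the raw data
+    // pattern guarantees the two elements differ beforehand.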
+    Node* base = m.PointerConstant(buffer);
+    Node* index0 = m.Int32Constant(x * sizeof(buffer[0]));
+    Node* load = m.Load(rep, base, index0);
+    Node* index1 = m.Int32Constant(y * sizeof(buffer[0]));
+    m.Store(rep, base, index1, load);
+    m.Return(m.Int32Constant(OK));
+
+    CHECK_NE(buffer[x], buffer[y]);
+    CHECK_EQ(OK, m.Call());
+    CHECK_EQ(buffer[x], buffer[y]);
+  }
+}
+
+
+TEST(RunLoadStore) {
+  RunLoadStore<int8_t>(kMachineWord8);
+  RunLoadStore<int16_t>(kMachineWord16);
+  RunLoadStore<int32_t>(kMachineWord32);
+  RunLoadStore<void*>(kMachineTagged);
+  RunLoadStore<double>(kMachineFloat64);
+}
+
+
+TEST(RunFloat64Binop) {
+  RawMachineAssemblerTester<int32_t> m;
+  double result;
+
+  Operator* ops[] = {m.machine()->Float64Add(), m.machine()->Float64Sub(),
+                     m.machine()->Float64Mul(), m.machine()->Float64Div(),
+                     m.machine()->Float64Mod(), NULL};
+
+  double inf = V8_INFINITY;
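+  // Constant operands for the binops, consumed in (left, right) pairs.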
+  Operator* inputs[] = {
+      m.common()->Float64Constant(0),     m.common()->Float64Constant(1),
+      m.common()->Float64Constant(1),     m.common()->Float64Constant(0),
+      m.common()->Float64Constant(0),     m.common()->Float64Constant(-1),
+      m.common()->Float64Constant(-1),    m.common()->Float64Constant(0),
+      m.common()->Float64Constant(0.22),  m.common()->Float64Constant(-1.22),
+      m.common()->Float64Constant(-1.22), m.common()->Float64Constant(0.22),
+      m.common()->Float64Constant(inf),   m.common()->Float64Constant(0.22),
+      m.common()->Float64Constant(inf),   m.common()->Float64Constant(-inf),
+      NULL};
+
+  for (int i = 0; ops[i] != NULL; i++) {
+    for (int j = 0; inputs[j] != NULL; j += 2) {
+      RawMachineAssemblerTester<int32_t> m;
+      Node* a = m.NewNode(inputs[j]);
+      Node* b = m.NewNode(inputs[j + 1]);
+      Node* binop = m.NewNode(ops[i], a, b);
+      Node* base = m.PointerConstant(&result);
+      Node* zero = m.Int32Constant(0);
+      m.Store(kMachineFloat64, base, zero, binop);
+      m.Return(m.Int32Constant(i + j));
+      CHECK_EQ(i + j, m.Call());
+    }
+  }
+}
+
+
+TEST(RunDeadFloat64Binops) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  Operator* ops[] = {m.machine()->Float64Add(), m.machine()->Float64Sub(),
+                     m.machine()->Float64Mul(), m.machine()->Float64Div(),
+                     m.machine()->Float64Mod(), NULL};
+
+  for (int i = 0; ops[i] != NULL; i++) {
+    RawMachineAssemblerTester<int32_t> m;
+    int constant = 0x53355 + i;
+    m.NewNode(ops[i], m.Float64Constant(0.1), m.Float64Constant(1.11));
+    m.Return(m.Int32Constant(constant));
+    CHECK_EQ(constant, m.Call());
+  }
+}
+
+
+TEST(RunFloat64AddP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Float64BinopTester bt(&m);
+
+  bt.AddReturn(m.Float64Add(bt.param0, bt.param1));
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double expected = *pl + *pr;
+      CHECK_EQ(expected, bt.call(*pl, *pr));
+    }
+  }
+}
+
+
+TEST(RunFloat64SubP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Float64BinopTester bt(&m);
+
+  bt.AddReturn(m.Float64Sub(bt.param0, bt.param1));
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double expected = *pl - *pr;
+      CHECK_EQ(expected, bt.call(*pl, *pr));
+    }
+  }
+}
+
+
+TEST(RunFloat64SubImm1) {
+  double input = 0.0;
+  double output = 0.0;
+
+  FOR_FLOAT64_INPUTS(i) {
+    RawMachineAssemblerTester<int32_t> m;
+    Node* t0 = m.LoadFromPointer(&input, kMachineFloat64);
+    Node* t1 = m.Float64Sub(m.Float64Constant(*i), t0);
+    m.StoreToPointer(&output, kMachineFloat64, t1);
+    m.Return(m.Int32Constant(0));
+    FOR_FLOAT64_INPUTS(j) {
+      input = *j;
+      double expected = *i - input;
+      CHECK_EQ(0, m.Call());
+      CHECK_EQ(expected, output);
+    }
+  }
+}
+
+
+TEST(RunFloat64SubImm2) {
+  double input = 0.0;
+  double output = 0.0;
+
+  FOR_FLOAT64_INPUTS(i) {
+    RawMachineAssemblerTester<int32_t> m;
+    Node* t0 = m.LoadFromPointer(&input, kMachineFloat64);
+    Node* t1 = m.Float64Sub(t0, m.Float64Constant(*i));
+    m.StoreToPointer(&output, kMachineFloat64, t1);
+    m.Return(m.Int32Constant(0));
+    FOR_FLOAT64_INPUTS(j) {
+      input = *j;
+      double expected = input - *i;
+      CHECK_EQ(0, m.Call());
+      CHECK_EQ(expected, output);
+    }
+  }
+}
+
+
+TEST(RunFloat64MulP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Float64BinopTester bt(&m);
+
+  bt.AddReturn(m.Float64Mul(bt.param0, bt.param1));
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double expected = *pl * *pr;
+      CHECK_EQ(expected, bt.call(*pl, *pr));
+    }
+  }
+}
+
+
+TEST(RunFloat64MulAndFloat64AddP) {
+  double input_a = 0.0;
+  double input_b = 0.0;
+  double input_c = 0.0;
+  double output = 0.0;
+
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Node* a = m.LoadFromPointer(&input_a, kMachineFloat64);
+    Node* b = m.LoadFromPointer(&input_b, kMachineFloat64);
+    Node* c = m.LoadFromPointer(&input_c, kMachineFloat64);
+    m.StoreToPointer(&output, kMachineFloat64,
+                     m.Float64Add(m.Float64Mul(a, b), c));
+    m.Return(m.Int32Constant(0));
+    FOR_FLOAT64_INPUTS(i) {
+      FOR_FLOAT64_INPUTS(j) {
+        FOR_FLOAT64_INPUTS(k) {
+          input_a = *i;
+          input_b = *j;
+          input_c = *k;
+          volatile double temp = input_a * input_b;
+          volatile double expected = temp + input_c;
+          CHECK_EQ(0, m.Call());
+          CHECK_EQ(expected, output);
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Node* a = m.LoadFromPointer(&input_a, kMachineFloat64);
+    Node* b = m.LoadFromPointer(&input_b, kMachineFloat64);
+    Node* c = m.LoadFromPointer(&input_c, kMachineFloat64);
+    m.StoreToPointer(&output, kMachineFloat64,
+                     m.Float64Add(a, m.Float64Mul(b, c)));
+    m.Return(m.Int32Constant(0));
+    FOR_FLOAT64_INPUTS(i) {
+      FOR_FLOAT64_INPUTS(j) {
+        FOR_FLOAT64_INPUTS(k) {
+          input_a = *i;
+          input_b = *j;
+          input_c = *k;
+          volatile double temp = input_b * input_c;
+          volatile double expected = input_a + temp;
+          CHECK_EQ(0, m.Call());
+          CHECK_EQ(expected, output);
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunFloat64MulAndFloat64SubP) {
+  double input_a = 0.0;
+  double input_b = 0.0;
+  double input_c = 0.0;
+  double output = 0.0;
+
+  RawMachineAssemblerTester<int32_t> m;
+  Node* a = m.LoadFromPointer(&input_a, kMachineFloat64);
+  Node* b = m.LoadFromPointer(&input_b, kMachineFloat64);
+  Node* c = m.LoadFromPointer(&input_c, kMachineFloat64);
+  m.StoreToPointer(&output, kMachineFloat64,
+                   m.Float64Sub(a, m.Float64Mul(b, c)));
+  m.Return(m.Int32Constant(0));
+
+  FOR_FLOAT64_INPUTS(i) {
+    FOR_FLOAT64_INPUTS(j) {
+      FOR_FLOAT64_INPUTS(k) {
+        input_a = *i;
+        input_b = *j;
+        input_c = *k;
+        volatile double temp = input_b * input_c;
+        volatile double expected = input_a - temp;
+        CHECK_EQ(0, m.Call());
+        CHECK_EQ(expected, output);
+      }
+    }
+  }
+}
+
+
+TEST(RunFloat64MulImm) {
+  double input = 0.0;
+  double output = 0.0;
+
+  {
+    FOR_FLOAT64_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m;
+      Node* t0 = m.LoadFromPointer(&input, kMachineFloat64);
+      Node* t1 = m.Float64Mul(m.Float64Constant(*i), t0);
+      m.StoreToPointer(&output, kMachineFloat64, t1);
+      m.Return(m.Int32Constant(0));
+      FOR_FLOAT64_INPUTS(j) {
+        input = *j;
+        double expected = *i * input;
+        CHECK_EQ(0, m.Call());
+        CHECK_EQ(expected, output);
+      }
+    }
+  }
+  {
+    FOR_FLOAT64_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m;
+      Node* t0 = m.LoadFromPointer(&input, kMachineFloat64);
+      Node* t1 = m.Float64Mul(t0, m.Float64Constant(*i));
+      m.StoreToPointer(&output, kMachineFloat64, t1);
+      m.Return(m.Int32Constant(0));
+      FOR_FLOAT64_INPUTS(j) {
+        input = *j;
+        double expected = input * *i;
+        CHECK_EQ(0, m.Call());
+        CHECK_EQ(expected, output);
+      }
+    }
+  }
+}
+
+
+TEST(RunFloat64DivP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Float64BinopTester bt(&m);
+
+  bt.AddReturn(m.Float64Div(bt.param0, bt.param1));
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double expected = *pl / *pr;
+      CHECK_EQ(expected, bt.call(*pl, *pr));
+    }
+  }
+}
+
+
+TEST(RunFloat64ModP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Float64BinopTester bt(&m);
+
+  bt.AddReturn(m.Float64Mod(bt.param0, bt.param1));
+
+  FOR_FLOAT64_INPUTS(i) {
+    FOR_FLOAT64_INPUTS(j) {
+      double expected = modulo(*i, *j);
+      double found = bt.call(*i, *j);
+      CHECK_EQ(expected, found);
+    }
+  }
+}
+
+
+TEST(RunConvertInt32ToFloat64_A) {
+  RawMachineAssemblerTester<int32_t> m;
+  int32_t magic = 0x986234;
+  double result = 0;
+
+  Node* convert = m.ConvertInt32ToFloat64(m.Int32Constant(magic));
+  m.Store(kMachineFloat64, m.PointerConstant(&result), m.Int32Constant(0),
+          convert);
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+  CHECK_EQ(static_cast<double>(magic), result);
+}
+
+
+TEST(RunConvertInt32ToFloat64_B) {
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+  double output = 0;
+
+  Node* convert = m.ConvertInt32ToFloat64(m.Parameter(0));
+  m.Store(kMachineFloat64, m.PointerConstant(&output), m.Int32Constant(0),
+          convert);
+  m.Return(m.Parameter(0));
+
+  FOR_INT32_INPUTS(i) {
+    int32_t expect = *i;
+    CHECK_EQ(expect, m.Call(expect));
+    CHECK_EQ(static_cast<double>(expect), output);
+  }
+}
+
+
+// TODO(titzer): Test ConvertUint32ToFloat64
+
+
+TEST(RunConvertFloat64ToInt32_A) {
+  RawMachineAssemblerTester<int32_t> m;
+  int32_t magic = 0x786234;
+  double input = 11.1;
+  int32_t result = 0;
+
+  m.Store(kMachineWord32, m.PointerConstant(&result), m.Int32Constant(0),
+          m.ConvertFloat64ToInt32(m.Float64Constant(input)));
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+  CHECK_EQ(static_cast<int32_t>(input), result);
+}
+
+
+TEST(RunConvertFloat64ToInt32_B) {
+  RawMachineAssemblerTester<int32_t> m;
+  double input = 0;
+  int32_t output = 0;
+
+  Node* load =
+      m.Load(kMachineFloat64, m.PointerConstant(&input), m.Int32Constant(0));
+  Node* convert = m.ConvertFloat64ToInt32(load);
+  m.Store(kMachineWord32, m.PointerConstant(&output), m.Int32Constant(0),
+          convert);
+  m.Return(convert);
+
+  {
+    FOR_INT32_INPUTS(i) {
+      input = *i;
+      int32_t expect = *i;
+      CHECK_EQ(expect, m.Call());
+      CHECK_EQ(expect, output);
+    }
+  }
+
+  {
+    FOR_FLOAT64_INPUTS(i) {
+      input = *i;
+      // TODO(titzer): float64 -> int32 outside of the int32 range; the machine
+      // backends are all wrong in different ways, and they certainly don't
+      // implement the JavaScript conversions correctly.
+      if (std::isnan(input) || input > INT_MAX || input < INT_MIN) {
+        continue;
+      }
+      int32_t expect = static_cast<int32_t>(input);
+      CHECK_EQ(expect, m.Call());
+      CHECK_EQ(expect, output);
+    }
+  }
+}
+
+
+// TODO(titzer): test ConvertFloat64ToUint32
+
+
+TEST(RunConvertFloat64ToInt32_truncation) {
+  RawMachineAssemblerTester<int32_t> m;
+  int32_t magic = 0x786234;
+  double input = 3.9;
+  int32_t result = 0;
+
+  Node* input_node =
+      m.Load(kMachineFloat64, m.PointerConstant(&input), m.Int32Constant(0));
+  m.Store(kMachineWord32, m.PointerConstant(&result), m.Int32Constant(0),
+          m.ConvertFloat64ToInt32(input_node));
+  m.Return(m.Int32Constant(magic));
+
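+  // The conversion truncates toward zero, so i +/- 0.9 must produce i.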
+  for (int i = -200; i < 200; i++) {
+    input = i + (i < 0 ? -0.9 : 0.9);
+    CHECK_EQ(magic, m.Call());
+    CHECK_EQ(i, result);
+  }
+}
+
+
+TEST(RunConvertFloat64ToInt32_spilled) {
+  RawMachineAssemblerTester<int32_t> m;
+  const int kNumInputs = 32;
+  int32_t magic = 0x786234;
+  double input[kNumInputs];
+  int32_t result[kNumInputs];
+  Node* input_node[kNumInputs];
+
+  for (int i = 0; i < kNumInputs; i++) {
+    input_node[i] = m.Load(kMachineFloat64, m.PointerConstant(&input),
+                           m.Int32Constant(i * 8));
+  }
+
+  for (int i = 0; i < kNumInputs; i++) {
+    m.Store(kMachineWord32, m.PointerConstant(&result), m.Int32Constant(i * 4),
+            m.ConvertFloat64ToInt32(input_node[i]));
+  }
+
+  m.Return(m.Int32Constant(magic));
+
+  for (int i = 0; i < kNumInputs; i++) {
+    input[i] = 100.9 + i;
+  }
+
+  CHECK_EQ(magic, m.Call());
+
+  for (int i = 0; i < kNumInputs; i++) {
+    CHECK_EQ(100 + i, result[i]);
+  }
+}
+
+
+TEST(RunDeadConvertFloat64ToInt32) {
+  RawMachineAssemblerTester<int32_t> m;
+  const int magic = 0x88abcda4;
+  m.ConvertFloat64ToInt32(m.Float64Constant(999.78));
+  m.Return(m.Int32Constant(magic));
+  CHECK_EQ(magic, m.Call());
+}
+
+
+TEST(RunDeadConvertInt32ToFloat64) {
+  RawMachineAssemblerTester<int32_t> m;
+  const int magic = 0x8834abcd;
+  m.ConvertInt32ToFloat64(m.Int32Constant(magic - 6888));
+  m.Return(m.Int32Constant(magic));
+  CHECK_EQ(magic, m.Call());
+}
+
+
+TEST(RunLoopPhiInduction2) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  int false_val = 0x10777;
+
+  // x = false_val; while(false) { x++; } return x;
+  MLabel header, body, end;
+  Node* false_node = m.Int32Constant(false_val);
+  m.Goto(&header);
+  m.Bind(&header);
+  Node* phi = m.Phi(false_node, false_node);
+  m.Branch(m.Int32Constant(0), &body, &end);
+  m.Bind(&body);
+  Node* add = m.Int32Add(phi, m.Int32Constant(1));
+  phi->ReplaceInput(1, add);
+  m.Goto(&header);
+  m.Bind(&end);
+  m.Return(phi);
+
+  CHECK_EQ(false_val, m.Call());
+}
+
+
+TEST(RunDoubleDiamond) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  const int magic = 99645;
+  double buffer = 0.1;
+  double constant = 99.99;
+
+  MLabel blocka, blockb, end;
+  Node* k1 = m.Float64Constant(constant);
+  Node* k2 = m.Float64Constant(0 - constant);
+  m.Branch(m.Int32Constant(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Goto(&end);
+  m.Bind(&blockb);
+  m.Goto(&end);
+  m.Bind(&end);
+  Node* phi = m.Phi(k2, k1);
+  m.Store(kMachineFloat64, m.PointerConstant(&buffer), m.Int32Constant(0), phi);
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+  CHECK_EQ(constant, buffer);
+}
+
+
+TEST(RunRefDiamond) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  const int magic = 99644;
+  Handle<String> rexpected =
+      CcTest::i_isolate()->factory()->InternalizeUtf8String("A");
+  String* buffer;
+
+  MLabel blocka, blockb, end;
+  Node* k1 = m.StringConstant("A");
+  Node* k2 = m.StringConstant("B");
+  m.Branch(m.Int32Constant(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Goto(&end);
+  m.Bind(&blockb);
+  m.Goto(&end);
+  m.Bind(&end);
+  Node* phi = m.Phi(k2, k1);
+  m.Store(kMachineTagged, m.PointerConstant(&buffer), m.Int32Constant(0), phi);
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+  CHECK(rexpected->SameValue(buffer));
+}
+
+
+TEST(RunDoubleRefDiamond) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  const int magic = 99648;
+  double dbuffer = 0.1;
+  double dconstant = 99.99;
+  Handle<String> rexpected =
+      CcTest::i_isolate()->factory()->InternalizeUtf8String("AX");
+  String* rbuffer;
+
+  MLabel blocka, blockb, end;
+  Node* d1 = m.Float64Constant(dconstant);
+  Node* d2 = m.Float64Constant(0 - dconstant);
+  Node* r1 = m.StringConstant("AX");
+  Node* r2 = m.StringConstant("BX");
+  m.Branch(m.Int32Constant(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Goto(&end);
+  m.Bind(&blockb);
+  m.Goto(&end);
+  m.Bind(&end);
+  Node* dphi = m.Phi(d2, d1);
+  Node* rphi = m.Phi(r2, r1);
+  m.Store(kMachineFloat64, m.PointerConstant(&dbuffer), m.Int32Constant(0),
+          dphi);
+  m.Store(kMachineTagged, m.PointerConstant(&rbuffer), m.Int32Constant(0),
+          rphi);
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+  CHECK_EQ(dconstant, dbuffer);
+  CHECK(rexpected->SameValue(rbuffer));
+}
+
+
+TEST(RunDoubleRefDoubleDiamond) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  const int magic = 99649;
+  double dbuffer = 0.1;
+  double dconstant = 99.997;
+  Handle<String> rexpected =
+      CcTest::i_isolate()->factory()->InternalizeUtf8String("AD");
+  String* rbuffer;
+
+  MLabel blocka, blockb, mid, blockd, blocke, end;
+  Node* d1 = m.Float64Constant(dconstant);
+  Node* d2 = m.Float64Constant(0 - dconstant);
+  Node* r1 = m.StringConstant("AD");
+  Node* r2 = m.StringConstant("BD");
+  m.Branch(m.Int32Constant(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Goto(&mid);
+  m.Bind(&blockb);
+  m.Goto(&mid);
+  m.Bind(&mid);
+  Node* dphi1 = m.Phi(d2, d1);
+  Node* rphi1 = m.Phi(r2, r1);
+  m.Branch(m.Int32Constant(0), &blockd, &blocke);
+
+  m.Bind(&blockd);
+  m.Goto(&end);
+  m.Bind(&blocke);
+  m.Goto(&end);
+  m.Bind(&end);
+  Node* dphi2 = m.Phi(d1, dphi1);
+  Node* rphi2 = m.Phi(r1, rphi1);
+
+  m.Store(kMachineFloat64, m.PointerConstant(&dbuffer), m.Int32Constant(0),
+          dphi2);
+  m.Store(kMachineTagged, m.PointerConstant(&rbuffer), m.Int32Constant(0),
+          rphi2);
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+  CHECK_EQ(dconstant, dbuffer);
+  CHECK(rexpected->SameValue(rbuffer));
+}
+
+
+TEST(RunDoubleLoopPhi) {
+  RawMachineAssemblerTester<int32_t> m;
+  MLabel header, body, end;
+
+  int magic = 99773;
+  double buffer = 0.99;
+  double dconstant = 777.1;
+
+  Node* zero = m.Int32Constant(0);
+  Node* dk = m.Float64Constant(dconstant);
+
+  m.Goto(&header);
+  m.Bind(&header);
+  Node* phi = m.Phi(dk, dk);
+  phi->ReplaceInput(1, phi);
+  m.Branch(zero, &body, &end);
+  m.Bind(&body);
+  m.Goto(&header);
+  m.Bind(&end);
+  m.Store(kMachineFloat64, m.PointerConstant(&buffer), m.Int32Constant(0), phi);
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+}
+
+
+TEST(RunCountToTenAccRaw) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  Node* zero = m.Int32Constant(0);
+  Node* ten = m.Int32Constant(10);
+  Node* one = m.Int32Constant(1);
+
+  MLabel header, body, body_cont, end;
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* i = m.Phi(zero, zero);
+  Node* j = m.Phi(zero, zero);
+  m.Goto(&body);
+
+  m.Bind(&body);
+  Node* next_i = m.Int32Add(i, one);
+  Node* next_j = m.Int32Add(j, one);
+  m.Branch(m.Word32Equal(next_i, ten), &end, &body_cont);
+
+  m.Bind(&body_cont);
+  i->ReplaceInput(1, next_i);
+  j->ReplaceInput(1, next_j);
+  m.Goto(&header);
+
+  m.Bind(&end);
+  m.Return(ten);
+
+  CHECK_EQ(10, m.Call());
+}
+
+
+TEST(RunCountToTenAccRaw2) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  Node* zero = m.Int32Constant(0);
+  Node* ten = m.Int32Constant(10);
+  Node* one = m.Int32Constant(1);
+
+  MLabel header, body, body_cont, end;
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* i = m.Phi(zero, zero);
+  Node* j = m.Phi(zero, zero);
+  Node* k = m.Phi(zero, zero);
+  m.Goto(&body);
+
+  m.Bind(&body);
+  Node* next_i = m.Int32Add(i, one);
+  Node* next_j = m.Int32Add(j, one);
+  Node* next_k = m.Int32Add(j, one);
+  m.Branch(m.Word32Equal(next_i, ten), &end, &body_cont);
+
+  m.Bind(&body_cont);
+  i->ReplaceInput(1, next_i);
+  j->ReplaceInput(1, next_j);
+  k->ReplaceInput(1, next_k);
+  m.Goto(&header);
+
+  m.Bind(&end);
+  m.Return(ten);
+
+  CHECK_EQ(10, m.Call());
+}
+
+
+TEST(RunAddTree) {
+  RawMachineAssemblerTester<int32_t> m;
+  int32_t inputs[] = {11, 12, 13, 14, 15, 16, 17, 18};
+
+  Node* base = m.PointerConstant(inputs);
+  Node* n0 = m.Load(kMachineWord32, base, m.Int32Constant(0 * sizeof(int32_t)));
+  Node* n1 = m.Load(kMachineWord32, base, m.Int32Constant(1 * sizeof(int32_t)));
+  Node* n2 = m.Load(kMachineWord32, base, m.Int32Constant(2 * sizeof(int32_t)));
+  Node* n3 = m.Load(kMachineWord32, base, m.Int32Constant(3 * sizeof(int32_t)));
+  Node* n4 = m.Load(kMachineWord32, base, m.Int32Constant(4 * sizeof(int32_t)));
+  Node* n5 = m.Load(kMachineWord32, base, m.Int32Constant(5 * sizeof(int32_t)));
+  Node* n6 = m.Load(kMachineWord32, base, m.Int32Constant(6 * sizeof(int32_t)));
+  Node* n7 = m.Load(kMachineWord32, base, m.Int32Constant(7 * sizeof(int32_t)));
+
+  Node* i1 = m.Int32Add(n0, n1);
+  Node* i2 = m.Int32Add(n2, n3);
+  Node* i3 = m.Int32Add(n4, n5);
+  Node* i4 = m.Int32Add(n6, n7);
+
+  Node* i5 = m.Int32Add(i1, i2);
+  Node* i6 = m.Int32Add(i3, i4);
+
+  Node* i7 = m.Int32Add(i5, i6);
+
+  m.Return(i7);
+
+  CHECK_EQ(116, m.Call());
+}
+
+
+#if MACHINE_ASSEMBLER_SUPPORTS_CALL_C
+
+static int Seven() { return 7; }
+static int UnaryMinus(int a) { return -a; }
+static int APlusTwoB(int a, int b) { return a + 2 * b; }
+
+
+TEST(RunCallSeven) {
+  for (int i = 0; i < 2; i++) {
+    bool call_direct = i == 0;
+    void* function_address =
+        reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&Seven));
+
+    RawMachineAssemblerTester<int32_t> m;
+    Node** args = NULL;
+    MachineRepresentation* arg_types = NULL;
+    Node* function =
+        call_direct ? m.PointerConstant(function_address)
+                    : m.LoadFromPointer(&function_address,
+                                        MachineOperatorBuilder::pointer_rep());
+    m.Return(m.CallC(function, kMachineWord32, arg_types, args, 0));
+
+    CHECK_EQ(7, m.Call());
+  }
+}
+
+
+TEST(RunCallUnaryMinus) {
+  for (int i = 0; i < 2; i++) {
+    bool call_direct = i == 0;
+    void* function_address =
+        reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&UnaryMinus));
+
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+    Node* args[] = {m.Parameter(0)};
+    MachineRepresentation arg_types[] = {kMachineWord32};
+    Node* function =
+        call_direct ? m.PointerConstant(function_address)
+                    : m.LoadFromPointer(&function_address,
+                                        MachineOperatorBuilder::pointer_rep());
+    m.Return(m.CallC(function, kMachineWord32, arg_types, args, 1));
+
+    FOR_INT32_INPUTS(i) {
+      int a = *i;
+      CHECK_EQ(-a, m.Call(a));
+    }
+  }
+}
+
+
+TEST(RunCallAPlusTwoB) {
+  for (int i = 0; i < 2; i++) {
+    bool call_direct = i == 0;
+    void* function_address =
+        reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&APlusTwoB));
+
+    RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+    Node* args[] = {m.Parameter(0), m.Parameter(1)};
+    MachineRepresentation arg_types[] = {kMachineWord32, kMachineWord32};
+    Node* function =
+        call_direct ? m.PointerConstant(function_address)
+                    : m.LoadFromPointer(&function_address,
+                                        MachineOperatorBuilder::pointer_rep());
+    m.Return(m.CallC(function, kMachineWord32, arg_types, args, 2));
+
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int a = *i;
+        int b = *j;
+        int result = m.Call(a, b);
+        CHECK_EQ(a + 2 * b, result);
+      }
+    }
+  }
+}
+
+#endif  // MACHINE_ASSEMBLER_SUPPORTS_CALL_C
+
+
+static const int kFloat64CompareHelperTestCases = 15;
+static const int kFloat64CompareHelperNodeType = 4;
+
+static int Float64CompareHelper(RawMachineAssemblerTester<int32_t>* m,
+                                int test_case, int node_type, double x,
+                                double y) {
+  static double buffer[2];
+  buffer[0] = x;
+  buffer[1] = y;
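+  // A static buffer gives the loads below an address that is still valid
+  // when the generated code runs.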
+  CHECK(0 <= test_case && test_case < kFloat64CompareHelperTestCases);
+  CHECK(0 <= node_type && node_type < kFloat64CompareHelperNodeType);
+  CHECK(x < y);
+  bool load_a = node_type / 2 == 1;
+  bool load_b = node_type % 2 == 1;
+  Node* a = load_a ? m->Load(kMachineFloat64, m->PointerConstant(&buffer[0]))
+                   : m->Float64Constant(x);
+  Node* b = load_b ? m->Load(kMachineFloat64, m->PointerConstant(&buffer[1]))
+                   : m->Float64Constant(y);
+  Node* cmp = NULL;
+  bool expected = false;
+  switch (test_case) {
+    // Equal tests.
+    case 0:
+      cmp = m->Float64Equal(a, b);
+      expected = false;
+      break;
+    case 1:
+      cmp = m->Float64Equal(a, a);
+      expected = true;
+      break;
+    // LessThan tests.
+    case 2:
+      cmp = m->Float64LessThan(a, b);
+      expected = true;
+      break;
+    case 3:
+      cmp = m->Float64LessThan(b, a);
+      expected = false;
+      break;
+    case 4:
+      cmp = m->Float64LessThan(a, a);
+      expected = false;
+      break;
+    // LessThanOrEqual tests.
+    case 5:
+      cmp = m->Float64LessThanOrEqual(a, b);
+      expected = true;
+      break;
+    case 6:
+      cmp = m->Float64LessThanOrEqual(b, a);
+      expected = false;
+      break;
+    case 7:
+      cmp = m->Float64LessThanOrEqual(a, a);
+      expected = true;
+      break;
+    // NotEqual tests.
+    case 8:
+      cmp = m->Float64NotEqual(a, b);
+      expected = true;
+      break;
+    case 9:
+      cmp = m->Float64NotEqual(b, a);
+      expected = true;
+      break;
+    case 10:
+      cmp = m->Float64NotEqual(a, a);
+      expected = false;
+      break;
+    // GreaterThan tests.
+    case 11:
+      cmp = m->Float64GreaterThan(a, a);
+      expected = false;
+      break;
+    case 12:
+      cmp = m->Float64GreaterThan(a, b);
+      expected = false;
+      break;
+    // GreaterThanOrEqual tests.
+    case 13:
+      cmp = m->Float64GreaterThanOrEqual(a, a);
+      expected = true;
+      break;
+    case 14:
+      cmp = m->Float64GreaterThanOrEqual(b, a);
+      expected = true;
+      break;
+    default:
+      UNREACHABLE();
+  }
+  m->Return(cmp);
+  return expected;
+}
+
+
+TEST(RunFloat64Compare) {
+  double inf = V8_INFINITY;
+  // All pairs (a1, a2) are of the form a1 < a2.
+  double inputs[] = {0.0,  1.0,  -1.0, 0.22, -1.22, 0.22,
+                     -inf, 0.22, 0.22, inf,  -inf,  inf};
+
+  for (int test = 0; test < kFloat64CompareHelperTestCases; test++) {
+    for (int node_type = 0; node_type < kFloat64CompareHelperNodeType;
+         node_type++) {
+      for (size_t input = 0; input < ARRAY_SIZE(inputs); input += 2) {
+        RawMachineAssemblerTester<int32_t> m;
+        int expected = Float64CompareHelper(&m, test, node_type, inputs[input],
+                                            inputs[input + 1]);
+        CHECK_EQ(expected, m.Call());
+      }
+    }
+  }
+}
+
+
+TEST(RunFloat64UnorderedCompare) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  Operator* operators[] = {m.machine()->Float64Equal(),
+                           m.machine()->Float64LessThan(),
+                           m.machine()->Float64LessThanOrEqual()};
+
+  double nan = v8::base::OS::nan_value();
+
+  FOR_FLOAT64_INPUTS(i) {
+    for (size_t o = 0; o < ARRAY_SIZE(operators); ++o) {
+      for (int j = 0; j < 2; j++) {
+        RawMachineAssemblerTester<int32_t> m;
+        Node* a = m.Float64Constant(*i);
+        Node* b = m.Float64Constant(nan);
+        if (j == 1) std::swap(a, b);
+        m.Return(m.NewNode(operators[o], a, b));
+        CHECK_EQ(0, m.Call());
+      }
+    }
+  }
+}
+
+
+TEST(RunFloat64Equal) {
+  double input_a = 0.0;
+  double input_b = 0.0;
+
+  RawMachineAssemblerTester<int32_t> m;
+  Node* a = m.LoadFromPointer(&input_a, kMachineFloat64);
+  Node* b = m.LoadFromPointer(&input_b, kMachineFloat64);
+  m.Return(m.Float64Equal(a, b));
+
+  CompareWrapper cmp(IrOpcode::kFloat64Equal);
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      input_a = *pl;
+      input_b = *pr;
+      int32_t expected = cmp.Float64Compare(input_a, input_b) ? 1 : 0;
+      CHECK_EQ(expected, m.Call());
+    }
+  }
+}
+
+
+TEST(RunFloat64LessThan) {
+  double input_a = 0.0;
+  double input_b = 0.0;
+
+  RawMachineAssemblerTester<int32_t> m;
+  Node* a = m.LoadFromPointer(&input_a, kMachineFloat64);
+  Node* b = m.LoadFromPointer(&input_b, kMachineFloat64);
+  m.Return(m.Float64LessThan(a, b));
+
+  CompareWrapper cmp(IrOpcode::kFloat64LessThan);
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      input_a = *pl;
+      input_b = *pr;
+      int32_t expected = cmp.Float64Compare(input_a, input_b) ? 1 : 0;
+      CHECK_EQ(expected, m.Call());
+    }
+  }
+}
+
+
+template <typename IntType, MachineRepresentation kRepresentation>
+static void LoadStoreTruncation() {
+  IntType input;
+
+  RawMachineAssemblerTester<int32_t> m;
+  Node* a = m.LoadFromPointer(&input, kRepresentation);
+  Node* ap1 = m.Int32Add(a, m.Int32Constant(1));
+  m.StoreToPointer(&input, kRepresentation, ap1);
+  m.Return(ap1);
+
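+  // The load widens the narrow value to 32 bits and the store truncates it
+  // back, so the returned (wide) and stored (narrow) results diverge at the
+  // type's bounds.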
+  const IntType max = std::numeric_limits<IntType>::max();
+  const IntType min = std::numeric_limits<IntType>::min();
+
+  // Test upper bound.
+  input = max;
+  CHECK_EQ(max + 1, m.Call());
+  CHECK_EQ(min, input);
+
+  // Test lower bound.
+  input = min;
+  CHECK_EQ(max + 2, m.Call());
+  CHECK_EQ(min + 1, input);
+
+  // Test all one byte values that are not one byte bounds.
+  for (int i = -127; i < 127; i++) {
+    input = i;
+    int expected = i >= 0 ? i + 1 : max + (i - min) + 2;
+    CHECK_EQ(expected, m.Call());
+    CHECK_EQ(i + 1, input);
+  }
+}
+
+
+TEST(RunLoadStoreTruncation) {
+  LoadStoreTruncation<int8_t, kMachineWord8>();
+  LoadStoreTruncation<int16_t, kMachineWord16>();
+}
+
+
+static void IntPtrCompare(intptr_t left, intptr_t right) {
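+  // Assumes left < right; each case exercises one pointer comparison.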
+  for (int test = 0; test < 7; test++) {
+    RawMachineAssemblerTester<bool> m(MachineOperatorBuilder::pointer_rep(),
+                                      MachineOperatorBuilder::pointer_rep());
+    Node* p0 = m.Parameter(0);
+    Node* p1 = m.Parameter(1);
+    Node* res = NULL;
+    bool expected = false;
+    switch (test) {
+      case 0:
+        res = m.IntPtrLessThan(p0, p1);
+        expected = true;
+        break;
+      case 1:
+        res = m.IntPtrLessThanOrEqual(p0, p1);
+        expected = true;
+        break;
+      case 2:
+        res = m.IntPtrEqual(p0, p1);
+        expected = false;
+        break;
+      case 3:
+        res = m.IntPtrGreaterThanOrEqual(p0, p1);
+        expected = false;
+        break;
+      case 4:
+        res = m.IntPtrGreaterThan(p0, p1);
+        expected = false;
+        break;
+      case 5:
+        res = m.IntPtrEqual(p0, p0);
+        expected = true;
+        break;
+      case 6:
+        res = m.IntPtrNotEqual(p0, p1);
+        expected = true;
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+    m.Return(res);
+    CHECK_EQ(expected, m.Call(reinterpret_cast<int32_t*>(left),
+                              reinterpret_cast<int32_t*>(right)));
+  }
+}
+
+
+TEST(RunIntPtrCompare) {
+  intptr_t min = std::numeric_limits<intptr_t>::min();
+  intptr_t max = std::numeric_limits<intptr_t>::max();
+  // An ascending chain of intptr_t values.
+  intptr_t inputs[] = {min, min / 2, -1, 0, 1, max / 2, max};
+  for (size_t i = 0; i < ARRAY_SIZE(inputs) - 1; i++) {
+    IntPtrCompare(inputs[i], inputs[i + 1]);
+  }
+}
+
+
+TEST(RunTestIntPtrArithmetic) {
+  static const int kInputSize = 10;
+  int32_t inputs[kInputSize];
+  int32_t outputs[kInputSize];
+  for (int i = 0; i < kInputSize; i++) {
+    inputs[i] = i;
+    outputs[i] = -1;
+  }
+  RawMachineAssemblerTester<int32_t*> m;
+  Node* input = m.PointerConstant(&inputs[0]);
+  Node* output = m.PointerConstant(&outputs[kInputSize - 1]);
+  Node* elem_size = m.ConvertInt32ToIntPtr(m.Int32Constant(sizeof(inputs[0])));
+  for (int i = 0; i < kInputSize; i++) {
+    m.Store(kMachineWord32, output, m.Load(kMachineWord32, input));
+    input = m.IntPtrAdd(input, elem_size);
+    output = m.IntPtrSub(output, elem_size);
+  }
+  m.Return(input);
+  CHECK_EQ(&inputs[kInputSize], m.Call());
+  for (int i = 0; i < kInputSize; i++) {
+    CHECK_EQ(i, inputs[i]);
+    CHECK_EQ(kInputSize - i - 1, outputs[i]);
+  }
+}
+
+
+TEST(RunSpillLotsOfThings) {
+  static const int kInputSize = 1000;
+  RawMachineAssemblerTester<void> m;
+  Node* accs[kInputSize];
+  int32_t outputs[kInputSize];
+  Node* one = m.Int32Constant(1);
+  Node* acc = one;
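+  // Chain adds whose intermediate results are all kept live by the stores
+  // below, forcing the register allocator to spill.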
+  for (int i = 0; i < kInputSize; i++) {
+    acc = m.Int32Add(acc, one);
+    accs[i] = acc;
+  }
+  for (int i = 0; i < kInputSize; i++) {
+    m.StoreToPointer(&outputs[i], kMachineWord32, accs[i]);
+  }
+  m.Return(one);
+  m.Call();
+  for (int i = 0; i < kInputSize; i++) {
+    CHECK_EQ(i + 2, outputs[i]);
+  }
+}
+
+
+TEST(RunSpillConstantsAndParameters) {
+  static const size_t kInputSize = 1000;
+  static const int32_t kBase = 987;
+  RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+  int32_t outputs[kInputSize];
+  Node* csts[kInputSize];
+  Node* accs[kInputSize];
+  Node* acc = m.Int32Constant(0);
+  for (size_t i = 0; i < kInputSize; i++) {
+    csts[i] = m.Int32Constant(static_cast<int32_t>(kBase + i));
+  }
+  for (size_t i = 0; i < kInputSize; i++) {
+    acc = m.Int32Add(acc, csts[i]);
+    accs[i] = acc;
+  }
+  for (size_t i = 0; i < kInputSize; i++) {
+    m.StoreToPointer(&outputs[i], kMachineWord32, accs[i]);
+  }
+  m.Return(m.Int32Add(acc, m.Int32Add(m.Parameter(0), m.Parameter(1))));
+  FOR_INT32_INPUTS(i) {
+    FOR_INT32_INPUTS(j) {
+      int32_t expected = *i + *j;
+      for (size_t k = 0; k < kInputSize; k++) {
+        expected += kBase + k;
+      }
+      CHECK_EQ(expected, m.Call(*i, *j));
+      expected = 0;
+      for (size_t k = 0; k < kInputSize; k++) {
+        expected += kBase + k;
+        CHECK_EQ(expected, outputs[k]);
+      }
+    }
+  }
+}
+
+
+TEST(RunNewSpaceConstantsInPhi) {
+  RawMachineAssemblerTester<Object*> m(kMachineWord32);
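+  // The HeapNumbers below are freshly allocated in new space, so the phi's
+  // constant inputs are movable heap objects.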
+
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapNumber> true_val = isolate->factory()->NewHeapNumber(11.2);
+  Handle<HeapNumber> false_val = isolate->factory()->NewHeapNumber(11.3);
+  Node* true_node = m.HeapConstant(true_val);
+  Node* false_node = m.HeapConstant(false_val);
+
+  MLabel blocka, blockb, end;
+  m.Branch(m.Parameter(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Goto(&end);
+  m.Bind(&blockb);
+  m.Goto(&end);
+
+  m.Bind(&end);
+  Node* phi = m.Phi(true_node, false_node);
+  m.Return(phi);
+
+  CHECK_EQ(*false_val, m.Call(0));
+  CHECK_EQ(*true_val, m.Call(1));
+}
+
+
+#if MACHINE_ASSEMBLER_SUPPORTS_CALL_C
+
+TEST(RunSpillLotsOfThingsWithCall) {
+  static const int kInputSize = 1000;
+  RawMachineAssemblerTester<void> m;
+  Node* accs[kInputSize];
+  int32_t outputs[kInputSize];
+  Node* one = m.Int32Constant(1);
+  Node* acc = one;
+  for (int i = 0; i < kInputSize; i++) {
+    acc = m.Int32Add(acc, one);
+    accs[i] = acc;
+  }
+  // If the spill slot computation is wrong, it might load from the C frame.
+  {
+    void* func = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&Seven));
+    Node** args = NULL;
+    MachineRepresentation* arg_types = NULL;
+    m.CallC(m.PointerConstant(func), kMachineWord32, arg_types, args, 0);
+  }
+  for (int i = 0; i < kInputSize; i++) {
+    m.StoreToPointer(&outputs[i], kMachineWord32, accs[i]);
+  }
+  m.Return(one);
+  m.Call();
+  for (int i = 0; i < kInputSize; i++) {
+    CHECK_EQ(i + 2, outputs[i]);
+  }
+}
+
+#endif  // MACHINE_ASSEMBLER_SUPPORTS_CALL_C
+
+#endif
diff --git a/test/cctest/compiler/test-run-variables.cc b/test/cctest/compiler/test-run-variables.cc
new file mode 100644 (file)
index 0000000..bf86e0d
--- /dev/null
@@ -0,0 +1,121 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+static const char* throws = NULL;
+
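+// Each entry is a triple: source fragment, expected result for a non-falsey
+// 'a', and expected result for a falsey 'a'; 'throws' marks cases that must
+// throw.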
+static const char* load_tests[] = {
+    "var x = a; r = x",                       "123",       "0",
+    "var x = (r = x)",                        "undefined", "undefined",
+    "var x = (a?1:2); r = x",                 "1",         "2",
+    "const x = a; r = x",                     "123",       "0",
+    "const x = (r = x)",                      "undefined", "undefined",
+    "const x = (a?3:4); r = x",               "3",         "4",
+    "'use strict'; const x = a; r = x",       "123",       "0",
+    "'use strict'; const x = (r = x)",        throws,      throws,
+    "'use strict'; const x = (a?5:6); r = x", "5",         "6",
+    "'use strict'; let x = a; r = x",         "123",       "0",
+    "'use strict'; let x = (r = x)",          throws,      throws,
+    "'use strict'; let x = (a?7:8); r = x",   "7",         "8",
+    NULL};
+
+static const char* store_tests[] = {
+    "var x = 1; x = a; r = x",                     "123",  "0",
+    "var x = (a?(x=4,2):3); r = x",                "2",    "3",
+    "var x = (a?4:5); x = a; r = x",               "123",  "0",
+    "const x = 1; x = a; r = x",                   "1",    "1",
+    "const x = (a?(x=4,2):3); r = x",              "2",    "3",
+    "const x = (a?4:5); x = a; r = x",             "4",    "5",
+    // Assignments to 'const' are SyntaxErrors, handled by the parser,
+    // hence we cannot test them here because they are early errors.
+    "'use strict'; let x = 1; x = a; r = x",       "123",  "0",
+    "'use strict'; let x = (a?(x=4,2):3); r = x",  throws, "3",
+    "'use strict'; let x = (a?4:5); x = a; r = x", "123",  "0",
+    NULL};
+
+static const char* bind_tests[] = {
+    "if (a) { const x = a }; r = x;",            "123", "undefined",
+    "for (; a > 0; a--) { const x = a }; r = x", "123", "undefined",
+    // Re-initialization of variables other than legacy 'const' is not
+    // possible due to sane variable scoping, hence no tests here.
+    NULL};
+
+
+static void RunVariableTests(const char* source, const char* tests[]) {
+  FLAG_harmony_scoping = true;
+  EmbeddedVector<char, 512> buffer;
+
+  for (int i = 0; tests[i] != NULL; i += 3) {
+    SNPrintF(buffer, source, tests[i]);
+    PrintF("#%d: %s\n", i / 3, buffer.start());
+    FunctionTester T(buffer.start());
+
+    // Check function with non-falsey parameter.
+    if (tests[i + 1] != throws) {
+      Handle<Object> r = v8::Utils::OpenHandle(*CompileRun(tests[i + 1]));
+      T.CheckCall(r, T.Val(123), T.Val("result"));
+    } else {
+      T.CheckThrows(T.Val(123), T.Val("result"));
+    }
+
+    // Check function with falsey parameter.
+    if (tests[i + 2] != throws) {
+      Handle<Object> r = v8::Utils::OpenHandle(*CompileRun(tests[i + 2]));
+      T.CheckCall(r, T.Val(0.0), T.Val("result"));
+    } else {
+      T.CheckThrows(T.Val(0.0), T.Val("result"));
+    }
+  }
+}
+
+
+TEST(StackLoadVariables) {
+  const char* source = "(function(a,r) { %s; return r; })";
+  RunVariableTests(source, load_tests);
+}
+
+
+TEST(ContextLoadVariables) {
+  const char* source = "(function(a,r) { %s; function f() {x} return r; })";
+  RunVariableTests(source, load_tests);
+}
+
+
+TEST(StackStoreVariables) {
+  const char* source = "(function(a,r) { %s; return r; })";
+  RunVariableTests(source, store_tests);
+}
+
+
+TEST(ContextStoreVariables) {
+  const char* source = "(function(a,r) { %s; function f() {x} return r; })";
+  RunVariableTests(source, store_tests);
+}
+
+
+TEST(StackInitializeVariables) {
+  const char* source = "(function(a,r) { %s; return r; })";
+  RunVariableTests(source, bind_tests);
+}
+
+
+TEST(ContextInitializeVariables) {
+  const char* source = "(function(a,r) { %s; function f() {x} return r; })";
+  RunVariableTests(source, bind_tests);
+}
+
+
+TEST(SelfReferenceVariable) {
+  FunctionTester T("(function self() { return self; })");
+
+  T.CheckCall(T.function);
+  CompileRun("var self = 'not a function'");
+  T.CheckCall(T.function);
+}
diff --git a/test/cctest/compiler/test-schedule.cc b/test/cctest/compiler/test-schedule.cc
new file mode 100644 (file)
index 0000000..aa7dd99
--- /dev/null
@@ -0,0 +1,159 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/schedule.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
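+// A zero-input, zero-output placeholder operator for creating test nodes.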
+static SimpleOperator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
+                                     0, 0, "dummy");
+
+TEST(TestScheduleAllocation) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+
+  CHECK_NE(NULL, schedule.entry());
+  CHECK_EQ(schedule.entry(), *(schedule.all_blocks().begin()));
+}
+
+
+TEST(TestScheduleAddNode) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator);
+
+  Schedule schedule(scope.main_zone());
+
+  BasicBlock* entry = schedule.entry();
+  schedule.AddNode(entry, n0);
+  schedule.AddNode(entry, n1);
+
+  CHECK_EQ(entry, schedule.block(n0));
+  CHECK_EQ(entry, schedule.block(n1));
+  CHECK(schedule.SameBasicBlock(n0, n1));
+
+  Node* n2 = graph.NewNode(&dummy_operator);
+  CHECK_EQ(NULL, schedule.block(n2));
+}
+
+
+TEST(TestScheduleAddGoto) {
+  HandleAndZoneScope scope;
+
+  Schedule schedule(scope.main_zone());
+  BasicBlock* entry = schedule.entry();
+  BasicBlock* next = schedule.NewBasicBlock();
+
+  schedule.AddGoto(entry, next);
+
+  CHECK_EQ(0, entry->PredecessorCount());
+  CHECK_EQ(1, entry->SuccessorCount());
+  CHECK_EQ(next, entry->SuccessorAt(0));
+
+  CHECK_EQ(1, next->PredecessorCount());
+  CHECK_EQ(entry, next->PredecessorAt(0));
+  CHECK_EQ(0, next->SuccessorCount());
+}
+
+
+TEST(TestScheduleAddBranch) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+
+  BasicBlock* entry = schedule.entry();
+  BasicBlock* tblock = schedule.NewBasicBlock();
+  BasicBlock* fblock = schedule.NewBasicBlock();
+
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common(scope.main_zone());
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* b = graph.NewNode(common.Branch(), n0);
+
+  schedule.AddBranch(entry, b, tblock, fblock);
+
+  CHECK_EQ(0, entry->PredecessorCount());
+  CHECK_EQ(2, entry->SuccessorCount());
+  CHECK_EQ(tblock, entry->SuccessorAt(0));
+  CHECK_EQ(fblock, entry->SuccessorAt(1));
+
+  CHECK_EQ(1, tblock->PredecessorCount());
+  CHECK_EQ(entry, tblock->PredecessorAt(0));
+  CHECK_EQ(0, tblock->SuccessorCount());
+
+  CHECK_EQ(1, fblock->PredecessorCount());
+  CHECK_EQ(entry, fblock->PredecessorAt(0));
+  CHECK_EQ(0, fblock->SuccessorCount());
+}
+
+
+TEST(TestScheduleAddReturn) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Graph graph(scope.main_zone());
+  Node* n0 = graph.NewNode(&dummy_operator);
+  BasicBlock* entry = schedule.entry();
+  schedule.AddReturn(entry, n0);
+
+  CHECK_EQ(0, entry->PredecessorCount());
+  CHECK_EQ(1, entry->SuccessorCount());
+  CHECK_EQ(schedule.exit(), entry->SuccessorAt(0));
+}
+
+
+TEST(TestScheduleAddThrow) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Graph graph(scope.main_zone());
+  Node* n0 = graph.NewNode(&dummy_operator);
+  BasicBlock* entry = schedule.entry();
+  schedule.AddThrow(entry, n0);
+
+  CHECK_EQ(0, entry->PredecessorCount());
+  CHECK_EQ(1, entry->SuccessorCount());
+  CHECK_EQ(schedule.exit(), entry->SuccessorAt(0));
+}
+
+
+TEST(TestScheduleAddDeopt) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Graph graph(scope.main_zone());
+  Node* n0 = graph.NewNode(&dummy_operator);
+  BasicBlock* entry = schedule.entry();
+  schedule.AddDeoptimize(entry, n0);
+
+  CHECK_EQ(0, entry->PredecessorCount());
+  CHECK_EQ(1, entry->SuccessorCount());
+  CHECK_EQ(schedule.exit(), entry->SuccessorAt(0));
+}
+
+
+TEST(BuildMulNodeGraph) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common(scope.main_zone());
+  MachineOperatorBuilder machine(scope.main_zone(), kMachineWord32);
+
+  Node* start = graph.NewNode(common.Start());
+  graph.SetStart(start);
+  Node* param0 = graph.NewNode(common.Parameter(0));
+  Node* param1 = graph.NewNode(common.Parameter(1));
+
+  Node* mul = graph.NewNode(machine.Int32Mul(), param0, param1);
+  Node* ret = graph.NewNode(common.Return(), mul, start);
+
+  USE(ret);
+}
diff --git a/test/cctest/compiler/test-scheduler.cc b/test/cctest/compiler/test-scheduler.cc
new file mode 100644 (file)
index 0000000..6b56f10
--- /dev/null
@@ -0,0 +1,1840 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/scheduler.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
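+// Owns a heap-allocated array of blocks forming a loop; nodes[0] is the
+// loop header and the last block carries the back edge (see CreateLoop).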
+struct TestLoop {
+  int count;
+  BasicBlock** nodes;
+  BasicBlock* header() { return nodes[0]; }
+  BasicBlock* last() { return nodes[count - 1]; }
+  ~TestLoop() { delete[] nodes; }
+};
+
+
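+// Builds a loop of |count| fresh blocks: each block is chained to the next
+// with AddSuccessor, and the last block gets a back edge to the first.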
+static TestLoop* CreateLoop(Schedule* schedule, int count) {
+  TestLoop* loop = new TestLoop();
+  loop->count = count;
+  loop->nodes = new BasicBlock* [count];
+  for (int i = 0; i < count; i++) {
+    loop->nodes[i] = schedule->NewBasicBlock();
+    if (i > 0) schedule->AddSuccessor(loop->nodes[i - 1], loop->nodes[i]);
+  }
+  schedule->AddSuccessor(loop->nodes[count - 1], loop->nodes[0]);
+  return loop;
+}
+
+
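+// Checks that |order| has |expected| blocks numbered 0..expected-1 in RPO
+// and, unless loops are allowed, that no block is marked as a loop header.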
+static void CheckRPONumbers(BasicBlockVector* order, int expected,
+                            bool loops_allowed) {
+  CHECK_EQ(expected, static_cast<int>(order->size()));
+  for (int i = 0; i < static_cast<int>(order->size()); i++) {
+    CHECK(order->at(i)->rpo_number_ == i);
+    if (!loops_allowed) CHECK_LT(order->at(i)->loop_end_, 0);
+  }
+}
+
+
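+// Checks that blocks[0] is a loop header whose loop spans exactly the
+// |body_size| blocks in |blocks|, each falling inside the header's bounds.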
+static void CheckLoopContains(BasicBlock** blocks, int body_size) {
+  BasicBlock* header = blocks[0];
+  CHECK_GT(header->loop_end_, 0);
+  CHECK_EQ(body_size, (header->loop_end_ - header->rpo_number_));
+  for (int i = 0; i < body_size; i++) {
+    int num = blocks[i]->rpo_number_;
+    CHECK(num >= header->rpo_number_ && num < header->loop_end_);
+    CHECK(header->LoopContains(blocks[i]));
+    CHECK(header->IsLoopHeader() || blocks[i]->loop_header_ == header);
+  }
+}
+
+
+TEST(RPODegenerate1) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 1, false);
+  CHECK_EQ(schedule.entry(), order->at(0));
+}
+
+
+TEST(RPODegenerate2) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  schedule.AddGoto(schedule.entry(), schedule.exit());
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 2, false);
+  CHECK_EQ(schedule.entry(), order->at(0));
+  CHECK_EQ(schedule.exit(), order->at(1));
+}
+
+
+TEST(RPOLine) {
+  HandleAndZoneScope scope;
+
+  for (int i = 0; i < 10; i++) {
+    Schedule schedule(scope.main_zone());
+    Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+    BasicBlock* last = schedule.entry();
+    for (int j = 0; j < i; j++) {
+      BasicBlock* block = schedule.NewBasicBlock();
+      schedule.AddGoto(last, block);
+      last = block;
+    }
+    BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+    CheckRPONumbers(order, 1 + i, false);
+
+    Schedule::BasicBlocks blocks(schedule.all_blocks());
+    for (Schedule::BasicBlocks::iterator iter = blocks.begin();
+         iter != blocks.end(); ++iter) {
+      BasicBlock* block = *iter;
+      if (block->rpo_number_ >= 0 && block->SuccessorCount() == 1) {
+        CHECK(block->rpo_number_ + 1 == block->SuccessorAt(0)->rpo_number_);
+      }
+    }
+  }
+}
+
+
+TEST(RPOSelfLoop) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+  schedule.AddSuccessor(schedule.entry(), schedule.entry());
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 1, true);
+  BasicBlock* loop[] = {schedule.entry()};
+  CheckLoopContains(loop, 1);
+}
+
+
+TEST(RPOEntryLoop) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+  schedule.AddSuccessor(schedule.entry(), schedule.exit());
+  schedule.AddSuccessor(schedule.exit(), schedule.entry());
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 2, true);
+  BasicBlock* loop[] = {schedule.entry(), schedule.exit()};
+  CheckLoopContains(loop, 2);
+}
+
+
+TEST(RPOEndLoop) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+  TestLoop* loop1 = CreateLoop(&schedule, 2);
+  schedule.AddSuccessor(schedule.entry(), loop1->header());
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 3, true);
+  CheckLoopContains(loop1->nodes, loop1->count);
+  delete loop1;
+}
+
+
+TEST(RPOEndLoopNested) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+  TestLoop* loop1 = CreateLoop(&schedule, 2);
+  schedule.AddSuccessor(schedule.entry(), loop1->header());
+  schedule.AddSuccessor(loop1->last(), schedule.entry());
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 3, true);
+  CheckLoopContains(loop1->nodes, loop1->count);
+  delete loop1;
+}
+
+
+TEST(RPODiamond) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  BasicBlock* A = schedule.entry();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* D = schedule.exit();
+
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(A, C);
+  schedule.AddSuccessor(B, D);
+  schedule.AddSuccessor(C, D);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 4, false);
+
+  CHECK_EQ(0, A->rpo_number_);
+  CHECK((B->rpo_number_ == 1 && C->rpo_number_ == 2) ||
+        (B->rpo_number_ == 2 && C->rpo_number_ == 1));
+  CHECK_EQ(3, D->rpo_number_);
+}
+
+
+TEST(RPOLoop1) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  BasicBlock* A = schedule.entry();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* D = schedule.exit();
+
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(B, C);
+  schedule.AddSuccessor(C, B);
+  schedule.AddSuccessor(C, D);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 4, true);
+  BasicBlock* loop[] = {B, C};
+  CheckLoopContains(loop, 2);
+}
+
+
+TEST(RPOLoop2) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  BasicBlock* A = schedule.entry();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* D = schedule.exit();
+
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(B, C);
+  schedule.AddSuccessor(C, B);
+  schedule.AddSuccessor(B, D);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 4, true);
+  BasicBlock* loop[] = {B, C};
+  CheckLoopContains(loop, 2);
+}
+
+
+TEST(RPOLoopN) {
+  HandleAndZoneScope scope;
+
+  for (int i = 0; i < 11; i++) {
+    Schedule schedule(scope.main_zone());
+    Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+    BasicBlock* A = schedule.entry();
+    BasicBlock* B = schedule.NewBasicBlock();
+    BasicBlock* C = schedule.NewBasicBlock();
+    BasicBlock* D = schedule.NewBasicBlock();
+    BasicBlock* E = schedule.NewBasicBlock();
+    BasicBlock* F = schedule.NewBasicBlock();
+    BasicBlock* G = schedule.exit();
+
+    schedule.AddSuccessor(A, B);
+    schedule.AddSuccessor(B, C);
+    schedule.AddSuccessor(C, D);
+    schedule.AddSuccessor(D, E);
+    schedule.AddSuccessor(E, F);
+    schedule.AddSuccessor(F, B);
+    schedule.AddSuccessor(B, G);
+
+    // Throw in extra backedges from time to time.
+    if (i == 1) schedule.AddSuccessor(B, B);
+    if (i == 2) schedule.AddSuccessor(C, B);
+    if (i == 3) schedule.AddSuccessor(D, B);
+    if (i == 4) schedule.AddSuccessor(E, B);
+    if (i == 5) schedule.AddSuccessor(F, B);
+
+    // Throw in extra loop exits from time to time.
+    if (i == 6) schedule.AddSuccessor(B, G);
+    if (i == 7) schedule.AddSuccessor(C, G);
+    if (i == 8) schedule.AddSuccessor(D, G);
+    if (i == 9) schedule.AddSuccessor(E, G);
+    if (i == 10) schedule.AddSuccessor(F, G);
+
+    BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+    CheckRPONumbers(order, 7, true);
+    BasicBlock* loop[] = {B, C, D, E, F};
+    CheckLoopContains(loop, 5);
+  }
+}
+
+
+TEST(RPOLoopNest1) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  BasicBlock* A = schedule.entry();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* D = schedule.NewBasicBlock();
+  BasicBlock* E = schedule.NewBasicBlock();
+  BasicBlock* F = schedule.exit();
+
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(B, C);
+  schedule.AddSuccessor(C, D);
+  schedule.AddSuccessor(D, C);
+  schedule.AddSuccessor(D, E);
+  schedule.AddSuccessor(E, B);
+  schedule.AddSuccessor(E, F);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 6, true);
+  BasicBlock* loop1[] = {B, C, D, E};
+  CheckLoopContains(loop1, 4);
+
+  BasicBlock* loop2[] = {C, D};
+  CheckLoopContains(loop2, 2);
+}
+
+
+TEST(RPOLoopNest2) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  BasicBlock* A = schedule.entry();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* D = schedule.NewBasicBlock();
+  BasicBlock* E = schedule.NewBasicBlock();
+  BasicBlock* F = schedule.NewBasicBlock();
+  BasicBlock* G = schedule.NewBasicBlock();
+  BasicBlock* H = schedule.exit();
+
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(B, C);
+  schedule.AddSuccessor(C, D);
+  schedule.AddSuccessor(D, E);
+  schedule.AddSuccessor(E, F);
+  schedule.AddSuccessor(F, G);
+  schedule.AddSuccessor(G, H);
+
+  schedule.AddSuccessor(E, D);
+  schedule.AddSuccessor(F, C);
+  schedule.AddSuccessor(G, B);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 8, true);
+  BasicBlock* loop1[] = {B, C, D, E, F, G};
+  CheckLoopContains(loop1, 6);
+
+  BasicBlock* loop2[] = {C, D, E, F};
+  CheckLoopContains(loop2, 4);
+
+  BasicBlock* loop3[] = {D, E};
+  CheckLoopContains(loop3, 2);
+}
+
+
+TEST(RPOLoopFollow1) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  TestLoop* loop1 = CreateLoop(&schedule, 1);
+  TestLoop* loop2 = CreateLoop(&schedule, 1);
+
+  BasicBlock* A = schedule.entry();
+  BasicBlock* E = schedule.exit();
+
+  schedule.AddSuccessor(A, loop1->header());
+  schedule.AddSuccessor(loop1->header(), loop2->header());
+  schedule.AddSuccessor(loop2->last(), E);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+
+  CHECK_EQ(schedule.BasicBlockCount(), static_cast<int>(order->size()));
+  CheckLoopContains(loop1->nodes, loop1->count);
+  CheckLoopContains(loop2->nodes, loop2->count);
+  delete loop1;
+  delete loop2;
+}
+
+
+TEST(RPOLoopFollow2) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  TestLoop* loop1 = CreateLoop(&schedule, 1);
+  TestLoop* loop2 = CreateLoop(&schedule, 1);
+
+  BasicBlock* A = schedule.entry();
+  BasicBlock* S = schedule.NewBasicBlock();
+  BasicBlock* E = schedule.exit();
+
+  schedule.AddSuccessor(A, loop1->header());
+  schedule.AddSuccessor(loop1->header(), S);
+  schedule.AddSuccessor(S, loop2->header());
+  schedule.AddSuccessor(loop2->last(), E);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+
+  CHECK_EQ(schedule.BasicBlockCount(), static_cast<int>(order->size()));
+  CheckLoopContains(loop1->nodes, loop1->count);
+  CheckLoopContains(loop2->nodes, loop2->count);
+  delete loop1;
+  delete loop2;
+}
+
+
+TEST(RPOLoopFollowN) {
+  HandleAndZoneScope scope;
+
+  for (int size = 1; size < 5; size++) {
+    for (int exit = 0; exit < size; exit++) {
+      Schedule schedule(scope.main_zone());
+      Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+      TestLoop* loop1 = CreateLoop(&schedule, size);
+      TestLoop* loop2 = CreateLoop(&schedule, size);
+      BasicBlock* A = schedule.entry();
+      BasicBlock* E = schedule.exit();
+
+      schedule.AddSuccessor(A, loop1->header());
+      schedule.AddSuccessor(loop1->nodes[exit], loop2->header());
+      schedule.AddSuccessor(loop2->nodes[exit], E);
+      BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+
+      CHECK_EQ(schedule.BasicBlockCount(), static_cast<int>(order->size()));
+      CheckLoopContains(loop1->nodes, loop1->count);
+      CheckLoopContains(loop2->nodes, loop2->count);
+      delete loop1;
+      delete loop2;
+    }
+  }
+}
+
+
+TEST(RPONestedLoopFollow1) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  TestLoop* loop1 = CreateLoop(&schedule, 1);
+  TestLoop* loop2 = CreateLoop(&schedule, 1);
+
+  BasicBlock* A = schedule.entry();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* E = schedule.exit();
+
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(B, loop1->header());
+  schedule.AddSuccessor(loop1->header(), loop2->header());
+  schedule.AddSuccessor(loop2->last(), C);
+  schedule.AddSuccessor(C, E);
+  schedule.AddSuccessor(C, B);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+
+  CHECK_EQ(schedule.BasicBlockCount(), static_cast<int>(order->size()));
+  CheckLoopContains(loop1->nodes, loop1->count);
+  CheckLoopContains(loop2->nodes, loop2->count);
+
+  BasicBlock* loop3[] = {B, loop1->nodes[0], loop2->nodes[0], C};
+  CheckLoopContains(loop3, 4);
+  delete loop1;
+  delete loop2;
+}
+
+
+TEST(RPOLoopBackedges1) {
+  HandleAndZoneScope scope;
+
+  int size = 8;
+  for (int i = 0; i < size; i++) {
+    for (int j = 0; j < size; j++) {
+      Schedule schedule(scope.main_zone());
+      Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+      BasicBlock* A = schedule.entry();
+      BasicBlock* E = schedule.exit();
+
+      TestLoop* loop1 = CreateLoop(&schedule, size);
+      schedule.AddSuccessor(A, loop1->header());
+      schedule.AddSuccessor(loop1->last(), E);
+
+      schedule.AddSuccessor(loop1->nodes[i], loop1->header());
+      schedule.AddSuccessor(loop1->nodes[j], E);
+
+      BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+      CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+      CheckLoopContains(loop1->nodes, loop1->count);
+      delete loop1;
+    }
+  }
+}
+
+
+TEST(RPOLoopOutedges1) {
+  HandleAndZoneScope scope;
+
+  int size = 8;
+  for (int i = 0; i < size; i++) {
+    for (int j = 0; j < size; j++) {
+      Schedule schedule(scope.main_zone());
+      Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+      BasicBlock* A = schedule.entry();
+      BasicBlock* D = schedule.NewBasicBlock();
+      BasicBlock* E = schedule.exit();
+
+      TestLoop* loop1 = CreateLoop(&schedule, size);
+      schedule.AddSuccessor(A, loop1->header());
+      schedule.AddSuccessor(loop1->last(), E);
+
+      schedule.AddSuccessor(loop1->nodes[i], loop1->header());
+      schedule.AddSuccessor(loop1->nodes[j], D);
+      schedule.AddSuccessor(D, E);
+
+      BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+      CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+      CheckLoopContains(loop1->nodes, loop1->count);
+      delete loop1;
+    }
+  }
+}
+
+
+TEST(RPOLoopOutedges2) {
+  HandleAndZoneScope scope;
+
+  int size = 8;
+  for (int i = 0; i < size; i++) {
+    Schedule schedule(scope.main_zone());
+    Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+    BasicBlock* A = schedule.entry();
+    BasicBlock* E = schedule.exit();
+
+    TestLoop* loop1 = CreateLoop(&schedule, size);
+    schedule.AddSuccessor(A, loop1->header());
+    schedule.AddSuccessor(loop1->last(), E);
+
+    for (int j = 0; j < size; j++) {
+      BasicBlock* O = schedule.NewBasicBlock();
+      schedule.AddSuccessor(loop1->nodes[j], O);
+      schedule.AddSuccessor(O, E);
+    }
+
+    BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+    CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+    CheckLoopContains(loop1->nodes, loop1->count);
+    delete loop1;
+  }
+}
+
+
+TEST(RPOLoopOutloops1) {
+  HandleAndZoneScope scope;
+
+  int size = 8;
+  for (int i = 0; i < size; i++) {
+    Schedule schedule(scope.main_zone());
+    Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+    BasicBlock* A = schedule.entry();
+    BasicBlock* E = schedule.exit();
+    TestLoop* loop1 = CreateLoop(&schedule, size);
+    schedule.AddSuccessor(A, loop1->header());
+    schedule.AddSuccessor(loop1->last(), E);
+
+    TestLoop** loopN = new TestLoop* [size];
+    for (int j = 0; j < size; j++) {
+      loopN[j] = CreateLoop(&schedule, 2);
+      schedule.AddSuccessor(loop1->nodes[j], loopN[j]->header());
+      schedule.AddSuccessor(loopN[j]->last(), E);
+    }
+
+    BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+    CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+    CheckLoopContains(loop1->nodes, loop1->count);
+
+    for (int j = 0; j < size; j++) {
+      CheckLoopContains(loopN[j]->nodes, loopN[j]->count);
+      delete loopN[j];
+    }
+    delete[] loopN;
+    delete loop1;
+  }
+}
+
+
+TEST(RPOLoopMultibackedge) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Scheduler scheduler(scope.main_zone(), NULL, &schedule);
+
+  BasicBlock* A = schedule.entry();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* D = schedule.exit();
+  BasicBlock* E = schedule.NewBasicBlock();
+
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(B, C);
+  schedule.AddSuccessor(B, D);
+  schedule.AddSuccessor(B, E);
+  schedule.AddSuccessor(C, B);
+  schedule.AddSuccessor(D, B);
+  schedule.AddSuccessor(E, B);
+
+  BasicBlockVector* order = scheduler.ComputeSpecialRPO();
+  CheckRPONumbers(order, 5, true);
+
+  BasicBlock* loop1[] = {B, C, D, E};
+  CheckLoopContains(loop1, 4);
+}
+
+
+TEST(BuildScheduleEmpty) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder builder(scope.main_zone());
+  graph.SetStart(graph.NewNode(builder.Start()));
+  graph.SetEnd(graph.NewNode(builder.End(), graph.start()));
+
+  Scheduler scheduler(scope.main_zone());
+  USE(scheduler.NewSchedule(&graph));
+}
+
+
+TEST(BuildScheduleOneParameter) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder builder(scope.main_zone());
+  graph.SetStart(graph.NewNode(builder.Start()));
+
+  Node* p1 = graph.NewNode(builder.Parameter(0));
+  Node* ret = graph.NewNode(builder.Return(), p1, graph.start(), graph.start());
+
+  graph.SetEnd(graph.NewNode(builder.End(), ret));
+
+  Scheduler scheduler(scope.main_zone());
+  USE(scheduler.NewSchedule(&graph));
+}
+
+
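+// Counts every node placed into a basic block of |schedule|, including each
+// block's control node when present.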
+static int GetScheduledNodeCount(Schedule* schedule) {
+  int node_count = 0;
+  for (BasicBlockVectorIter i = schedule->rpo_order()->begin();
+       i != schedule->rpo_order()->end(); ++i) {
+    BasicBlock* block = *i;
+    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+         ++j) {
+      ++node_count;
+    }
+    BasicBlock::Control control = block->control_;
+    if (control != BasicBlock::kNone) {
+      ++node_count;
+    }
+  }
+  return node_count;
+}
+
+
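+// Debug helper: dumps |graph| to stdout in GraphViz DOT format.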
+static void PrintGraph(Graph* graph) {
+  OFStream os(stdout);
+  os << AsDOT(*graph);
+}
+
+
+static void PrintSchedule(Schedule* schedule) {
+  OFStream os(stdout);
+  os << *schedule << endl;
+}
+
+
+TEST(BuildScheduleIfSplit) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+  graph.SetStart(graph.NewNode(builder.Start()));
+
+  Node* p1 = graph.NewNode(builder.Parameter(0));
+  Node* p2 = graph.NewNode(builder.Parameter(1));
+  Node* p3 = graph.NewNode(builder.Parameter(2));
+  Node* p4 = graph.NewNode(builder.Parameter(3));
+  Node* p5 = graph.NewNode(builder.Parameter(4));
+  Node* cmp = graph.NewNode(js_builder.LessThanOrEqual(), p1, p2, p3,
+                            graph.start(), graph.start());
+  Node* branch = graph.NewNode(builder.Branch(), cmp, graph.start());
+  Node* true_branch = graph.NewNode(builder.IfTrue(), branch);
+  Node* false_branch = graph.NewNode(builder.IfFalse(), branch);
+
+  Node* ret1 = graph.NewNode(builder.Return(), p4, graph.start(), true_branch);
+  Node* ret2 = graph.NewNode(builder.Return(), p5, graph.start(), false_branch);
+  Node* merge = graph.NewNode(builder.Merge(2), ret1, ret2);
+  graph.SetEnd(graph.NewNode(builder.End(), merge));
+
+  PrintGraph(&graph);
+
+  Scheduler scheduler(scope.main_zone());
+  Schedule* schedule = scheduler.NewSchedule(&graph);
+
+  PrintSchedule(schedule);
+
+  CHECK_EQ(13, GetScheduledNodeCount(schedule));
+}
+
+
+TEST(BuildScheduleIfSplitWithEffects) {
+  HandleAndZoneScope scope;
+  Isolate* isolate = scope.main_isolate();
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common_builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+  Operator* op;
+
+  Handle<Object> object =
+      Handle<Object>(isolate->heap()->undefined_value(), isolate);
+  PrintableUnique<Object> unique_constant =
+      PrintableUnique<Object>::CreateUninitialized(scope.main_zone(), object);
+
+  // Manually transcribed code for:
+  // function turbo_fan_test(a, b, c, y) {
+  //   if (a < b) {
+  //     return a + b - c * c - a + y;
+  //   } else {
+  //     return c * c - a;
+  //   }
+  // }
+  Node* nil = graph.NewNode(common_builder.Dead());
+  op = common_builder.End();
+  Node* n23 = graph.NewNode(op, nil);
+  USE(n23);
+  op = common_builder.Merge(2);
+  Node* n22 = graph.NewNode(op, nil, nil);
+  USE(n22);
+  op = common_builder.Return();
+  Node* n16 = graph.NewNode(op, nil, nil, nil);
+  USE(n16);
+  op = js_builder.Add();
+  Node* n15 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n15);
+  op = js_builder.Subtract();
+  Node* n14 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n14);
+  op = js_builder.Subtract();
+  Node* n13 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n13);
+  op = js_builder.Add();
+  Node* n11 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n11);
+  op = common_builder.Parameter(0);
+  Node* n2 = graph.NewNode(op);
+  USE(n2);
+  n11->ReplaceInput(0, n2);
+  op = common_builder.Parameter(0);
+  Node* n3 = graph.NewNode(op);
+  USE(n3);
+  n11->ReplaceInput(1, n3);
+  op = common_builder.HeapConstant(unique_constant);
+  Node* n7 = graph.NewNode(op);
+  USE(n7);
+  n11->ReplaceInput(2, n7);
+  op = js_builder.LessThan();
+  Node* n8 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n8);
+  n8->ReplaceInput(0, n2);
+  n8->ReplaceInput(1, n3);
+  n8->ReplaceInput(2, n7);
+  op = common_builder.Start();
+  Node* n0 = graph.NewNode(op);
+  USE(n0);
+  n8->ReplaceInput(3, n0);
+  n8->ReplaceInput(4, n0);
+  n11->ReplaceInput(3, n8);
+  op = common_builder.IfTrue();
+  Node* n10 = graph.NewNode(op, nil);
+  USE(n10);
+  op = common_builder.Branch();
+  Node* n9 = graph.NewNode(op, nil, nil);
+  USE(n9);
+  n9->ReplaceInput(0, n8);
+  n9->ReplaceInput(1, n0);
+  n10->ReplaceInput(0, n9);
+  n11->ReplaceInput(4, n10);
+  n13->ReplaceInput(0, n11);
+  op = js_builder.Multiply();
+  Node* n12 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n12);
+  op = common_builder.Parameter(0);
+  Node* n4 = graph.NewNode(op);
+  USE(n4);
+  n12->ReplaceInput(0, n4);
+  n12->ReplaceInput(1, n4);
+  n12->ReplaceInput(2, n7);
+  n12->ReplaceInput(3, n11);
+  n12->ReplaceInput(4, n10);
+  n13->ReplaceInput(1, n12);
+  n13->ReplaceInput(2, n7);
+  n13->ReplaceInput(3, n12);
+  n13->ReplaceInput(4, n10);
+  n14->ReplaceInput(0, n13);
+  n14->ReplaceInput(1, n2);
+  n14->ReplaceInput(2, n7);
+  n14->ReplaceInput(3, n13);
+  n14->ReplaceInput(4, n10);
+  n15->ReplaceInput(0, n14);
+  op = common_builder.Parameter(0);
+  Node* n5 = graph.NewNode(op);
+  USE(n5);
+  n15->ReplaceInput(1, n5);
+  n15->ReplaceInput(2, n7);
+  n15->ReplaceInput(3, n14);
+  n15->ReplaceInput(4, n10);
+  n16->ReplaceInput(0, n15);
+  n16->ReplaceInput(1, n15);
+  n16->ReplaceInput(2, n10);
+  n22->ReplaceInput(0, n16);
+  op = common_builder.Return();
+  Node* n21 = graph.NewNode(op, nil, nil, nil);
+  USE(n21);
+  op = js_builder.Subtract();
+  Node* n20 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n20);
+  op = js_builder.Multiply();
+  Node* n19 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n19);
+  n19->ReplaceInput(0, n4);
+  n19->ReplaceInput(1, n4);
+  n19->ReplaceInput(2, n7);
+  n19->ReplaceInput(3, n8);
+  op = common_builder.IfFalse();
+  Node* n18 = graph.NewNode(op, nil);
+  USE(n18);
+  n18->ReplaceInput(0, n9);
+  n19->ReplaceInput(4, n18);
+  n20->ReplaceInput(0, n19);
+  n20->ReplaceInput(1, n2);
+  n20->ReplaceInput(2, n7);
+  n20->ReplaceInput(3, n19);
+  n20->ReplaceInput(4, n18);
+  n21->ReplaceInput(0, n20);
+  n21->ReplaceInput(1, n20);
+  n21->ReplaceInput(2, n18);
+  n22->ReplaceInput(1, n21);
+  n23->ReplaceInput(0, n22);
+
+  graph.SetStart(n0);
+  graph.SetEnd(n23);
+
+  PrintGraph(&graph);
+
+  Scheduler scheduler(scope.main_zone());
+  Schedule* schedule = scheduler.NewSchedule(&graph);
+
+  PrintSchedule(schedule);
+
+  CHECK_EQ(20, GetScheduledNodeCount(schedule));
+}
+
+
+TEST(BuildScheduleSimpleLoop) {
+  HandleAndZoneScope scope;
+  Isolate* isolate = scope.main_isolate();
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common_builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+  Operator* op;
+
+  Handle<Object> object =
+      Handle<Object>(isolate->heap()->undefined_value(), isolate);
+  PrintableUnique<Object> unique_constant =
+      PrintableUnique<Object>::CreateUninitialized(scope.main_zone(), object);
+
+  // Manually transcribed code for:
+  // function turbo_fan_test(a, b) {
+  //   while (a < b) {
+  //     a++;
+  //   }
+  //   return a;
+  // }
+  Node* nil = graph.NewNode(common_builder.Dead());
+  op = common_builder.End();
+  Node* n20 = graph.NewNode(op, nil);
+  USE(n20);
+  op = common_builder.Return();
+  Node* n19 = graph.NewNode(op, nil, nil, nil);
+  USE(n19);
+  op = common_builder.Phi(2);
+  Node* n8 = graph.NewNode(op, nil, nil, nil);
+  USE(n8);
+  op = common_builder.Parameter(0);
+  Node* n2 = graph.NewNode(op);
+  USE(n2);
+  n8->ReplaceInput(0, n2);
+  op = js_builder.Add();
+  Node* n18 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n18);
+  op = js_builder.ToNumber();
+  Node* n16 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n16);
+  n16->ReplaceInput(0, n8);
+  op = common_builder.HeapConstant(unique_constant);
+  Node* n5 = graph.NewNode(op);
+  USE(n5);
+  n16->ReplaceInput(1, n5);
+  op = js_builder.LessThan();
+  Node* n12 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n12);
+  n12->ReplaceInput(0, n8);
+  op = common_builder.Phi(2);
+  Node* n9 = graph.NewNode(op, nil, nil, nil);
+  USE(n9);
+  op = common_builder.Parameter(0);
+  Node* n3 = graph.NewNode(op);
+  USE(n3);
+  n9->ReplaceInput(0, n3);
+  n9->ReplaceInput(1, n9);
+  op = common_builder.Loop(2);
+  Node* n6 = graph.NewNode(op, nil, nil);
+  USE(n6);
+  op = common_builder.Start();
+  Node* n0 = graph.NewNode(op);
+  USE(n0);
+  n6->ReplaceInput(0, n0);
+  op = common_builder.IfTrue();
+  Node* n14 = graph.NewNode(op, nil);
+  USE(n14);
+  op = common_builder.Branch();
+  Node* n13 = graph.NewNode(op, nil, nil);
+  USE(n13);
+  n13->ReplaceInput(0, n12);
+  n13->ReplaceInput(1, n6);
+  n14->ReplaceInput(0, n13);
+  n6->ReplaceInput(1, n14);
+  n9->ReplaceInput(2, n6);
+  n12->ReplaceInput(1, n9);
+  n12->ReplaceInput(2, n5);
+  op = common_builder.Phi(2);
+  Node* n10 = graph.NewNode(op, nil, nil, nil);
+  USE(n10);
+  n10->ReplaceInput(0, n0);
+  n10->ReplaceInput(1, n18);
+  n10->ReplaceInput(2, n6);
+  n12->ReplaceInput(3, n10);
+  n12->ReplaceInput(4, n6);
+  n16->ReplaceInput(2, n12);
+  n16->ReplaceInput(3, n14);
+  n18->ReplaceInput(0, n16);
+  op = common_builder.NumberConstant(0);
+  Node* n17 = graph.NewNode(op);
+  USE(n17);
+  n18->ReplaceInput(1, n17);
+  n18->ReplaceInput(2, n5);
+  n18->ReplaceInput(3, n16);
+  n18->ReplaceInput(4, n14);
+  n8->ReplaceInput(1, n18);
+  n8->ReplaceInput(2, n6);
+  n19->ReplaceInput(0, n8);
+  n19->ReplaceInput(1, n12);
+  op = common_builder.IfFalse();
+  Node* n15 = graph.NewNode(op, nil);
+  USE(n15);
+  n15->ReplaceInput(0, n13);
+  n19->ReplaceInput(2, n15);
+  n20->ReplaceInput(0, n19);
+
+  graph.SetStart(n0);
+  graph.SetEnd(n20);
+
+  PrintGraph(&graph);
+
+  Scheduler scheduler(scope.main_zone());
+  Schedule* schedule = scheduler.NewSchedule(&graph);
+
+  PrintSchedule(schedule);
+
+  CHECK_EQ(19, GetScheduledNodeCount(schedule));
+}
+
+
+TEST(BuildScheduleComplexLoops) {
+  HandleAndZoneScope scope;
+  Isolate* isolate = scope.main_isolate();
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common_builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+  Operator* op;
+
+  Handle<Object> object =
+      Handle<Object>(isolate->heap()->undefined_value(), isolate);
+  PrintableUnique<Object> unique_constant =
+      PrintableUnique<Object>::CreateUninitialized(scope.main_zone(), object);
+
+  // Manually transcribed code for:
+  // function turbo_fan_test(a, b, c) {
+  //   while (a < b) {
+  //     a++;
+  //     while (c < b) {
+  //       c++;
+  //     }
+  //   }
+  //   while (a < b) {
+  //     a += 2;
+  //   }
+  //   return a;
+  // }
+  Node* nil = graph.NewNode(common_builder.Dead());
+  op = common_builder.End();
+  Node* n46 = graph.NewNode(op, nil);
+  USE(n46);
+  op = common_builder.Return();
+  Node* n45 = graph.NewNode(op, nil, nil, nil);
+  USE(n45);
+  op = common_builder.Phi(2);
+  Node* n35 = graph.NewNode(op, nil, nil, nil);
+  USE(n35);
+  op = common_builder.Phi(2);
+  Node* n9 = graph.NewNode(op, nil, nil, nil);
+  USE(n9);
+  op = common_builder.Parameter(0);
+  Node* n2 = graph.NewNode(op);
+  USE(n2);
+  n9->ReplaceInput(0, n2);
+  op = common_builder.Phi(2);
+  Node* n23 = graph.NewNode(op, nil, nil, nil);
+  USE(n23);
+  op = js_builder.Add();
+  Node* n20 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n20);
+  op = js_builder.ToNumber();
+  Node* n18 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n18);
+  n18->ReplaceInput(0, n9);
+  op = common_builder.HeapConstant(unique_constant);
+  Node* n6 = graph.NewNode(op);
+  USE(n6);
+  n18->ReplaceInput(1, n6);
+  op = js_builder.LessThan();
+  Node* n14 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n14);
+  n14->ReplaceInput(0, n9);
+  op = common_builder.Phi(2);
+  Node* n10 = graph.NewNode(op, nil, nil, nil);
+  USE(n10);
+  op = common_builder.Parameter(0);
+  Node* n3 = graph.NewNode(op);
+  USE(n3);
+  n10->ReplaceInput(0, n3);
+  op = common_builder.Phi(2);
+  Node* n24 = graph.NewNode(op, nil, nil, nil);
+  USE(n24);
+  n24->ReplaceInput(0, n10);
+  n24->ReplaceInput(1, n24);
+  op = common_builder.Loop(2);
+  Node* n21 = graph.NewNode(op, nil, nil);
+  USE(n21);
+  op = common_builder.IfTrue();
+  Node* n16 = graph.NewNode(op, nil);
+  USE(n16);
+  op = common_builder.Branch();
+  Node* n15 = graph.NewNode(op, nil, nil);
+  USE(n15);
+  n15->ReplaceInput(0, n14);
+  op = common_builder.Loop(2);
+  Node* n7 = graph.NewNode(op, nil, nil);
+  USE(n7);
+  op = common_builder.Start();
+  Node* n0 = graph.NewNode(op);
+  USE(n0);
+  n7->ReplaceInput(0, n0);
+  op = common_builder.IfFalse();
+  Node* n30 = graph.NewNode(op, nil);
+  USE(n30);
+  op = common_builder.Branch();
+  Node* n28 = graph.NewNode(op, nil, nil);
+  USE(n28);
+  op = js_builder.LessThan();
+  Node* n27 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n27);
+  op = common_builder.Phi(2);
+  Node* n25 = graph.NewNode(op, nil, nil, nil);
+  USE(n25);
+  op = common_builder.Phi(2);
+  Node* n11 = graph.NewNode(op, nil, nil, nil);
+  USE(n11);
+  op = common_builder.Parameter(0);
+  Node* n4 = graph.NewNode(op);
+  USE(n4);
+  n11->ReplaceInput(0, n4);
+  n11->ReplaceInput(1, n25);
+  n11->ReplaceInput(2, n7);
+  n25->ReplaceInput(0, n11);
+  op = js_builder.Add();
+  Node* n32 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n32);
+  op = js_builder.ToNumber();
+  Node* n31 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n31);
+  n31->ReplaceInput(0, n25);
+  n31->ReplaceInput(1, n6);
+  n31->ReplaceInput(2, n27);
+  op = common_builder.IfTrue();
+  Node* n29 = graph.NewNode(op, nil);
+  USE(n29);
+  n29->ReplaceInput(0, n28);
+  n31->ReplaceInput(3, n29);
+  n32->ReplaceInput(0, n31);
+  op = common_builder.NumberConstant(0);
+  Node* n19 = graph.NewNode(op);
+  USE(n19);
+  n32->ReplaceInput(1, n19);
+  n32->ReplaceInput(2, n6);
+  n32->ReplaceInput(3, n31);
+  n32->ReplaceInput(4, n29);
+  n25->ReplaceInput(1, n32);
+  n25->ReplaceInput(2, n21);
+  n27->ReplaceInput(0, n25);
+  n27->ReplaceInput(1, n24);
+  n27->ReplaceInput(2, n6);
+  op = common_builder.Phi(2);
+  Node* n26 = graph.NewNode(op, nil, nil, nil);
+  USE(n26);
+  n26->ReplaceInput(0, n20);
+  n26->ReplaceInput(1, n32);
+  n26->ReplaceInput(2, n21);
+  n27->ReplaceInput(3, n26);
+  n27->ReplaceInput(4, n21);
+  n28->ReplaceInput(0, n27);
+  n28->ReplaceInput(1, n21);
+  n30->ReplaceInput(0, n28);
+  n7->ReplaceInput(1, n30);
+  n15->ReplaceInput(1, n7);
+  n16->ReplaceInput(0, n15);
+  n21->ReplaceInput(0, n16);
+  n21->ReplaceInput(1, n29);
+  n24->ReplaceInput(2, n21);
+  n10->ReplaceInput(1, n24);
+  n10->ReplaceInput(2, n7);
+  n14->ReplaceInput(1, n10);
+  n14->ReplaceInput(2, n6);
+  op = common_builder.Phi(2);
+  Node* n12 = graph.NewNode(op, nil, nil, nil);
+  USE(n12);
+  n12->ReplaceInput(0, n0);
+  n12->ReplaceInput(1, n27);
+  n12->ReplaceInput(2, n7);
+  n14->ReplaceInput(3, n12);
+  n14->ReplaceInput(4, n7);
+  n18->ReplaceInput(2, n14);
+  n18->ReplaceInput(3, n16);
+  n20->ReplaceInput(0, n18);
+  n20->ReplaceInput(1, n19);
+  n20->ReplaceInput(2, n6);
+  n20->ReplaceInput(3, n18);
+  n20->ReplaceInput(4, n16);
+  n23->ReplaceInput(0, n20);
+  n23->ReplaceInput(1, n23);
+  n23->ReplaceInput(2, n21);
+  n9->ReplaceInput(1, n23);
+  n9->ReplaceInput(2, n7);
+  n35->ReplaceInput(0, n9);
+  op = js_builder.Add();
+  Node* n44 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n44);
+  n44->ReplaceInput(0, n35);
+  op = common_builder.NumberConstant(0);
+  Node* n43 = graph.NewNode(op);
+  USE(n43);
+  n44->ReplaceInput(1, n43);
+  n44->ReplaceInput(2, n6);
+  op = js_builder.LessThan();
+  Node* n39 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n39);
+  n39->ReplaceInput(0, n35);
+  op = common_builder.Phi(2);
+  Node* n36 = graph.NewNode(op, nil, nil, nil);
+  USE(n36);
+  n36->ReplaceInput(0, n10);
+  n36->ReplaceInput(1, n36);
+  op = common_builder.Loop(2);
+  Node* n33 = graph.NewNode(op, nil, nil);
+  USE(n33);
+  op = common_builder.IfFalse();
+  Node* n17 = graph.NewNode(op, nil);
+  USE(n17);
+  n17->ReplaceInput(0, n15);
+  n33->ReplaceInput(0, n17);
+  op = common_builder.IfTrue();
+  Node* n41 = graph.NewNode(op, nil);
+  USE(n41);
+  op = common_builder.Branch();
+  Node* n40 = graph.NewNode(op, nil, nil);
+  USE(n40);
+  n40->ReplaceInput(0, n39);
+  n40->ReplaceInput(1, n33);
+  n41->ReplaceInput(0, n40);
+  n33->ReplaceInput(1, n41);
+  n36->ReplaceInput(2, n33);
+  n39->ReplaceInput(1, n36);
+  n39->ReplaceInput(2, n6);
+  op = common_builder.Phi(2);
+  Node* n38 = graph.NewNode(op, nil, nil, nil);
+  USE(n38);
+  n38->ReplaceInput(0, n14);
+  n38->ReplaceInput(1, n44);
+  n38->ReplaceInput(2, n33);
+  n39->ReplaceInput(3, n38);
+  n39->ReplaceInput(4, n33);
+  n44->ReplaceInput(3, n39);
+  n44->ReplaceInput(4, n41);
+  n35->ReplaceInput(1, n44);
+  n35->ReplaceInput(2, n33);
+  n45->ReplaceInput(0, n35);
+  n45->ReplaceInput(1, n39);
+  op = common_builder.IfFalse();
+  Node* n42 = graph.NewNode(op, nil);
+  USE(n42);
+  n42->ReplaceInput(0, n40);
+  n45->ReplaceInput(2, n42);
+  n46->ReplaceInput(0, n45);
+
+  graph.SetStart(n0);
+  graph.SetEnd(n46);
+
+  PrintGraph(&graph);
+
+  Scheduler scheduler(scope.main_zone());
+  Schedule* schedule = scheduler.NewSchedule(&graph);
+
+  PrintSchedule(schedule);
+
+  CHECK_EQ(46, GetScheduledNodeCount(schedule));
+}
+
+
+TEST(BuildScheduleBreakAndContinue) {
+  HandleAndZoneScope scope;
+  Isolate* isolate = scope.main_isolate();
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common_builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+  Operator* op;
+
+  Handle<Object> object =
+      Handle<Object>(isolate->heap()->undefined_value(), isolate);
+  PrintableUnique<Object> unique_constant =
+      PrintableUnique<Object>::CreateUninitialized(scope.main_zone(), object);
+
+  // Manually transcribed code for:
+  // function turbo_fan_test(a, b, c) {
+  //   var d = 0;
+  //   while (a < b) {
+  //     a++;
+  //     while (c < b) {
+  //       c++;
+  //       if (d == 0) break;
+  //       a++;
+  //     }
+  //     if (a == 1) continue;
+  //     d++;
+  //   }
+  //   return a + d;
+  // }
+  Node* nil = graph.NewNode(common_builder.Dead());
+  op = common_builder.End();
+  Node* n58 = graph.NewNode(op, nil);
+  USE(n58);
+  op = common_builder.Return();
+  Node* n57 = graph.NewNode(op, nil, nil, nil);
+  USE(n57);
+  op = js_builder.Add();
+  Node* n56 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n56);
+  op = common_builder.Phi(2);
+  Node* n10 = graph.NewNode(op, nil, nil, nil);
+  USE(n10);
+  op = common_builder.Parameter(0);
+  Node* n2 = graph.NewNode(op);
+  USE(n2);
+  n10->ReplaceInput(0, n2);
+  op = common_builder.Phi(2);
+  Node* n25 = graph.NewNode(op, nil, nil, nil);
+  USE(n25);
+  op = js_builder.Add();
+  Node* n22 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n22);
+  op = js_builder.ToNumber();
+  Node* n20 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n20);
+  n20->ReplaceInput(0, n10);
+  op = common_builder.HeapConstant(unique_constant);
+  Node* n6 = graph.NewNode(op);
+  USE(n6);
+  n20->ReplaceInput(1, n6);
+  op = js_builder.LessThan();
+  Node* n16 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n16);
+  n16->ReplaceInput(0, n10);
+  op = common_builder.Phi(2);
+  Node* n11 = graph.NewNode(op, nil, nil, nil);
+  USE(n11);
+  op = common_builder.Parameter(0);
+  Node* n3 = graph.NewNode(op);
+  USE(n3);
+  n11->ReplaceInput(0, n3);
+  op = common_builder.Phi(2);
+  Node* n26 = graph.NewNode(op, nil, nil, nil);
+  USE(n26);
+  n26->ReplaceInput(0, n11);
+  n26->ReplaceInput(1, n26);
+  op = common_builder.Loop(2);
+  Node* n23 = graph.NewNode(op, nil, nil);
+  USE(n23);
+  op = common_builder.IfTrue();
+  Node* n18 = graph.NewNode(op, nil);
+  USE(n18);
+  op = common_builder.Branch();
+  Node* n17 = graph.NewNode(op, nil, nil);
+  USE(n17);
+  n17->ReplaceInput(0, n16);
+  op = common_builder.Loop(2);
+  Node* n8 = graph.NewNode(op, nil, nil);
+  USE(n8);
+  op = common_builder.Start();
+  Node* n0 = graph.NewNode(op);
+  USE(n0);
+  n8->ReplaceInput(0, n0);
+  op = common_builder.Merge(2);
+  Node* n53 = graph.NewNode(op, nil, nil);
+  USE(n53);
+  op = common_builder.IfTrue();
+  Node* n49 = graph.NewNode(op, nil);
+  USE(n49);
+  op = common_builder.Branch();
+  Node* n48 = graph.NewNode(op, nil, nil);
+  USE(n48);
+  op = js_builder.Equal();
+  Node* n47 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n47);
+  n47->ReplaceInput(0, n25);
+  op = common_builder.NumberConstant(0);
+  Node* n46 = graph.NewNode(op);
+  USE(n46);
+  n47->ReplaceInput(1, n46);
+  n47->ReplaceInput(2, n6);
+  op = common_builder.Phi(2);
+  Node* n42 = graph.NewNode(op, nil, nil, nil);
+  USE(n42);
+  op = js_builder.LessThan();
+  Node* n30 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n30);
+  op = common_builder.Phi(2);
+  Node* n27 = graph.NewNode(op, nil, nil, nil);
+  USE(n27);
+  op = common_builder.Phi(2);
+  Node* n12 = graph.NewNode(op, nil, nil, nil);
+  USE(n12);
+  op = common_builder.Parameter(0);
+  Node* n4 = graph.NewNode(op);
+  USE(n4);
+  n12->ReplaceInput(0, n4);
+  op = common_builder.Phi(2);
+  Node* n41 = graph.NewNode(op, nil, nil, nil);
+  USE(n41);
+  n41->ReplaceInput(0, n27);
+  op = js_builder.Add();
+  Node* n35 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n35);
+  op = js_builder.ToNumber();
+  Node* n34 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n34);
+  n34->ReplaceInput(0, n27);
+  n34->ReplaceInput(1, n6);
+  n34->ReplaceInput(2, n30);
+  op = common_builder.IfTrue();
+  Node* n32 = graph.NewNode(op, nil);
+  USE(n32);
+  op = common_builder.Branch();
+  Node* n31 = graph.NewNode(op, nil, nil);
+  USE(n31);
+  n31->ReplaceInput(0, n30);
+  n31->ReplaceInput(1, n23);
+  n32->ReplaceInput(0, n31);
+  n34->ReplaceInput(3, n32);
+  n35->ReplaceInput(0, n34);
+  op = common_builder.NumberConstant(0);
+  Node* n21 = graph.NewNode(op);
+  USE(n21);
+  n35->ReplaceInput(1, n21);
+  n35->ReplaceInput(2, n6);
+  n35->ReplaceInput(3, n34);
+  n35->ReplaceInput(4, n32);
+  n41->ReplaceInput(1, n35);
+  op = common_builder.Merge(2);
+  Node* n40 = graph.NewNode(op, nil, nil);
+  USE(n40);
+  op = common_builder.IfFalse();
+  Node* n33 = graph.NewNode(op, nil);
+  USE(n33);
+  n33->ReplaceInput(0, n31);
+  n40->ReplaceInput(0, n33);
+  op = common_builder.IfTrue();
+  Node* n39 = graph.NewNode(op, nil);
+  USE(n39);
+  op = common_builder.Branch();
+  Node* n38 = graph.NewNode(op, nil, nil);
+  USE(n38);
+  op = js_builder.Equal();
+  Node* n37 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n37);
+  op = common_builder.Phi(2);
+  Node* n28 = graph.NewNode(op, nil, nil, nil);
+  USE(n28);
+  op = common_builder.Phi(2);
+  Node* n13 = graph.NewNode(op, nil, nil, nil);
+  USE(n13);
+  op = common_builder.NumberConstant(0);
+  Node* n7 = graph.NewNode(op);
+  USE(n7);
+  n13->ReplaceInput(0, n7);
+  op = common_builder.Phi(2);
+  Node* n54 = graph.NewNode(op, nil, nil, nil);
+  USE(n54);
+  n54->ReplaceInput(0, n28);
+  op = js_builder.Add();
+  Node* n52 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n52);
+  op = js_builder.ToNumber();
+  Node* n51 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n51);
+  n51->ReplaceInput(0, n28);
+  n51->ReplaceInput(1, n6);
+  n51->ReplaceInput(2, n47);
+  op = common_builder.IfFalse();
+  Node* n50 = graph.NewNode(op, nil);
+  USE(n50);
+  n50->ReplaceInput(0, n48);
+  n51->ReplaceInput(3, n50);
+  n52->ReplaceInput(0, n51);
+  n52->ReplaceInput(1, n21);
+  n52->ReplaceInput(2, n6);
+  n52->ReplaceInput(3, n51);
+  n52->ReplaceInput(4, n50);
+  n54->ReplaceInput(1, n52);
+  n54->ReplaceInput(2, n53);
+  n13->ReplaceInput(1, n54);
+  n13->ReplaceInput(2, n8);
+  n28->ReplaceInput(0, n13);
+  n28->ReplaceInput(1, n28);
+  n28->ReplaceInput(2, n23);
+  n37->ReplaceInput(0, n28);
+  op = common_builder.NumberConstant(0);
+  Node* n36 = graph.NewNode(op);
+  USE(n36);
+  n37->ReplaceInput(1, n36);
+  n37->ReplaceInput(2, n6);
+  n37->ReplaceInput(3, n35);
+  n37->ReplaceInput(4, n32);
+  n38->ReplaceInput(0, n37);
+  n38->ReplaceInput(1, n32);
+  n39->ReplaceInput(0, n38);
+  n40->ReplaceInput(1, n39);
+  n41->ReplaceInput(2, n40);
+  n12->ReplaceInput(1, n41);
+  n12->ReplaceInput(2, n8);
+  n27->ReplaceInput(0, n12);
+  n27->ReplaceInput(1, n35);
+  n27->ReplaceInput(2, n23);
+  n30->ReplaceInput(0, n27);
+  n30->ReplaceInput(1, n26);
+  n30->ReplaceInput(2, n6);
+  op = common_builder.Phi(2);
+  Node* n29 = graph.NewNode(op, nil, nil, nil);
+  USE(n29);
+  n29->ReplaceInput(0, n22);
+  op = js_builder.Add();
+  Node* n45 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n45);
+  op = js_builder.ToNumber();
+  Node* n44 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n44);
+  n44->ReplaceInput(0, n25);
+  n44->ReplaceInput(1, n6);
+  n44->ReplaceInput(2, n37);
+  op = common_builder.IfFalse();
+  Node* n43 = graph.NewNode(op, nil);
+  USE(n43);
+  n43->ReplaceInput(0, n38);
+  n44->ReplaceInput(3, n43);
+  n45->ReplaceInput(0, n44);
+  n45->ReplaceInput(1, n21);
+  n45->ReplaceInput(2, n6);
+  n45->ReplaceInput(3, n44);
+  n45->ReplaceInput(4, n43);
+  n29->ReplaceInput(1, n45);
+  n29->ReplaceInput(2, n23);
+  n30->ReplaceInput(3, n29);
+  n30->ReplaceInput(4, n23);
+  n42->ReplaceInput(0, n30);
+  n42->ReplaceInput(1, n37);
+  n42->ReplaceInput(2, n40);
+  n47->ReplaceInput(3, n42);
+  n47->ReplaceInput(4, n40);
+  n48->ReplaceInput(0, n47);
+  n48->ReplaceInput(1, n40);
+  n49->ReplaceInput(0, n48);
+  n53->ReplaceInput(0, n49);
+  n53->ReplaceInput(1, n50);
+  n8->ReplaceInput(1, n53);
+  n17->ReplaceInput(1, n8);
+  n18->ReplaceInput(0, n17);
+  n23->ReplaceInput(0, n18);
+  n23->ReplaceInput(1, n43);
+  n26->ReplaceInput(2, n23);
+  n11->ReplaceInput(1, n26);
+  n11->ReplaceInput(2, n8);
+  n16->ReplaceInput(1, n11);
+  n16->ReplaceInput(2, n6);
+  op = common_builder.Phi(2);
+  Node* n14 = graph.NewNode(op, nil, nil, nil);
+  USE(n14);
+  n14->ReplaceInput(0, n0);
+  op = common_builder.Phi(2);
+  Node* n55 = graph.NewNode(op, nil, nil, nil);
+  USE(n55);
+  n55->ReplaceInput(0, n47);
+  n55->ReplaceInput(1, n52);
+  n55->ReplaceInput(2, n53);
+  n14->ReplaceInput(1, n55);
+  n14->ReplaceInput(2, n8);
+  n16->ReplaceInput(3, n14);
+  n16->ReplaceInput(4, n8);
+  n20->ReplaceInput(2, n16);
+  n20->ReplaceInput(3, n18);
+  n22->ReplaceInput(0, n20);
+  n22->ReplaceInput(1, n21);
+  n22->ReplaceInput(2, n6);
+  n22->ReplaceInput(3, n20);
+  n22->ReplaceInput(4, n18);
+  n25->ReplaceInput(0, n22);
+  n25->ReplaceInput(1, n45);
+  n25->ReplaceInput(2, n23);
+  n10->ReplaceInput(1, n25);
+  n10->ReplaceInput(2, n8);
+  n56->ReplaceInput(0, n10);
+  n56->ReplaceInput(1, n13);
+  n56->ReplaceInput(2, n6);
+  n56->ReplaceInput(3, n16);
+  op = common_builder.IfFalse();
+  Node* n19 = graph.NewNode(op, nil);
+  USE(n19);
+  n19->ReplaceInput(0, n17);
+  n56->ReplaceInput(4, n19);
+  n57->ReplaceInput(0, n56);
+  n57->ReplaceInput(1, n56);
+  n57->ReplaceInput(2, n19);
+  n58->ReplaceInput(0, n57);
+
+  graph.SetStart(n0);
+  graph.SetEnd(n58);
+
+  PrintGraph(&graph);
+
+  Scheduler scheduler(scope.main_zone());
+  Schedule* schedule = scheduler.NewSchedule(&graph);
+
+  PrintSchedule(schedule);
+
+  CHECK_EQ(62, GetScheduledNodeCount(schedule));
+}
+
+
+TEST(BuildScheduleSimpleLoopWithCodeMotion) {
+  HandleAndZoneScope scope;
+  Isolate* isolate = scope.main_isolate();
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common_builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+  MachineOperatorBuilder machine_builder(scope.main_zone(), kMachineWord32);
+  Operator* op;
+
+  Handle<Object> object =
+      Handle<Object>(isolate->heap()->undefined_value(), isolate);
+  PrintableUnique<Object> unique_constant =
+      PrintableUnique<Object>::CreateUninitialized(scope.main_zone(), object);
+
+  // Manually transcribed code for:
+  // function turbo_fan_test(a, b, c) {
+  //   while (a < b) {
+  //     a += b + c;
+  //   }
+  //   return a;
+  // }
+  Node* nil = graph.NewNode(common_builder.Dead());
+  op = common_builder.End();
+  Node* n22 = graph.NewNode(op, nil);
+  USE(n22);
+  op = common_builder.Return();
+  Node* n21 = graph.NewNode(op, nil, nil, nil);
+  USE(n21);
+  op = common_builder.Phi(2);
+  Node* n9 = graph.NewNode(op, nil, nil, nil);
+  USE(n9);
+  op = common_builder.Parameter(0);
+  Node* n2 = graph.NewNode(op);
+  USE(n2);
+  n9->ReplaceInput(0, n2);
+  op = js_builder.Add();
+  Node* n20 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n20);
+  n20->ReplaceInput(0, n9);
+  op = machine_builder.Int32Add();
+  Node* n19 = graph.NewNode(op, nil, nil);
+  USE(n19);
+  op = common_builder.Phi(2);
+  Node* n10 = graph.NewNode(op, nil, nil, nil);
+  USE(n10);
+  op = common_builder.Parameter(0);
+  Node* n3 = graph.NewNode(op);
+  USE(n3);
+  n10->ReplaceInput(0, n3);
+  n10->ReplaceInput(1, n10);
+  op = common_builder.Loop(2);
+  Node* n7 = graph.NewNode(op, nil, nil);
+  USE(n7);
+  op = common_builder.Start();
+  Node* n0 = graph.NewNode(op);
+  USE(n0);
+  n7->ReplaceInput(0, n0);
+  op = common_builder.IfTrue();
+  Node* n17 = graph.NewNode(op, nil);
+  USE(n17);
+  op = common_builder.Branch();
+  Node* n16 = graph.NewNode(op, nil, nil);
+  USE(n16);
+  op = js_builder.ToBoolean();
+  Node* n15 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n15);
+  op = js_builder.LessThan();
+  Node* n14 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n14);
+  n14->ReplaceInput(0, n9);
+  n14->ReplaceInput(1, n10);
+  op = common_builder.HeapConstant(unique_constant);
+  Node* n6 = graph.NewNode(op);
+  USE(n6);
+  n14->ReplaceInput(2, n6);
+  op = common_builder.Phi(2);
+  Node* n12 = graph.NewNode(op, nil, nil, nil);
+  USE(n12);
+  n12->ReplaceInput(0, n0);
+  n12->ReplaceInput(1, n20);
+  n12->ReplaceInput(2, n7);
+  n14->ReplaceInput(3, n12);
+  n14->ReplaceInput(4, n7);
+  n15->ReplaceInput(0, n14);
+  n15->ReplaceInput(1, n6);
+  n15->ReplaceInput(2, n14);
+  n15->ReplaceInput(3, n7);
+  n16->ReplaceInput(0, n15);
+  n16->ReplaceInput(1, n7);
+  n17->ReplaceInput(0, n16);
+  n7->ReplaceInput(1, n17);
+  n10->ReplaceInput(2, n7);
+  n19->ReplaceInput(0, n2);
+  op = common_builder.Phi(2);
+  Node* n11 = graph.NewNode(op, nil, nil, nil);
+  USE(n11);
+  op = common_builder.Parameter(0);
+  Node* n4 = graph.NewNode(op);
+  USE(n4);
+  n11->ReplaceInput(0, n4);
+  n11->ReplaceInput(1, n11);
+  n11->ReplaceInput(2, n7);
+  n19->ReplaceInput(1, n3);
+  n20->ReplaceInput(1, n19);
+  n20->ReplaceInput(2, n6);
+  n20->ReplaceInput(3, n19);
+  n20->ReplaceInput(4, n17);
+  n9->ReplaceInput(1, n20);
+  n9->ReplaceInput(2, n7);
+  n21->ReplaceInput(0, n9);
+  n21->ReplaceInput(1, n15);
+  op = common_builder.IfFalse();
+  Node* n18 = graph.NewNode(op, nil);
+  USE(n18);
+  n18->ReplaceInput(0, n16);
+  n21->ReplaceInput(2, n18);
+  n22->ReplaceInput(0, n21);
+
+  graph.SetStart(n0);
+  graph.SetEnd(n22);
+
+  PrintGraph(&graph);
+
+  Scheduler scheduler(scope.main_zone());
+  Schedule* schedule = scheduler.NewSchedule(&graph);
+
+  PrintSchedule(schedule);
+
+  CHECK_EQ(19, GetScheduledNodeCount(schedule));
+
+  // Make sure the integer-only add gets hoisted to a different block than
+  // the JSAdd.
+  CHECK(schedule->block(n19) != schedule->block(n20));
+}
+
+
+// So we can get a real JS function.
+static Handle<JSFunction> Compile(const char* source) {
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<String> source_code = isolate->factory()
+                                   ->NewStringFromUtf8(CStrVector(source))
+                                   .ToHandleChecked();
+  Handle<SharedFunctionInfo> shared_function = Compiler::CompileScript(
+      source_code, Handle<String>(), 0, 0, false,
+      Handle<Context>(isolate->native_context()), NULL, NULL,
+      v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE);
+  return isolate->factory()->NewFunctionFromSharedFunctionInfo(
+      shared_function, isolate->native_context());
+}
+
+
+TEST(BuildScheduleTrivialLazyDeoptCall) {
+  HandleAndZoneScope scope;
+  Isolate* isolate = scope.main_isolate();
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common_builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+
+  InitializedHandleScope handles;
+  Handle<JSFunction> function = Compile("m()");
+  CompilationInfoWithZone info(function);
+  Linkage linkage(&info);
+
+  // Manually transcribed code for:
+  // function turbo_fan_test() {
+  //   m();
+  // }
+  // where m can lazy deopt (so it has a deopt block associated with it).
+
+  //                  Start                                    //
+  //                    ^                                      //
+  //                    | (EC)                                 //
+  //                    |                                      //
+  //         /------> Call <--------------\                    //
+  //        /        ^    ^                \                   //
+  //       /         |    |                 \        undef     //
+  //      /          /    \                  \         ^       //
+  //  (E) |     (C) /      \  (C)             \ (E)    |       //
+  //      | Continuation  LazyDeoptimization  |        |       //
+  //      \___    ^           ^               /        |       //
+  //          \   |           |        ______/    Framestate   //
+  //    undef  \  | (VC)      | (C)   /            ^           //
+  //         \  \ |           |      /            /            //
+  //          Return    Deoptimization ----------/             //
+  //              ^           ^                                //
+  //               \         /                                 //
+  //            (C) \       / (C)                              //
+  //                 \     /                                   //
+  //                  Merge                                    //
+  //                    ^                                      //
+  //                    |                                      //
+  //                   End                                     //
+
+  Handle<Object> undef_object =
+      Handle<Object>(isolate->heap()->undefined_value(), isolate);
+  PrintableUnique<Object> undef_constant =
+      PrintableUnique<Object>::CreateUninitialized(scope.main_zone(),
+                                                   undef_object);
+
+  Node* undef_node = graph.NewNode(common_builder.HeapConstant(undef_constant));
+
+  Node* start_node = graph.NewNode(common_builder.Start());
+
+  CallDescriptor* descriptor = linkage.GetJSCallDescriptor(0);
+  Node* call_node = graph.NewNode(common_builder.Call(descriptor),
+                                  undef_node,   // function
+                                  undef_node,   // context
+                                  start_node,   // effect
+                                  start_node);  // control
+
+  Node* cont_node = graph.NewNode(common_builder.Continuation(), call_node);
+  Node* lazy_deopt_node =
+      graph.NewNode(common_builder.LazyDeoptimization(), call_node);
+
+  FrameStateDescriptor state_descriptor(BailoutId(1234));
+  Node* state_node =
+      graph.NewNode(common_builder.FrameState(state_descriptor));
+
+  Node* return_node = graph.NewNode(common_builder.Return(),
+                                    undef_node,  // return value
+                                    call_node,   // effect
+                                    cont_node);  // control
+  Node* deoptimization_node = graph.NewNode(common_builder.Deoptimize(),
+                                            state_node,  // deopt environment
+                                            call_node,   // effect
+                                            lazy_deopt_node);  // control
+
+  Node* merge_node =
+      graph.NewNode(common_builder.Merge(2), return_node, deoptimization_node);
+
+  Node* end_node = graph.NewNode(common_builder.End(), merge_node);
+
+  graph.SetStart(start_node);
+  graph.SetEnd(end_node);
+
+  PrintGraph(&graph);
+
+  Scheduler scheduler(scope.main_zone());
+  Schedule* schedule = scheduler.NewSchedule(&graph);
+
+  PrintSchedule(schedule);
+
+  // Tests:
+  // Continuation and deopt have basic blocks.
+  BasicBlock* cont_block = schedule->block(cont_node);
+  BasicBlock* deopt_block = schedule->block(lazy_deopt_node);
+  BasicBlock* call_block = schedule->block(call_node);
+  CHECK_NE(NULL, cont_block);
+  CHECK_NE(NULL, deopt_block);
+  CHECK_NE(NULL, call_block);
+  // The basic blocks are different.
+  CHECK_NE(cont_block, deopt_block);
+  CHECK_NE(cont_block, call_block);
+  CHECK_NE(deopt_block, call_block);
+  // The call node finishes its own basic block.
+  CHECK_EQ(BasicBlock::kCall, call_block->control_);
+  CHECK_EQ(call_node, call_block->control_input_);
+  // The lazy deopt block is deferred.
+  CHECK(deopt_block->deferred_);
+  CHECK(!call_block->deferred_);
+  CHECK(!cont_block->deferred_);
+  // The lazy deopt block contains framestate + bailout (and nothing else).
+  CHECK_EQ(deoptimization_node, deopt_block->control_input_);
+  CHECK_EQ(2, deopt_block->nodes_.size());
+  CHECK_EQ(lazy_deopt_node, deopt_block->nodes_[0]);
+  CHECK_EQ(state_node, deopt_block->nodes_[1]);
+}
diff --git a/test/cctest/compiler/test-simplified-lowering.cc b/test/cctest/compiler/test-simplified-lowering.cc
new file mode 100644 (file)
index 0000000..f1d9570
--- /dev/null
@@ -0,0 +1,614 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/compiler/control-builders.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/simplified-node-factory.h"
+#include "src/compiler/typer.h"
+#include "src/compiler/verifier.h"
+#include "src/execution.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/scopes.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/graph-builder-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
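+// Extends GraphBuilderTester with helpers to close the graph and run the
+// simplified-lowering phase on a single node or on all nodes, plus raw
+// pointer Load/Store helpers for observing values through memory.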
+template <typename ReturnType>
+class SimplifiedGraphBuilderTester : public GraphBuilderTester<ReturnType> {
+ public:
+  SimplifiedGraphBuilderTester(MachineRepresentation p0 = kMachineLast,
+                               MachineRepresentation p1 = kMachineLast,
+                               MachineRepresentation p2 = kMachineLast,
+                               MachineRepresentation p3 = kMachineLast,
+                               MachineRepresentation p4 = kMachineLast)
+      : GraphBuilderTester<ReturnType>(p0, p1, p2, p3, p4) {}
+
+  // Close graph and lower one node.
+  void Lower(Node* node) {
+    this->End();
+    Typer typer(this->zone());
+    CommonOperatorBuilder common(this->zone());
+    SourcePositionTable source_positions(this->graph());
+    JSGraph jsgraph(this->graph(), &common, &typer);
+    SimplifiedLowering lowering(&jsgraph, &source_positions);
+    if (node == NULL) {
+      lowering.LowerAllNodes();
+    } else {
+      lowering.Lower(node);
+    }
+  }
+
+  // Close graph and lower all nodes.
+  void LowerAllNodes() { Lower(NULL); }
+
+  void StoreFloat64(Node* node, double* ptr) {
+    Node* ptr_node = this->PointerConstant(ptr);
+    this->Store(kMachineFloat64, ptr_node, node);
+  }
+
+  Node* LoadInt32(int32_t* ptr) {
+    Node* ptr_node = this->PointerConstant(ptr);
+    return this->Load(kMachineWord32, ptr_node);
+  }
+
+  Node* LoadUint32(uint32_t* ptr) {
+    Node* ptr_node = this->PointerConstant(ptr);
+    return this->Load(kMachineWord32, ptr_node);
+  }
+
+  Node* LoadFloat64(double* ptr) {
+    Node* ptr_node = this->PointerConstant(ptr);
+    return this->Load(kMachineFloat64, ptr_node);
+  }
+
+  Factory* factory() { return this->isolate()->factory(); }
+  Heap* heap() { return this->isolate()->heap(); }
+};
+
+
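+// Variant that compiles the finished machine graph into an actual
+// JSFunction (replacing the code of a stub closure) and invokes it through
+// Execution::Call, so the lowered code runs with a real context.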
+class SimplifiedGraphBuilderJSTester
+    : public SimplifiedGraphBuilderTester<Object*> {
+ public:
+  SimplifiedGraphBuilderJSTester()
+      : SimplifiedGraphBuilderTester<Object*>(),
+        f_(v8::Utils::OpenHandle(*v8::Handle<v8::Function>::Cast(CompileRun(
+            "(function() { 'use strict'; return 2.7123; })")))),
+        swapped_(false) {
+    set_current_context(HeapConstant(handle(f_->context())));
+  }
+
+  template <typename T>
+  T* CallJS() {
+    if (!swapped_) {
+      Compile();
+    }
+    Handle<Object>* args = NULL;
+    MaybeHandle<Object> result = Execution::Call(
+        isolate(), f_, factory()->undefined_value(), 0, args, false);
+    return T::cast(*result.ToHandleChecked());
+  }
+
+ private:
+  void Compile() {
+    CompilationInfoWithZone info(f_);
+    CHECK(Parser::Parse(&info));
+    StrictMode strict_mode = info.function()->strict_mode();
+    info.SetStrictMode(strict_mode);
+    info.SetOptimizing(BailoutId::None(), Handle<Code>(f_->code()));
+    CHECK(Rewriter::Rewrite(&info));
+    CHECK(Scope::Analyze(&info));
+    CHECK_NE(NULL, info.scope());
+    Pipeline pipeline(&info);
+    Linkage linkage(&info);
+    Handle<Code> code = pipeline.GenerateCodeForMachineGraph(&linkage, graph());
+    CHECK(!code.is_null());
+    f_->ReplaceCode(*code);
+    swapped_ = true;
+  }
+
+  Handle<JSFunction> f_;
+  bool swapped_;
+};
+
+
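+// Each RunChange* test below builds a graph around a single change node,
+// lowers just that node, and would then exercise it over the standard input
+// sets; several currently return early right after lowering (see the
+// TODO(titzer) markers), so for those only construction and lowering run.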
+TEST(RunChangeTaggedToInt32) {
+  SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged);
+  Node* x = t.ChangeTaggedToInt32(t.Parameter(0));
+  t.Return(x);
+
+  t.Lower(x);
+
+  // TODO(titzer): remove me.
+  return;
+
+  FOR_INT32_INPUTS(i) {
+    int32_t input = *i;
+
+    if (Smi::IsValid(input)) {
+      int32_t result = t.Call(Smi::FromInt(input));
+      CHECK_EQ(input, result);
+    }
+
+    {
+      Handle<Object> number = t.factory()->NewNumber(input);
+      int32_t result = t.Call(*number);
+      CHECK_EQ(input, result);
+    }
+
+    {
+      Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
+      int32_t result = t.Call(*number);
+      CHECK_EQ(input, result);
+    }
+  }
+}
+
+
+TEST(RunChangeTaggedToUint32) {
+  SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged);
+  Node* x = t.ChangeTaggedToUint32(t.Parameter(0));
+  t.Return(x);
+
+  t.Lower(x);
+
+  // TODO(titzer): remove me.
+  return;
+
+  FOR_UINT32_INPUTS(i) {
+    uint32_t input = *i;
+
+    if (Smi::IsValid(input)) {
+      int32_t result = t.Call(Smi::FromInt(input));
+      CHECK_EQ(static_cast<int32_t>(input), result);
+    }
+
+    {
+      Handle<Object> number = t.factory()->NewNumber(input);
+      int32_t result = t.Call(*number);
+      CHECK_EQ(static_cast<int32_t>(input), result);
+    }
+
+    {
+      Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
+      int32_t result = t.Call(*number);
+      CHECK_EQ(static_cast<int32_t>(input), result);
+    }
+  }
+}
+
+
+TEST(RunChangeTaggedToFloat64) {
+  SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged);
+  double result;
+  Node* x = t.ChangeTaggedToFloat64(t.Parameter(0));
+  t.StoreFloat64(x, &result);
+  t.Return(t.Int32Constant(0));
+
+  t.Lower(x);
+
+  // TODO(titzer): remove me.
+  return;
+
+  {
+    FOR_INT32_INPUTS(i) {
+      int32_t input = *i;
+
+      if (Smi::IsValid(input)) {
+        t.Call(Smi::FromInt(input));
+        CHECK_EQ(input, static_cast<int32_t>(result));
+      }
+
+      {
+        Handle<Object> number = t.factory()->NewNumber(input);
+        t.Call(*number);
+        CHECK_EQ(input, static_cast<int32_t>(result));
+      }
+
+      {
+        Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
+        t.Call(*number);
+        CHECK_EQ(input, static_cast<int32_t>(result));
+      }
+    }
+  }
+
+  {
+    FOR_FLOAT64_INPUTS(i) {
+      double input = *i;
+      {
+        Handle<Object> number = t.factory()->NewNumber(input);
+        t.Call(*number);
+        CHECK_EQ(input, result);
+      }
+
+      {
+        Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
+        t.Call(*number);
+        CHECK_EQ(input, result);
+      }
+    }
+  }
+}
+
+
+TEST(RunChangeBoolToBit) {
+  SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged);
+  Node* x = t.ChangeBoolToBit(t.Parameter(0));
+  t.Return(x);
+
+  t.Lower(x);
+
+  if (!Pipeline::SupportedTarget()) return;
+
+  {
+    Object* true_obj = t.heap()->true_value();
+    int32_t result = t.Call(true_obj);
+    CHECK_EQ(1, result);
+  }
+
+  {
+    Object* false_obj = t.heap()->false_value();
+    int32_t result = t.Call(false_obj);
+    CHECK_EQ(0, result);
+  }
+}
+
+
+TEST(RunChangeBitToBool) {
+  SimplifiedGraphBuilderTester<Object*> t(kMachineTagged);
+  Node* x = t.ChangeBitToBool(t.Parameter(0));
+  t.Return(x);
+
+  t.Lower(x);
+
+  // TODO(titzer): remove me.
+  return;
+
+  {
+    Object* result = t.Call(1);
+    Object* true_obj = t.heap()->true_value();
+    CHECK_EQ(true_obj, result);
+  }
+
+  {
+    Object* result = t.Call(0);
+    Object* false_obj = t.heap()->false_value();
+    CHECK_EQ(false_obj, result);
+  }
+}
+
+
+TEST(RunChangeInt32ToTagged) {
+  SimplifiedGraphBuilderJSTester t;
+  int32_t input;
+  Node* load = t.LoadInt32(&input);
+  Node* x = t.ChangeInt32ToTagged(load);
+  t.Return(x);
+
+  t.Lower(x);
+
+  // TODO(titzer): remove me.
+  return;
+
+  {
+    FOR_INT32_INPUTS(i) {
+      input = *i;
+      HeapNumber* result = t.CallJS<HeapNumber>();
+      CHECK_EQ(static_cast<double>(input), result->value());
+    }
+  }
+
+  {
+    FOR_INT32_INPUTS(i) {
+      input = *i;
+      SimulateFullSpace(CcTest::heap()->new_space());
+      HeapNumber* result = t.CallJS<HeapNumber>();
+      CHECK_EQ(static_cast<double>(input), result->value());
+    }
+  }
+}
+
+
+TEST(RunChangeUint32ToTagged) {
+  SimplifiedGraphBuilderJSTester t;
+  uint32_t input;
+  Node* load = t.LoadUint32(&input);
+  Node* x = t.ChangeUint32ToTagged(load);
+  t.Return(x);
+
+  t.Lower(x);
+
+  // TODO(titzer): remove me.
+  return;
+
+  {
+    FOR_UINT32_INPUTS(i) {
+      input = *i;
+      HeapNumber* result = t.CallJS<HeapNumber>();
+      double expected = static_cast<double>(input);
+      CHECK_EQ(expected, result->value());
+    }
+  }
+
+  {
+    FOR_UINT32_INPUTS(i) {
+      input = *i;
+      SimulateFullSpace(CcTest::heap()->new_space());
+      HeapNumber* result = t.CallJS<HeapNumber>();
+      double expected = static_cast<double>(input);
+      CHECK_EQ(expected, result->value());
+    }
+  }
+}
+
+
+TEST(RunChangeFloat64ToTagged) {
+  SimplifiedGraphBuilderJSTester t;
+  double input;
+  Node* load = t.LoadFloat64(&input);
+  Node* x = t.ChangeFloat64ToTagged(load);
+  t.Return(x);
+
+  t.Lower(x);
+
+  // TODO(titzer): remove me.
+  return;
+
+  {
+    FOR_FLOAT64_INPUTS(i) {
+      input = *i;
+      HeapNumber* result = t.CallJS<HeapNumber>();
+      CHECK_EQ(input, result->value());
+    }
+  }
+  {
+    FOR_FLOAT64_INPUTS(i) {
+      input = *i;
+      SimulateFullSpace(CcTest::heap()->new_space());
+      HeapNumber* result = t.CallJS<HeapNumber>();
+      CHECK_EQ(input, result->value());
+    }
+  }
+}
+
+
+// TODO(dcarney): find a home for these functions.
+namespace {
+
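+// The helpers below describe object layouts to the simplified operators. A
+// FieldAccess is {offset, name, type, machine representation} for a fixed
+// offset within an object; an ElementAccess is {header size, element type,
+// machine representation} for indexed access past a header.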
+FieldAccess ForJSObjectMap() {
+  FieldAccess access = {JSObject::kMapOffset, Handle<Name>(), Type::Any(),
+                        kMachineTagged};
+  return access;
+}
+
+
+FieldAccess ForJSObjectProperties() {
+  FieldAccess access = {JSObject::kPropertiesOffset, Handle<Name>(),
+                        Type::Any(), kMachineTagged};
+  return access;
+}
+
+
+FieldAccess ForArrayBufferBackingStore() {
+  FieldAccess access = {
+      JSArrayBuffer::kBackingStoreOffset, Handle<Name>(), Type::UntaggedPtr(),
+      MachineOperatorBuilder::pointer_rep(),
+  };
+  return access;
+}
+
+
+ElementAccess ForFixedArrayElement() {
+  ElementAccess access = {FixedArray::kHeaderSize, Type::Any(), kMachineTagged};
+  return access;
+}
+
+
+ElementAccess ForBackingStoreElement(MachineRepresentation rep) {
+  ElementAccess access = {kNonHeapObjectHeaderSize, Type::Any(), rep};
+  return access;
+}
+}
+
+
+// Create a simple JSObject with a unique map.
+static Handle<JSObject> TestObject() {
+  static int index = 0;
+  char buffer[50];
+  v8::base::OS::SNPrintF(buffer, 50, "({'a_%d':1})", index++);
+  return Handle<JSObject>::cast(v8::Utils::OpenHandle(*CompileRun(buffer)));
+}
+
+
+TEST(RunLoadMap) {
+  SimplifiedGraphBuilderTester<Object*> t(kMachineTagged);
+  FieldAccess access = ForJSObjectMap();
+  Node* load = t.LoadField(access, t.Parameter(0));
+  t.Return(load);
+
+  t.LowerAllNodes();
+
+  if (!Pipeline::SupportedTarget()) return;
+
+  Handle<JSObject> src = TestObject();
+  Handle<Map> src_map(src->map());
+  Object* result = t.Call(*src);
+  CHECK_EQ(*src_map, result);
+}
+
+
+TEST(RunStoreMap) {
+  SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged, kMachineTagged);
+  FieldAccess access = ForJSObjectMap();
+  t.StoreField(access, t.Parameter(1), t.Parameter(0));
+  t.Return(t.Int32Constant(0));
+
+  t.LowerAllNodes();
+
+  if (!Pipeline::SupportedTarget()) return;
+
+  Handle<JSObject> src = TestObject();
+  Handle<Map> src_map(src->map());
+  Handle<JSObject> dst = TestObject();
+  CHECK(src->map() != dst->map());
+  t.Call(*src_map, *dst);
+  CHECK(*src_map == dst->map());
+}
+
+
+TEST(RunLoadProperties) {
+  SimplifiedGraphBuilderTester<Object*> t(kMachineTagged);
+  FieldAccess access = ForJSObjectProperties();
+  Node* load = t.LoadField(access, t.Parameter(0));
+  t.Return(load);
+
+  t.LowerAllNodes();
+
+  if (!Pipeline::SupportedTarget()) return;
+
+  Handle<JSObject> src = TestObject();
+  Handle<FixedArray> src_props(src->properties());
+  Object* result = t.Call(*src);
+  CHECK_EQ(*src_props, result);
+}
+
+
+TEST(RunLoadStoreMap) {
+  SimplifiedGraphBuilderTester<Object*> t(kMachineTagged, kMachineTagged);
+  FieldAccess access = ForJSObjectMap();
+  Node* load = t.LoadField(access, t.Parameter(0));
+  t.StoreField(access, t.Parameter(1), load);
+  t.Return(load);
+
+  t.LowerAllNodes();
+
+  if (!Pipeline::SupportedTarget()) return;
+
+  Handle<JSObject> src = TestObject();
+  Handle<Map> src_map(src->map());
+  Handle<JSObject> dst = TestObject();
+  CHECK(src->map() != dst->map());
+  Object* result = t.Call(*src, *dst);
+  CHECK(result->IsMap());
+  CHECK_EQ(*src_map, result);
+  CHECK(*src_map == dst->map());
+}
+
+
+TEST(RunLoadStoreFixedArrayIndex) {
+  SimplifiedGraphBuilderTester<Object*> t(kMachineTagged);
+  ElementAccess access = ForFixedArrayElement();
+  Node* load = t.LoadElement(access, t.Parameter(0), t.Int32Constant(0));
+  t.StoreElement(access, t.Parameter(0), t.Int32Constant(1), load);
+  t.Return(load);
+
+  t.LowerAllNodes();
+
+  if (!Pipeline::SupportedTarget()) return;
+
+  Handle<FixedArray> array = t.factory()->NewFixedArray(2);
+  Handle<JSObject> src = TestObject();
+  Handle<JSObject> dst = TestObject();
+  array->set(0, *src);
+  array->set(1, *dst);
+  Object* result = t.Call(*array);
+  CHECK_EQ(*src, result);
+  CHECK_EQ(*src, array->get(0));
+  CHECK_EQ(*src, array->get(1));
+}
+
+
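+// Loads the byte at `index` from the array buffer's backing store and
+// stores it back at `index + 1`; the loop at the end verifies that no other
+// byte was disturbed.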
+TEST(RunLoadStoreArrayBuffer) {
+  SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged);
+  const int index = 12;
+  FieldAccess access = ForArrayBufferBackingStore();
+  Node* backing_store = t.LoadField(access, t.Parameter(0));
+  ElementAccess buffer_access = ForBackingStoreElement(kMachineWord8);
+  Node* load =
+      t.LoadElement(buffer_access, backing_store, t.Int32Constant(index));
+  t.StoreElement(buffer_access, backing_store, t.Int32Constant(index + 1),
+                 load);
+  t.Return(load);
+
+  t.LowerAllNodes();
+
+  if (!Pipeline::SupportedTarget()) return;
+
+  Handle<JSArrayBuffer> array = t.factory()->NewJSArrayBuffer();
+  const int array_length = 2 * index;
+  Runtime::SetupArrayBufferAllocatingData(t.isolate(), array, array_length);
+  uint8_t* data = reinterpret_cast<uint8_t*>(array->backing_store());
+  for (int i = 0; i < array_length; i++) {
+    data[i] = i;
+  }
+  int32_t result = t.Call(*array);
+  CHECK_EQ(index, result);
+  for (int i = 0; i < array_length; i++) {
+    uint8_t expected = i;
+    if (i == (index + 1)) expected = result;
+    CHECK_EQ(data[i], expected);
+  }
+}
+
+
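+// Copies one fixed array into another with an explicit LoopBuilder loop:
+// the index is carried across iterations on the environment stack
+// (Push/Pop), and the final index is returned.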
+TEST(RunCopyFixedArray) {
+  SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged, kMachineTagged);
+
+  const int kArraySize = 15;
+  Node* one = t.Int32Constant(1);
+  Node* index = t.Int32Constant(0);
+  Node* limit = t.Int32Constant(kArraySize);
+  t.environment()->Push(index);
+  {
+    LoopBuilder loop(&t);
+    loop.BeginLoop();
+    // Loop exit condition.
+    index = t.environment()->Top();
+    Node* condition = t.Int32LessThan(index, limit);
+    loop.BreakUnless(condition);
+    // dst[index] = src[index].
+    index = t.environment()->Pop();
+    ElementAccess access = ForFixedArrayElement();
+    Node* src = t.Parameter(0);
+    Node* load = t.LoadElement(access, src, index);
+    Node* dst = t.Parameter(1);
+    t.StoreElement(access, dst, index, load);
+    // index++
+    index = t.Int32Add(index, one);
+    t.environment()->Push(index);
+    // continue.
+    loop.EndBody();
+    loop.EndLoop();
+  }
+  index = t.environment()->Pop();
+  t.Return(index);
+
+  t.LowerAllNodes();
+
+  if (!Pipeline::SupportedTarget()) return;
+
+  Handle<FixedArray> src = t.factory()->NewFixedArray(kArraySize);
+  Handle<FixedArray> src_copy = t.factory()->NewFixedArray(kArraySize);
+  Handle<FixedArray> dst = t.factory()->NewFixedArray(kArraySize);
+  for (int i = 0; i < kArraySize; i++) {
+    src->set(i, *TestObject());
+    src_copy->set(i, src->get(i));
+    dst->set(i, *TestObject());
+    CHECK_NE(src_copy->get(i), dst->get(i));
+  }
+  CHECK_EQ(kArraySize, t.Call(*src, *dst));
+  for (int i = 0; i < kArraySize; i++) {
+    CHECK_EQ(src_copy->get(i), dst->get(i));
+  }
+}
diff --git a/test/cctest/compiler/test-structured-ifbuilder-fuzzer.cc b/test/cctest/compiler/test-structured-ifbuilder-fuzzer.cc
new file mode 100644 (file)
index 0000000..156ab8d
--- /dev/null
@@ -0,0 +1,666 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/base/utils/random-number-generator.h"
+#include "test/cctest/compiler/codegen-tester.h"
+
+#if V8_TURBOFAN_TARGET
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+typedef StructuredMachineAssembler::IfBuilder IfBuilder;
+typedef StructuredMachineAssembler::LoopBuilder Loop;
+
+static const int32_t kUninitializedVariableOffset = -1;
+static const int32_t kUninitializedOutput = -1;
+static const int32_t kVerifiedOutput = -2;
+
+static const int32_t kInitalVar = 1013;
+static const int32_t kConjunctionInc = 1069;
+static const int32_t kDisjunctionInc = 1151;
+static const int32_t kThenInc = 1223;
+static const int32_t kElseInc = 1291;
+static const int32_t kIfInc = 1373;
+
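+// A side model of the IfBuilder under test. It records the same sequence of
+// If/And/Or/Then/Else calls as a tree, can print itself back to the compact
+// string grammar used below, and can replay an execution to compute the
+// expected value of the tracked variable. Since every event adds a distinct
+// constant (kIfInc, kThenInc, ...), the final value fingerprints the exact
+// path taken through the generated code.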
+class IfBuilderModel {
+ public:
+  explicit IfBuilderModel(Zone* zone)
+      : zone_(zone),
+        variable_offset_(0),
+        root_(new (zone_) Node(NULL)),
+        current_node_(root_),
+        current_expression_(NULL) {}
+
+  void If() {
+    if (current_node_->else_node != NULL) {
+      current_node_ = current_node_->else_node;
+    } else if (current_node_->then_node != NULL) {
+      current_node_ = current_node_->then_node;
+    }
+    ASSERT(current_expression_ == NULL);
+    current_expression_ = new (zone_) Expression(zone_, NULL);
+    current_node_->condition = current_expression_;
+  }
+  void IfNode() { LastChild()->variable_offset = variable_offset_++; }
+
+  void OpenParen() { current_expression_ = LastChild(); }
+  void CloseParen() { current_expression_ = current_expression_->parent; }
+
+  void And() { NewChild()->conjunction = true; }
+  void Or() { NewChild()->disjunction = true; }
+
+  void Then() {
+    ASSERT(current_expression_ == NULL || current_expression_->parent == NULL);
+    current_expression_ = NULL;
+    ASSERT(current_node_->then_node == NULL);
+    current_node_->then_node = new (zone_) Node(current_node_);
+  }
+  void Else() {
+    ASSERT(current_expression_ == NULL || current_expression_->parent == NULL);
+    current_expression_ = NULL;
+    ASSERT(current_node_->else_node == NULL);
+    current_node_->else_node = new (zone_) Node(current_node_);
+  }
+  void Return() {
+    if (current_node_->else_node != NULL) {
+      current_node_->else_node->returns = true;
+    } else if (current_node_->then_node != NULL) {
+      current_node_->then_node->returns = true;
+    } else {
+      CHECK(false);
+    }
+  }
+  void End() {}
+
+  void Print(std::vector<char>* v) { PrintRecursive(v, root_); }
+
+  struct VerificationState {
+    int32_t* inputs;
+    int32_t* outputs;
+    int32_t var;
+  };
+
+  int32_t Verify(int length, int32_t* inputs, int32_t* outputs) {
+    CHECK_EQ(variable_offset_, length);
+    // Input/Output verification.
+    for (int i = 0; i < length; ++i) {
+      CHECK(inputs[i] == 0 || inputs[i] == 1);
+      CHECK(outputs[i] == kUninitializedOutput || outputs[i] >= 0);
+    }
+    // Do verification.
+    VerificationState state;
+    state.inputs = inputs;
+    state.outputs = outputs;
+    state.var = kInitalVar;
+    VerifyRecursive(root_, &state);
+    // Verify all outputs marked.
+    for (int i = 0; i < length; ++i) {
+      CHECK(outputs[i] == kUninitializedOutput ||
+            outputs[i] == kVerifiedOutput);
+    }
+    return state.var;
+  }
+
+ private:
+  struct Expression;
+  typedef std::vector<Expression*, zone_allocator<Expression*> > Expressions;
+
+  struct Expression : public ZoneObject {
+    Expression(Zone* zone, Expression* p)
+        : variable_offset(kUninitializedVariableOffset),
+          disjunction(false),
+          conjunction(false),
+          parent(p),
+          children(Expressions::allocator_type(zone)) {}
+    int variable_offset;
+    bool disjunction;
+    bool conjunction;
+    Expression* parent;
+    Expressions children;
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(Expression);
+  };
+
+  struct Node : public ZoneObject {
+    explicit Node(Node* p)
+        : parent(p),
+          condition(NULL),
+          then_node(NULL),
+          else_node(NULL),
+          returns(false) {}
+    Node* parent;
+    Expression* condition;
+    Node* then_node;
+    Node* else_node;
+    bool returns;
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(Node);
+  };
+
+  Expression* LastChild() {
+    if (current_expression_->children.empty()) {
+      current_expression_->children.push_back(
+          new (zone_) Expression(zone_, current_expression_));
+    }
+    return current_expression_->children.back();
+  }
+
+  Expression* NewChild() {
+    Expression* child = new (zone_) Expression(zone_, current_expression_);
+    current_expression_->children.push_back(child);
+    return child;
+  }
+
+  static void PrintRecursive(std::vector<char>* v, Expression* expression) {
+    CHECK(expression != NULL);
+    if (expression->conjunction) {
+      ASSERT(!expression->disjunction);
+      v->push_back('&');
+    } else if (expression->disjunction) {
+      v->push_back('|');
+    }
+    if (expression->variable_offset != kUninitializedVariableOffset) {
+      v->push_back('v');
+    }
+    Expressions& children = expression->children;
+    if (children.empty()) return;
+    v->push_back('(');
+    for (Expressions::iterator i = children.begin(); i != children.end(); ++i) {
+      PrintRecursive(v, *i);
+    }
+    v->push_back(')');
+  }
+
+  static void PrintRecursive(std::vector<char>* v, Node* node) {
+    // Termination condition.
+    if (node->condition == NULL) {
+      CHECK(node->then_node == NULL && node->else_node == NULL);
+      if (node->returns) v->push_back('r');
+      return;
+    }
+    CHECK(!node->returns);
+    v->push_back('i');
+    PrintRecursive(v, node->condition);
+    if (node->then_node != NULL) {
+      v->push_back('t');
+      PrintRecursive(v, node->then_node);
+    }
+    if (node->else_node != NULL) {
+      v->push_back('e');
+      PrintRecursive(v, node->else_node);
+    }
+  }
+
+  static bool VerifyRecursive(Expression* expression,
+                              VerificationState* state) {
+    bool result = false;
+    bool first_iteration = true;
+    Expressions& children = expression->children;
+    CHECK(!children.empty());
+    for (Expressions::iterator i = children.begin(); i != children.end(); ++i) {
+      Expression* child = *i;
+      // Short-circuit evaluation. Note that the chain is evaluated strictly
+      // left to right, so mixes of &&s and ||s do not follow C++ precedence.
+      if ((child->conjunction && !result) || (child->disjunction && result)) {
+        continue;
+      }
+      if (child->conjunction) state->var += kConjunctionInc;
+      if (child->disjunction) state->var += kDisjunctionInc;
+      bool child_result;
+      if (child->variable_offset != kUninitializedVariableOffset) {
+        // Verify output
+        CHECK_EQ(state->var, state->outputs[child->variable_offset]);
+        state->outputs[child->variable_offset] = kVerifiedOutput;  // Mark seen.
+        child_result = state->inputs[child->variable_offset];
+        CHECK(child->children.empty());
+        state->var += kIfInc;
+      } else {
+        child_result = VerifyRecursive(child, state);
+      }
+      if (child->conjunction) {
+        result &= child_result;
+      } else if (child->disjunction) {
+        result |= child_result;
+      } else {
+        CHECK(first_iteration);
+        result = child_result;
+      }
+      first_iteration = false;
+    }
+    return result;
+  }
+
+  static void VerifyRecursive(Node* node, VerificationState* state) {
+    if (node->condition == NULL) return;
+    bool result = VerifyRecursive(node->condition, state);
+    if (result) {
+      if (node->then_node) {
+        state->var += kThenInc;
+        return VerifyRecursive(node->then_node, state);
+      }
+    } else {
+      if (node->else_node) {
+        state->var += kElseInc;
+        return VerifyRecursive(node->else_node, state);
+      }
+    }
+  }
+
+  Zone* zone_;
+  int variable_offset_;
+  Node* root_;
+  Node* current_node_;
+  Expression* current_expression_;
+  DISALLOW_COPY_AND_ASSIGN(IfBuilderModel);
+};
+
+
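+// Drives the real IfBuilder (c_) and the model (m_) in lockstep from one
+// string in a compact grammar: 'i' starts an if, '(' and ')' group, 'v' is
+// a fresh condition variable, '&'/'|' chain conditions, 't'/'e' open
+// then/else branches, and 'r' makes the current branch return. For example,
+// "i(v)ter" is an if with one condition whose then-branch returns, followed
+// by an empty else. Every condition also stores the running variable to an
+// output array so Verify can check the value observed at each site.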
+class IfBuilderGenerator : public StructuredMachineAssemblerTester<int32_t> {
+ public:
+  IfBuilderGenerator()
+      : StructuredMachineAssemblerTester(MachineOperatorBuilder::pointer_rep(),
+                                         MachineOperatorBuilder::pointer_rep()),
+        var_(NewVariable(Int32Constant(kInitalVar))),
+        c_(this),
+        m_(this->zone()),
+        one_(Int32Constant(1)),
+        offset_(0) {}
+
+  static void GenerateExpression(v8::base::RandomNumberGenerator* rng,
+                                 std::vector<char>* v, int n_vars) {
+    int depth = 1;
+    v->push_back('(');
+    bool need_if = true;
+    bool populated = false;
+    while (n_vars != 0) {
+      if (need_if) {
+        // Can nest a paren or do a variable.
+        if (rng->NextBool()) {
+          v->push_back('v');
+          n_vars--;
+          need_if = false;
+          populated = true;
+        } else {
+          v->push_back('(');
+          depth++;
+          populated = false;
+        }
+      } else {
+        // Can pop, do &&, or do ||.
+        int options = 3;
+        if (depth == 1 || !populated) {
+          options--;
+        }
+        switch (rng->NextInt(options)) {
+          case 0:
+            v->push_back('&');
+            need_if = true;
+            break;
+          case 1:
+            v->push_back('|');
+            need_if = true;
+            break;
+          case 2:
+            v->push_back(')');
+            depth--;
+            break;
+        }
+      }
+    }
+    CHECK(!need_if);
+    while (depth != 0) {
+      v->push_back(')');
+      depth--;
+    }
+  }
+
+  static void GenerateIfThenElse(v8::base::RandomNumberGenerator* rng,
+                                 std::vector<char>* v, int n_ifs,
+                                 int max_exp_length) {
+    CHECK_GT(n_ifs, 0);
+    CHECK_GT(max_exp_length, 0);
+    bool have_env = true;
+    bool then_done = false;
+    bool else_done = false;
+    bool first_iteration = true;
+    while (n_ifs != 0) {
+      if (have_env) {
+        int options = 3;
+        if (else_done || first_iteration) {  // Don't do else or return
+          options -= 2;
+          first_iteration = false;
+        }
+        switch (rng->NextInt(options)) {
+          case 0:
+            v->push_back('i');
+            n_ifs--;
+            have_env = false;
+            GenerateExpression(rng, v, rng->NextInt(max_exp_length) + 1);
+            break;
+          case 1:
+            v->push_back('r');
+            have_env = false;
+            break;
+          case 2:
+            v->push_back('e');
+            else_done = true;
+            then_done = false;
+            break;
+          default:
+            CHECK(false);
+        }
+      } else {  // Can only do then or else
+        int options = 2;
+        if (then_done) options--;
+        switch (rng->NextInt(options)) {
+          case 0:
+            v->push_back('e');
+            else_done = true;
+            then_done = false;
+            break;
+          case 1:
+            v->push_back('t');
+            then_done = true;
+            else_done = false;
+            break;
+          default:
+            CHECK(false);
+        }
+        have_env = true;
+      }
+    }
+    // The last instruction must have been an if; it can be completed in
+    // several ways.
+    int options = 2;
+    if (then_done && !else_done) options++;
+    switch (rng->NextInt(options)) {
+      case 0:
+        // Do nothing.
+        break;
+      case 1:
+        v->push_back('t');
+        switch (rng->NextInt(3)) {
+          case 0:
+            v->push_back('r');
+            break;
+          case 1:
+            v->push_back('e');
+            break;
+          case 2:
+            v->push_back('e');
+            v->push_back('r');
+            break;
+          default:
+            CHECK(false);
+        }
+        break;
+      case 2:
+        v->push_back('e');
+        if (rng->NextBool()) v->push_back('r');
+        break;
+      default:
+        CHECK(false);
+    }
+  }
+
+  std::string::const_iterator ParseExpression(std::string::const_iterator it,
+                                              std::string::const_iterator end) {
+    // Prepare for expression.
+    m_.If();
+    c_.If();
+    int depth = 0;
+    for (; it != end; ++it) {
+      switch (*it) {
+        case 'v':
+          m_.IfNode();
+          {
+            Node* offset = Int32Constant(offset_ * 4);
+            Store(kMachineWord32, Parameter(1), offset, var_.Get());
+            var_.Set(Int32Add(var_.Get(), Int32Constant(kIfInc)));
+            c_.If(Load(kMachineWord32, Parameter(0), offset));
+            offset_++;
+          }
+          break;
+        case '&':
+          m_.And();
+          c_.And();
+          var_.Set(Int32Add(var_.Get(), Int32Constant(kConjunctionInc)));
+          break;
+        case '|':
+          m_.Or();
+          c_.Or();
+          var_.Set(Int32Add(var_.Get(), Int32Constant(kDisjunctionInc)));
+          break;
+        case '(':
+          if (depth != 0) {
+            m_.OpenParen();
+            c_.OpenParen();
+          }
+          depth++;
+          break;
+        case ')':
+          depth--;
+          if (depth == 0) return it;
+          m_.CloseParen();
+          c_.CloseParen();
+          break;
+        default:
+          CHECK(false);
+      }
+    }
+    CHECK(false);
+    return it;
+  }
+
+  void ParseIfThenElse(const std::string& str) {
+    int n_vars = 0;
+    for (std::string::const_iterator it = str.begin(); it != str.end(); ++it) {
+      if (*it == 'v') n_vars++;
+    }
+    InitializeConstants(n_vars);
+    for (std::string::const_iterator it = str.begin(); it != str.end(); ++it) {
+      switch (*it) {
+        case 'i': {
+          it++;
+          CHECK(it != str.end());
+          CHECK_EQ('(', *it);
+          it = ParseExpression(it, str.end());
+          CHECK_EQ(')', *it);
+          break;
+        }
+        case 't':
+          m_.Then();
+          c_.Then();
+          var_.Set(Int32Add(var_.Get(), Int32Constant(kThenInc)));
+          break;
+        case 'e':
+          m_.Else();
+          c_.Else();
+          var_.Set(Int32Add(var_.Get(), Int32Constant(kElseInc)));
+          break;
+        case 'r':
+          m_.Return();
+          Return(var_.Get());
+          break;
+        default:
+          CHECK(false);
+      }
+    }
+    m_.End();
+    c_.End();
+    Return(var_.Get());
+    // Compare generated model to parsed version.
+    {
+      std::vector<char> v;
+      m_.Print(&v);
+      std::string m_str(v.begin(), v.end());
+      CHECK(m_str == str);
+    }
+  }
+
+  void ParseExpression(const std::string& str) {
+    CHECK(inputs_.is_empty());
+    std::string wrapped = "i(" + str + ")te";
+    ParseIfThenElse(wrapped);
+  }
+
+  void ParseRandomIfThenElse(v8::base::RandomNumberGenerator* rng, int n_ifs,
+                             int n_vars) {
+    std::vector<char> v;
+    GenerateIfThenElse(rng, &v, n_ifs, n_vars);
+    std::string str(v.begin(), v.end());
+    ParseIfThenElse(str);
+  }
+
+  void RunRandom(v8::base::RandomNumberGenerator* rng) {
+    // TODO(dcarney): permute inputs via model.
+    // TODO(dcarney): compute test_cases from n_ifs and n_vars.
+    int test_cases = 100;
+    for (int test = 0; test < test_cases; test++) {
+      Initialize();
+      for (int i = 0; i < offset_; i++) {
+        inputs_[i] = rng->NextBool();
+      }
+      DoCall();
+    }
+  }
+
+  void Run(const std::string& str, int32_t expected) {
+    Initialize();
+    int offset = 0;
+    for (std::string::const_iterator it = str.begin(); it != str.end(); ++it) {
+      switch (*it) {
+        case 't':
+          inputs_[offset++] = 1;
+          break;
+        case 'f':
+          inputs_[offset++] = 0;
+          break;
+        default:
+          CHECK(false);
+      }
+    }
+    CHECK_EQ(offset_, offset);
+    // Call.
+    int32_t result = DoCall();
+    CHECK_EQ(result, expected);
+  }
+
+ private:
+  typedef std::vector<int32_t, zone_allocator<int32_t> > IOVector;
+
+  void InitializeConstants(int n_vars) {
+    CHECK(inputs_.is_empty());
+    inputs_.Reset(new int32_t[n_vars]);
+    outputs_.Reset(new int32_t[n_vars]);
+  }
+
+  void Initialize() {
+    for (int i = 0; i < offset_; i++) {
+      inputs_[i] = 0;
+      outputs_[i] = kUninitializedOutput;
+    }
+  }
+
+  int32_t DoCall() {
+    int32_t result = Call(inputs_.get(), outputs_.get());
+    int32_t expected = m_.Verify(offset_, inputs_.get(), outputs_.get());
+    CHECK_EQ(result, expected);
+    return result;
+  }
+
+  const v8::internal::compiler::Variable var_;
+  IfBuilder c_;
+  IfBuilderModel m_;
+  Node* one_;
+  int32_t offset_;
+  SmartArrayPointer<int32_t> inputs_;
+  SmartArrayPointer<int32_t> outputs_;
+};
+
+
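+// Hand-checked paths through "((v|v)|v)": with inputs "ttt" only the first
+// condition executes, so the expected value is kInitalVar + kIfInc +
+// kThenInc; each leading 'f' forces one more disjunction and condition to
+// be evaluated before the then/else branch is taken.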
+TEST(RunExpressionString) {
+  IfBuilderGenerator m;
+  m.ParseExpression("((v|v)|v)");
+  m.Run("ttt", kInitalVar + 1 * kIfInc + kThenInc);
+  m.Run("ftt", kInitalVar + 2 * kIfInc + kDisjunctionInc + kThenInc);
+  m.Run("fft", kInitalVar + 3 * kIfInc + 2 * kDisjunctionInc + kThenInc);
+  m.Run("fff", kInitalVar + 3 * kIfInc + 2 * kDisjunctionInc + kElseInc);
+}
+
+
+TEST(RunExpressionStrings) {
+  const char* strings[] = {
+      "v",       "(v)",     "((v))",     "v|v",
+      "(v|v)",   "((v|v))", "v&v",       "(v&v)",
+      "((v&v))", "v&(v)",   "v&(v|v)",   "v&(v|v)&v",
+      "v|(v)",   "v|(v&v)", "v|(v&v)|v", "v|(((v)|(v&v)|(v)|v)&(v))|v",
+  };
+  v8::base::RandomNumberGenerator rng;
+  for (size_t i = 0; i < ARRAY_SIZE(strings); i++) {
+    IfBuilderGenerator m;
+    m.ParseExpression(strings[i]);
+    m.RunRandom(&rng);
+  }
+}
+
+
+TEST(RunSimpleIfElseTester) {
+  const char* tests[] = {
+      "i(v)",   "i(v)t",   "i(v)te",
+      "i(v)er", "i(v)ter", "i(v)ti(v)trei(v)ei(v)ei(v)ei(v)ei(v)ei(v)ei(v)e"};
+  v8::base::RandomNumberGenerator rng;
+  for (size_t i = 0; i < ARRAY_SIZE(tests); ++i) {
+    IfBuilderGenerator m;
+    m.ParseIfThenElse(tests[i]);
+    m.RunRandom(&rng);
+  }
+}
+
+
+TEST(RunRandomExpressions) {
+  v8::base::RandomNumberGenerator rng;
+  for (int n_vars = 1; n_vars < 12; n_vars++) {
+    for (int i = 0; i < n_vars * n_vars + 10; i++) {
+      IfBuilderGenerator m;
+      m.ParseRandomIfThenElse(&rng, 1, n_vars);
+      m.RunRandom(&rng);
+    }
+  }
+}
+
+
+TEST(RunRandomIfElse) {
+  v8::base::RandomNumberGenerator rng;
+  for (int n_ifs = 1; n_ifs < 12; n_ifs++) {
+    for (int i = 0; i < n_ifs * n_ifs + 10; i++) {
+      IfBuilderGenerator m;
+      m.ParseRandomIfThenElse(&rng, n_ifs, 1);
+      m.RunRandom(&rng);
+    }
+  }
+}
+
+
+TEST(RunRandomIfElseExpressions) {
+  v8::base::RandomNumberGenerator rng;
+  for (int n_vars = 2; n_vars < 6; n_vars++) {
+    for (int n_ifs = 2; n_ifs < 7; n_ifs++) {
+      for (int i = 0; i < n_ifs * n_vars + 10; i++) {
+        IfBuilderGenerator m;
+        m.ParseRandomIfThenElse(&rng, n_ifs, n_vars);
+        m.RunRandom(&rng);
+      }
+    }
+  }
+}
+
+#endif
diff --git a/test/cctest/compiler/test-structured-machine-assembler.cc b/test/cctest/compiler/test-structured-machine-assembler.cc
new file mode 100644 (file)
index 0000000..ab06348
--- /dev/null
@@ -0,0 +1,1055 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/compiler/structured-machine-assembler.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+#if V8_TURBOFAN_TARGET
+
+using namespace v8::internal::compiler;
+
+typedef StructuredMachineAssembler::IfBuilder IfBuilder;
+typedef StructuredMachineAssembler::LoopBuilder Loop;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
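+// Test-only friend of StructuredMachineAssembler that inspects its private
+// environment to check whether a variable is still live in the current
+// block.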
+class StructuredMachineAssemblerFriend {
+ public:
+  static bool VariableAlive(StructuredMachineAssembler* m,
+                            const Variable& var) {
+    CHECK(m->current_environment_ != NULL);
+    int offset = var.offset_;
+    return offset < static_cast<int>(m->CurrentVars()->size()) &&
+           m->CurrentVars()->at(offset) != NULL;
+  }
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+
+TEST(RunVariable) {
+  StructuredMachineAssemblerTester<int32_t> m;
+
+  int32_t constant = 0x86c2bb16;
+
+  Variable v1 = m.NewVariable(m.Int32Constant(constant));
+  Variable v2 = m.NewVariable(v1.Get());
+  m.Return(v2.Get());
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunSimpleIf) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0xc4a3e3a6;
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Parameter(0)).Then();
+    m.Return(m.Int32Constant(constant));
+  }
+  m.Return(m.Word32Not(m.Int32Constant(constant)));
+
+  CHECK_EQ(~constant, m.Call(0));
+  CHECK_EQ(constant, m.Call(1));
+}
+
+
+TEST(RunSimpleIfVariable) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0xdb6f20c2;
+  Variable var = m.NewVariable(m.Int32Constant(constant));
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Parameter(0)).Then();
+    var.Set(m.Word32Not(var.Get()));
+  }
+  m.Return(var.Get());
+
+  CHECK_EQ(constant, m.Call(0));
+  CHECK_EQ(~constant, m.Call(1));
+}
+
+
+TEST(RunSimpleElse) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0xfc5eadf4;
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Parameter(0)).Else();
+    m.Return(m.Int32Constant(constant));
+  }
+  m.Return(m.Word32Not(m.Int32Constant(constant)));
+
+  CHECK_EQ(constant, m.Call(0));
+  CHECK_EQ(~constant, m.Call(1));
+}
+
+
+TEST(RunSimpleIfElse) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0xaa9c8cd3;
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Parameter(0)).Then();
+    m.Return(m.Int32Constant(constant));
+    cond.Else();
+    m.Return(m.Word32Not(m.Int32Constant(constant)));
+  }
+
+  CHECK_EQ(~constant, m.Call(0));
+  CHECK_EQ(constant, m.Call(1));
+}
+
+
+TEST(RunSimpleIfElseVariable) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0x67b6f39c;
+  Variable var = m.NewVariable(m.Int32Constant(constant));
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Parameter(0)).Then();
+    var.Set(m.Word32Not(m.Word32Not(var.Get())));
+    cond.Else();
+    var.Set(m.Word32Not(var.Get()));
+  }
+  m.Return(var.Get());
+
+  CHECK_EQ(~constant, m.Call(0));
+  CHECK_EQ(constant, m.Call(1));
+}
+
+
+TEST(RunSimpleIfNoThenElse) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0xd5e550ed;
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Parameter(0));
+  }
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call(0));
+  CHECK_EQ(constant, m.Call(1));
+}
+
+
+TEST(RunSimpleConjunctionVariable) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0xf8fb9ec6;
+  Variable var = m.NewVariable(m.Int32Constant(constant));
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Int32Constant(1)).And();
+    var.Set(m.Word32Not(var.Get()));
+    cond.If(m.Parameter(0)).Then();
+    var.Set(m.Word32Not(m.Word32Not(var.Get())));
+    cond.Else();
+    var.Set(m.Word32Not(var.Get()));
+  }
+  m.Return(var.Get());
+
+  CHECK_EQ(constant, m.Call(0));
+  CHECK_EQ(~constant, m.Call(1));
+}
+
+
+TEST(RunSimpleDisjunctionVariable) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0x118f6ffc;
+  Variable var = m.NewVariable(m.Int32Constant(constant));
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Int32Constant(0)).Or();
+    var.Set(m.Word32Not(var.Get()));
+    cond.If(m.Parameter(0)).Then();
+    var.Set(m.Word32Not(m.Word32Not(var.Get())));
+    cond.Else();
+    var.Set(m.Word32Not(var.Get()));
+  }
+  m.Return(var.Get());
+
+  CHECK_EQ(constant, m.Call(0));
+  CHECK_EQ(~constant, m.Call(1));
+}
+
+
+TEST(RunIfElse) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  {
+    IfBuilder cond(&m);
+    bool first = true;
+    FOR_INT32_INPUTS(i) {
+      Node* c = m.Int32Constant(*i);
+      if (first) {
+        cond.If(m.Word32Equal(m.Parameter(0), c)).Then();
+        m.Return(c);
+        first = false;
+      } else {
+        cond.Else();
+        cond.If(m.Word32Equal(m.Parameter(0), c)).Then();
+        m.Return(c);
+      }
+    }
+  }
+  m.Return(m.Int32Constant(333));
+
+  FOR_INT32_INPUTS(i) { CHECK_EQ(*i, m.Call(*i)); }
+}
+
+
+enum IfBuilderBranchType { kSkipBranch, kBranchFallsThrough, kBranchReturns };
+
+
+static IfBuilderBranchType all_branch_types[] = {
+    kSkipBranch, kBranchFallsThrough, kBranchReturns};
+
+
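+// Builds `p0 == c0 || p0 == c1 || ...` over the first `max` int32 test
+// inputs, with the then and else branches independently skipped, falling
+// through, or returning; matching inputs must take the then path and all
+// others the else path.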
+static void RunIfBuilderDisjunction(size_t max, IfBuilderBranchType then_type,
+                                    IfBuilderBranchType else_type) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  std::vector<int32_t> inputs = ValueHelper::int32_vector();
+  std::vector<int32_t>::const_iterator i = inputs.begin();
+  int32_t hit = 0x8c723c9a;
+  int32_t miss = 0x88a6b9f3;
+  {
+    Node* p0 = m.Parameter(0);
+    IfBuilder cond(&m);
+    for (size_t j = 0; j < max; j++, ++i) {
+      CHECK(i != inputs.end());  // Thank you STL.
+      if (j > 0) cond.Or();
+      cond.If(m.Word32Equal(p0, m.Int32Constant(*i)));
+    }
+    switch (then_type) {
+      case kSkipBranch:
+        break;
+      case kBranchFallsThrough:
+        cond.Then();
+        break;
+      case kBranchReturns:
+        cond.Then();
+        m.Return(m.Int32Constant(hit));
+        break;
+    }
+    switch (else_type) {
+      case kSkipBranch:
+        break;
+      case kBranchFallsThrough:
+        cond.Else();
+        break;
+      case kBranchReturns:
+        cond.Else();
+        m.Return(m.Int32Constant(miss));
+        break;
+    }
+  }
+  if (then_type != kBranchReturns || else_type != kBranchReturns) {
+    m.Return(m.Int32Constant(miss));
+  }
+
+  if (then_type != kBranchReturns) hit = miss;
+
+  i = inputs.begin();
+  for (size_t j = 0; i != inputs.end(); j++, ++i) {
+    int32_t result = m.Call(*i);
+    CHECK_EQ(j < max ? hit : miss, result);
+  }
+}
+
+
+TEST(RunIfBuilderDisjunction) {
+  size_t len = ValueHelper::int32_vector().size() - 1;
+  size_t max = len > 10 ? 10 : len - 1;
+  for (size_t i = 0; i < ARRAY_SIZE(all_branch_types); i++) {
+    for (size_t j = 0; j < ARRAY_SIZE(all_branch_types); j++) {
+      for (size_t size = 1; size < max; size++) {
+        RunIfBuilderDisjunction(size, all_branch_types[i], all_branch_types[j]);
+      }
+      RunIfBuilderDisjunction(len, all_branch_types[i], all_branch_types[j]);
+    }
+  }
+}
+
+
+static void RunIfBuilderConjunction(size_t max, IfBuilderBranchType then_type,
+                                    IfBuilderBranchType else_type) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  std::vector<int32_t> inputs = ValueHelper::int32_vector();
+  std::vector<int32_t>::const_iterator i = inputs.begin();
+  int32_t hit = 0xa0ceb9ca;
+  int32_t miss = 0x226cafaa;
+  {
+    IfBuilder cond(&m);
+    Node* p0 = m.Parameter(0);
+    for (size_t j = 0; j < max; j++, ++i) {
+      if (j > 0) cond.And();
+      cond.If(m.Word32NotEqual(p0, m.Int32Constant(*i)));
+    }
+    switch (then_type) {
+      case kSkipBranch:
+        break;
+      case kBranchFallsThrough:
+        cond.Then();
+        break;
+      case kBranchReturns:
+        cond.Then();
+        m.Return(m.Int32Constant(hit));
+        break;
+    }
+    switch (else_type) {
+      case kSkipBranch:
+        break;
+      case kBranchFallsThrough:
+        cond.Else();
+        break;
+      case kBranchReturns:
+        cond.Else();
+        m.Return(m.Int32Constant(miss));
+        break;
+    }
+  }
+  if (then_type != kBranchReturns || else_type != kBranchReturns) {
+    m.Return(m.Int32Constant(miss));
+  }
+
+  if (then_type != kBranchReturns) hit = miss;
+
+  i = inputs.begin();
+  for (size_t j = 0; i != inputs.end(); j++, ++i) {
+    int32_t result = m.Call(*i);
+    CHECK_EQ(j >= max ? hit : miss, result);
+  }
+}
+
+
+TEST(RunIfBuilderConjunction) {
+  size_t len = ValueHelper::int32_vector().size() - 1;
+  size_t max = len > 10 ? 10 : len - 1;
+  for (size_t i = 0; i < ARRAY_SIZE(all_branch_types); i++) {
+    for (size_t j = 0; j < ARRAY_SIZE(all_branch_types); j++) {
+      for (size_t size = 1; size < max; size++) {
+        RunIfBuilderConjunction(size, all_branch_types[i], all_branch_types[j]);
+      }
+      RunIfBuilderConjunction(len, all_branch_types[i], all_branch_types[j]);
+    }
+  }
+}
+
+
+static void RunDisjunctionVariables(int disjunctions, bool explicit_then,
+                                    bool explicit_else) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0x65a09535;
+
+  Node* cmp_val = m.Int32Constant(constant);
+  Node* one = m.Int32Constant(1);
+  Variable var = m.NewVariable(m.Parameter(0));
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Word32Equal(var.Get(), cmp_val));
+    for (int i = 0; i < disjunctions; i++) {
+      cond.Or();
+      var.Set(m.Int32Add(var.Get(), one));
+      cond.If(m.Word32Equal(var.Get(), cmp_val));
+    }
+    if (explicit_then) {
+      cond.Then();
+    }
+    if (explicit_else) {
+      cond.Else();
+      var.Set(m.Int32Add(var.Get(), one));
+    }
+  }
+  m.Return(var.Get());
+
+  int adds = disjunctions + (explicit_else ? 1 : 0);
+  int32_t input = constant - 2 * adds;
+  for (int i = 0; i < adds; i++) {
+    CHECK_EQ(input + adds, m.Call(input));
+    input++;
+  }
+  for (int i = 0; i < adds + 1; i++) {
+    CHECK_EQ(constant, m.Call(input));
+    input++;
+  }
+  for (int i = 0; i < adds; i++) {
+    CHECK_EQ(input + adds, m.Call(input));
+    input++;
+  }
+}
+
+
+TEST(RunDisjunctionVariables) {
+  for (int disjunctions = 0; disjunctions < 10; disjunctions++) {
+    RunDisjunctionVariables(disjunctions, false, false);
+    RunDisjunctionVariables(disjunctions, false, true);
+    RunDisjunctionVariables(disjunctions, true, false);
+    RunDisjunctionVariables(disjunctions, true, true);
+  }
+}
+
+
+static void RunConjunctionVariables(int conjunctions, bool explicit_then,
+                                    bool explicit_else) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  int32_t constant = 0x2c7f4b45;
+  Node* cmp_val = m.Int32Constant(constant);
+  Node* one = m.Int32Constant(1);
+  Variable var = m.NewVariable(m.Parameter(0));
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Word32NotEqual(var.Get(), cmp_val));
+    for (int i = 0; i < conjunctions; i++) {
+      cond.And();
+      var.Set(m.Int32Add(var.Get(), one));
+      cond.If(m.Word32NotEqual(var.Get(), cmp_val));
+    }
+    if (explicit_then) {
+      cond.Then();
+      var.Set(m.Int32Add(var.Get(), one));
+    }
+    if (explicit_else) {
+      cond.Else();
+    }
+  }
+  m.Return(var.Get());
+
+  int adds = conjunctions + (explicit_then ? 1 : 0);
+  int32_t input = constant - 2 * adds;
+  for (int i = 0; i < adds; i++) {
+    CHECK_EQ(input + adds, m.Call(input));
+    input++;
+  }
+  for (int i = 0; i < adds + 1; i++) {
+    CHECK_EQ(constant, m.Call(input));
+    input++;
+  }
+  for (int i = 0; i < adds; i++) {
+    CHECK_EQ(input + adds, m.Call(input));
+    input++;
+  }
+}
+
+
+TEST(RunConjunctionVariables) {
+  for (int conjunctions = 0; conjunctions < 10; conjunctions++) {
+    RunConjunctionVariables(conjunctions, false, false);
+    RunConjunctionVariables(conjunctions, false, true);
+    RunConjunctionVariables(conjunctions, true, false);
+    RunConjunctionVariables(conjunctions, true, true);
+  }
+}
+
+
+TEST(RunSimpleNestedIf) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
+  const size_t NUM_VALUES = 7;
+  std::vector<int32_t> inputs = ValueHelper::int32_vector();
+  CHECK(inputs.size() >= NUM_VALUES);
+  Node* values[NUM_VALUES];
+  for (size_t j = 0; j < NUM_VALUES; j++) {
+    values[j] = m.Int32Constant(inputs[j]);
+  }
+  {
+    IfBuilder if_0(&m);
+    if_0.If(m.Word32Equal(m.Parameter(0), values[0])).Then();
+    {
+      IfBuilder if_1(&m);
+      if_1.If(m.Word32Equal(m.Parameter(1), values[1])).Then();
+      { m.Return(values[3]); }
+      if_1.Else();
+      { m.Return(values[4]); }
+    }
+    if_0.Else();
+    {
+      IfBuilder if_1(&m);
+      if_1.If(m.Word32Equal(m.Parameter(1), values[2])).Then();
+      { m.Return(values[5]); }
+      if_1.Else();
+      { m.Return(values[6]); }
+    }
+  }
+
+  int32_t result = m.Call(inputs[0], inputs[1]);
+  CHECK_EQ(inputs[3], result);
+
+  result = m.Call(inputs[0], inputs[1] + 1);
+  CHECK_EQ(inputs[4], result);
+
+  result = m.Call(inputs[0] + 1, inputs[2]);
+  CHECK_EQ(inputs[5], result);
+
+  result = m.Call(inputs[0] + 1, inputs[2] + 1);
+  CHECK_EQ(inputs[6], result);
+}
+
+
+TEST(RunUnreachableBlockAfterIf) {
+  StructuredMachineAssemblerTester<int32_t> m;
+  {
+    IfBuilder cond(&m);
+    cond.If(m.Int32Constant(0)).Then();
+    m.Return(m.Int32Constant(1));
+    cond.Else();
+    m.Return(m.Int32Constant(2));
+  }
+  // This is unreachable.
+  m.Return(m.Int32Constant(3));
+  CHECK_EQ(2, m.Call());
+}
+
+
+TEST(RunUnreachableBlockAfterLoop) {
+  StructuredMachineAssemblerTester<int32_t> m;
+  {
+    Loop loop(&m);
+    m.Return(m.Int32Constant(1));
+  }
+  // This is unreachable.
+  m.Return(m.Int32Constant(3));
+  CHECK_EQ(1, m.Call());
+}
+
+
+TEST(RunSimpleLoop) {
+  StructuredMachineAssemblerTester<int32_t> m;
+  int32_t constant = 0x120c1f85;
+  {
+    Loop loop(&m);
+    m.Return(m.Int32Constant(constant));
+  }
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunSimpleLoopBreak) {
+  StructuredMachineAssemblerTester<int32_t> m;
+  int32_t constant = 0x10ddb0a6;
+  {
+    Loop loop(&m);
+    loop.Break();
+  }
+  m.Return(m.Int32Constant(constant));
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunCountToTen) {
+  StructuredMachineAssemblerTester<int32_t> m;
+  Variable i = m.NewVariable(m.Int32Constant(0));
+  Node* ten = m.Int32Constant(10);
+  Node* one = m.Int32Constant(1);
+  {
+    Loop loop(&m);
+    {
+      IfBuilder cond(&m);
+      cond.If(m.Word32Equal(i.Get(), ten)).Then();
+      loop.Break();
+    }
+    i.Set(m.Int32Add(i.Get(), one));
+  }
+  m.Return(i.Get());
+  CHECK_EQ(10, m.Call());
+}
+
+
+TEST(RunCountToTenAcc) {
+  StructuredMachineAssemblerTester<int32_t> m;
+  int32_t constant = 0xf27aed64;
+  Variable i = m.NewVariable(m.Int32Constant(0));
+  Variable var = m.NewVariable(m.Int32Constant(constant));
+  Node* ten = m.Int32Constant(10);
+  Node* one = m.Int32Constant(1);
+  {
+    Loop loop(&m);
+    {
+      IfBuilder cond(&m);
+      cond.If(m.Word32Equal(i.Get(), ten)).Then();
+      loop.Break();
+    }
+    i.Set(m.Int32Add(i.Get(), one));
+    var.Set(m.Int32Add(var.Get(), i.Get()));
+  }
+  m.Return(var.Get());
+
+  CHECK_EQ(constant + 10 + 9 * 5, m.Call());
+}
+
+
+TEST(RunSimpleNestedLoop) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  Node* zero = m.Int32Constant(0);
+  Node* one = m.Int32Constant(1);
+  Node* two = m.Int32Constant(2);
+  Node* three = m.Int32Constant(3);
+  {
+    Loop l1(&m);
+    {
+      Loop l2(&m);
+      {
+        IfBuilder cond(&m);
+        cond.If(m.Word32Equal(m.Parameter(0), one)).Then();
+        l1.Break();
+      }
+      {
+        Loop l3(&m);
+        {
+          IfBuilder cond(&m);
+          cond.If(m.Word32Equal(m.Parameter(0), two)).Then();
+          l2.Break();
+          cond.Else();
+          cond.If(m.Word32Equal(m.Parameter(0), three)).Then();
+          l3.Break();
+        }
+        m.Return(three);
+      }
+      m.Return(two);
+    }
+    m.Return(one);
+  }
+  m.Return(zero);
+
+  CHECK_EQ(0, m.Call(1));
+  CHECK_EQ(1, m.Call(2));
+  CHECK_EQ(2, m.Call(3));
+  CHECK_EQ(3, m.Call(4));
+}
+
+
+TEST(RunFib) {
+  StructuredMachineAssemblerTester<int32_t> m(kMachineWord32);
+
+  // Constants.
+  Node* zero = m.Int32Constant(0);
+  Node* one = m.Int32Constant(1);
+  Node* two = m.Int32Constant(2);
+  // Variables.
+  // cnt = input
+  Variable cnt = m.NewVariable(m.Parameter(0));
+  // if (cnt < 2) return cnt
+  {
+    IfBuilder lt2(&m);
+    lt2.If(m.Int32LessThan(cnt.Get(), two)).Then();
+    m.Return(cnt.Get());
+  }
+  // cnt -= 2
+  cnt.Set(m.Int32Sub(cnt.Get(), two));
+  // res = 1
+  Variable res = m.NewVariable(one);
+  {
+    // prv_0 = 1
+    // prv_1 = 1
+    Variable prv_0 = m.NewVariable(one);
+    Variable prv_1 = m.NewVariable(one);
+    // while (cnt != 0) {
+    Loop main(&m);
+    {
+      IfBuilder nz(&m);
+      nz.If(m.Word32Equal(cnt.Get(), zero)).Then();
+      main.Break();
+    }
+    // res = prv_0 + prv_1
+    // prv_0 = prv_1
+    // prv_1 = res
+    res.Set(m.Int32Add(prv_0.Get(), prv_1.Get()));
+    prv_0.Set(prv_1.Get());
+    prv_1.Set(res.Get());
+    // cnt--
+    cnt.Set(m.Int32Sub(cnt.Get(), one));
+  }
+  m.Return(res.Get());
+
+  int32_t values[] = {0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144};
+  for (size_t i = 0; i < ARRAY_SIZE(values); i++) {
+    CHECK_EQ(values[i], m.Call(static_cast<int32_t>(i)));
+  }
+}
+
+
+static int VariableIntroduction() {
+  while (true) {
+    int ret = 0;
+    for (int i = 0; i < 10; i++) {
+      for (int j = i; j < 10; j++) {
+        for (int k = j; k < 10; k++) {
+          ret++;
+        }
+        ret++;
+      }
+      ret++;
+    }
+    return ret;
+  }
+}
+
+
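+// Rebuilds the reference function above out of nested Loops and Variables,
+// introducing a fresh variable at every nesting level to exercise variable
+// introduction outside the start block.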
+TEST(RunVariableIntroduction) {
+  StructuredMachineAssemblerTester<int32_t> m;
+  Node* zero = m.Int32Constant(0);
+  Node* one = m.Int32Constant(1);
+  // Use an IfBuilder to get out of the start block.
+  {
+    IfBuilder i0(&m);
+    i0.If(zero).Then();
+    m.Return(one);
+  }
+  Node* ten = m.Int32Constant(10);
+  Variable v0 =
+      m.NewVariable(zero);  // Introduce variable outside the start block.
+  {
+    Loop l0(&m);
+    Variable ret = m.NewVariable(zero);  // Introduce loop variable.
+    {
+      Loop l1(&m);
+      {
+        IfBuilder i1(&m);
+        i1.If(m.Word32Equal(v0.Get(), ten)).Then();
+        l1.Break();
+      }
+      Variable v1 = m.NewVariable(v0.Get());  // Introduce loop variable.
+      {
+        Loop l2(&m);
+        {
+          IfBuilder i2(&m);
+          i2.If(m.Word32Equal(v1.Get(), ten)).Then();
+          l2.Break();
+        }
+        Variable v2 = m.NewVariable(v1.Get());  // Introduce loop variable.
+        {
+          Loop l3(&m);
+          {
+            IfBuilder i3(&m);
+            i3.If(m.Word32Equal(v2.Get(), ten)).Then();
+            l3.Break();
+          }
+          ret.Set(m.Int32Add(ret.Get(), one));
+          v2.Set(m.Int32Add(v2.Get(), one));
+        }
+        ret.Set(m.Int32Add(ret.Get(), one));
+        v1.Set(m.Int32Add(v1.Get(), one));
+      }
+      ret.Set(m.Int32Add(ret.Get(), one));
+      v0.Set(m.Int32Add(v0.Get(), one));
+    }
+    m.Return(ret.Get());  // Return loop variable.
+  }
+  CHECK_EQ(VariableIntroduction(), m.Call());
+}
+
+
+TEST(RunIfBuilderVariableLiveness) {
+  StructuredMachineAssemblerTester<int32_t> m;
+  typedef i::compiler::StructuredMachineAssemblerFriend F;
+  Node* zero = m.Int32Constant(0);
+  Variable v_outer = m.NewVariable(zero);
+  IfBuilder cond(&m);
+  cond.If(zero).Then();
+  Variable v_then = m.NewVariable(zero);
+  CHECK(F::VariableAlive(&m, v_outer));
+  CHECK(F::VariableAlive(&m, v_then));
+  cond.Else();
+  Variable v_else = m.NewVariable(zero);
+  CHECK(F::VariableAlive(&m, v_outer));
+  CHECK(F::VariableAlive(&m, v_else));
+  CHECK(!F::VariableAlive(&m, v_then));
+  cond.End();
+  CHECK(F::VariableAlive(&m, v_outer));
+  CHECK(!F::VariableAlive(&m, v_then));
+  CHECK(!F::VariableAlive(&m, v_else));
+}
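+
+// The checks above capture the liveness rule: a Variable introduced inside a
+// Then() or Else() arm dies when that arm closes, while a Variable introduced
+// before the IfBuilder stays alive through End().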
+
+
+TEST(RunSimpleExpression1) {
+  StructuredMachineAssemblerTester<int32_t> m;
+
+  int32_t constant = 0x0c2974ef;
+  Node* zero = m.Int32Constant(0);
+  Node* one = m.Int32Constant(1);
+  {
+    // if (((1 && 1) && 1) && 1) return constant; return 0;
+    IfBuilder cond(&m);
+    cond.OpenParen();
+    cond.OpenParen().If(one).And();
+    cond.If(one).CloseParen().And();
+    cond.If(one).CloseParen().And();
+    cond.If(one).Then();
+    m.Return(m.Int32Constant(constant));
+  }
+  m.Return(zero);
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunSimpleExpression2) {
+  StructuredMachineAssemblerTester<int32_t> m;
+
+  int32_t constant = 0x2eddc11b;
+  Node* zero = m.Int32Constant(0);
+  Node* one = m.Int32Constant(1);
+  {
+    // if (((0 || 1) && 1) && 1) return constant; return 0;
+    IfBuilder cond(&m);
+    cond.OpenParen();
+    cond.OpenParen().If(zero).Or();
+    cond.If(one).CloseParen().And();
+    cond.If(one).CloseParen().And();
+    cond.If(one).Then();
+    m.Return(m.Int32Constant(constant));
+  }
+  m.Return(zero);
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunSimpleExpression3) {
+  StructuredMachineAssemblerTester<int32_t> m;
+
+  int32_t constant = 0x9ed5e9ef;
+  Node* zero = m.Int32Constant(0);
+  Node* one = m.Int32Constant(1);
+  {
+    // if (1 && ((0 || 1) && 1) && 1) return constant; return 0;
+    IfBuilder cond(&m);
+    cond.If(one).And();
+    cond.OpenParen();
+    cond.OpenParen().If(zero).Or();
+    cond.If(one).CloseParen().And();
+    cond.If(one).CloseParen().And();
+    cond.If(one).Then();
+    m.Return(m.Int32Constant(constant));
+  }
+  m.Return(zero);
+
+  CHECK_EQ(constant, m.Call());
+}
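+
+// In the expression tests above, If() adds a condition, And() and Or() chain
+// conditions with short-circuit evaluation, and OpenParen()/CloseParen()
+// group sub-conditions, mirroring the parenthesized C++ expressions shown in
+// the comments.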
+
+
+TEST(RunSimpleExpressionVariable1) {
+  StructuredMachineAssemblerTester<int32_t> m;
+
+  int32_t constant = 0x4b40a986;
+  Node* one = m.Int32Constant(1);
+  Variable var = m.NewVariable(m.Int32Constant(constant));
+  {
+    // if (var.Get() && ((!var || var) && var) && var) {} return var;
+    // incrementing var in each environment (between condition evaluations).
+    IfBuilder cond(&m);
+    cond.If(var.Get()).And();
+    var.Set(m.Int32Add(var.Get(), one));
+    cond.OpenParen().OpenParen().If(m.Word32BinaryNot(var.Get())).Or();
+    var.Set(m.Int32Add(var.Get(), one));
+    cond.If(var.Get()).CloseParen().And();
+    var.Set(m.Int32Add(var.Get(), one));
+    cond.If(var.Get()).CloseParen().And();
+    var.Set(m.Int32Add(var.Get(), one));
+    cond.If(var.Get());
+  }
+  m.Return(var.Get());
+
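+  // All four increments execute because evaluation reaches every condition:
+  // var is non-zero, so each && continues, and the single || sees a false
+  // left side and falls through to its right side.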
+  CHECK_EQ(constant + 4, m.Call());
+}
+
+
+class QuicksortHelper : public StructuredMachineAssemblerTester<int32_t> {
+ public:
+  QuicksortHelper()
+      : StructuredMachineAssemblerTester(
+            MachineOperatorBuilder::pointer_rep(), kMachineWord32,
+            MachineOperatorBuilder::pointer_rep(), kMachineWord32),
+        input_(NULL),
+        stack_limit_(NULL),
+        one_(Int32Constant(1)),
+        stack_frame_size_(Int32Constant(kFrameVariables * 4)),
+        left_offset_(Int32Constant(0 * 4)),
+        right_offset_(Int32Constant(1 * 4)) {
+    Build();
+  }
+
+  int32_t DoCall(int32_t* input, int32_t input_length) {
+    int32_t stack_space[20];
+    // Do call.
+    int32_t return_val = Call(input, input_length, stack_space,
+                              static_cast<int32_t>(ARRAY_SIZE(stack_space)));
+    // A nonzero return value means the sort ran out of stack space.
+    if (return_val != 0) return return_val;
+    // Check sorted.
+    int32_t last = input[0];
+    for (int32_t i = 0; i < input_length; i++) {
+      CHECK(last <= input[i]);
+      last = input[i];
+    }
+    return return_val;
+  }
+
+ private:
+  void Inc32(const Variable& var) { var.Set(Int32Add(var.Get(), one_)); }
+  Node* Index(Node* index) { return Word32Shl(index, Int32Constant(2)); }
+  Node* ArrayLoad(Node* index) {
+    return Load(kMachineWord32, input_, Index(index));
+  }
+  void Swap(Node* a_index, Node* b_index) {
+    Node* a = ArrayLoad(a_index);
+    Node* b = ArrayLoad(b_index);
+    Store(kMachineWord32, input_, Index(a_index), b);
+    Store(kMachineWord32, input_, Index(b_index), a);
+  }
+  void AddToCallStack(const Variable& fp, Node* left, Node* right) {
+    {
+      // Stack limit check.
+      IfBuilder cond(this);
+      cond.If(IntPtrLessThanOrEqual(fp.Get(), stack_limit_)).Then();
+      Return(Int32Constant(-1));
+    }
+    Store(kMachineWord32, fp.Get(), left_offset_, left);
+    Store(kMachineWord32, fp.Get(), right_offset_, right);
+    fp.Set(IntPtrAdd(fp.Get(), ConvertInt32ToIntPtr(stack_frame_size_)));
+  }
+  void Build() {
+    Variable left = NewVariable(Int32Constant(0));
+    Variable right =
+        NewVariable(Int32Sub(Parameter(kInputLengthParameter), one_));
+    input_ = Parameter(kInputParameter);
+    Node* top_of_stack = Parameter(kStackParameter);
+    stack_limit_ = IntPtrSub(
+        top_of_stack, ConvertInt32ToIntPtr(Parameter(kStackLengthParameter)));
+    Variable fp = NewVariable(top_of_stack);
+    {
+      Loop outermost(this);
+      // Edge case: a 2-element array.
+      {
+        IfBuilder cond(this);
+        cond.If(Word32Equal(left.Get(), Int32Sub(right.Get(), one_))).And();
+        cond.If(Int32LessThanOrEqual(ArrayLoad(right.Get()),
+                                     ArrayLoad(left.Get()))).Then();
+        Swap(left.Get(), right.Get());
+      }
+      {
+        IfBuilder cond(this);
+        // Algorithm complete condition.
+        cond.If(WordEqual(top_of_stack, fp.Get())).And();
+        cond.If(Int32LessThanOrEqual(Int32Sub(right.Get(), one_), left.Get()))
+            .Then();
+        outermost.Break();
+        // 'Recursion' exit condition. Pop frame and continue.
+        cond.Else();
+        cond.If(Int32LessThanOrEqual(Int32Sub(right.Get(), one_), left.Get()))
+            .Then();
+        fp.Set(IntPtrSub(fp.Get(), ConvertInt32ToIntPtr(stack_frame_size_)));
+        left.Set(Load(kMachineWord32, fp.Get(), left_offset_));
+        right.Set(Load(kMachineWord32, fp.Get(), right_offset_));
+        outermost.Continue();
+      }
+      // Partition.
+      Variable store_index = NewVariable(left.Get());
+      {
+        Node* pivot_index =
+            Int32Div(Int32Add(left.Get(), right.Get()), Int32Constant(2));
+        Node* pivot = ArrayLoad(pivot_index);
+        Swap(pivot_index, right.Get());
+        Variable i = NewVariable(left.Get());
+        {
+          Loop partition(this);
+          {
+            IfBuilder cond(this);
+            // Partition complete.
+            cond.If(Word32Equal(i.Get(), right.Get())).Then();
+            partition.Break();
+            // Need swap.
+            cond.Else();
+            cond.If(Int32LessThanOrEqual(ArrayLoad(i.Get()), pivot)).Then();
+            Swap(i.Get(), store_index.Get());
+            Inc32(store_index);
+          }
+          Inc32(i);
+        }  // End partition loop.
+        Swap(store_index.Get(), right.Get());
+      }
+      // 'Recurse' on the left and right halves of the partition,
+      // tail-recursing on the second one.
+      AddToCallStack(fp, left.Get(), Int32Sub(store_index.Get(), one_));
+      left.Set(Int32Add(store_index.Get(), one_));
+    }  // End outermost loop.
+    Return(Int32Constant(0));
+  }
+
+  static const int kFrameVariables = 2;  // left, right
+  // Parameter offsets.
+  static const int kInputParameter = 0;
+  static const int kInputLengthParameter = 1;
+  static const int kStackParameter = 2;
+  static const int kStackLengthParameter = 3;
+  // Function inputs.
+  Node* input_;
+  Node* stack_limit_;
+  // Constants.
+  Node* const one_;
+  // Frame constants.
+  Node* const stack_frame_size_;
+  Node* const left_offset_;
+  Node* const right_offset_;
+};
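+
+// For reference, a hedged C++-style sketch of what Build() encodes: an
+// iterative quicksort driven by an explicit stack of (left, right) frames.
+// The left half of each partition is pushed with AddToCallStack() and the
+// right half is handled by looping (the "tail recursion"):
+//
+//   while (true) {
+//     if (left == right - 1 && a[right] <= a[left]) Swap(left, right);
+//     if (right - 1 <= left) {
+//       if (fp == top_of_stack) break;  // call stack empty: done
+//       fp -= frame_size;               // pop a (left, right) frame
+//       left = fp[0]; right = fp[1];
+//       continue;
+//     }
+//     store_index = Partition(left, right);  // pivot = middle element
+//     Push(left, store_index - 1);           // 'recurse' on the left half
+//     left = store_index + 1;                // loop on the right half
+//   }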
+
+
+TEST(RunSimpleQuicksort) {
+  QuicksortHelper m;
+  int32_t inputs[] = {9, 7, 1, 8, 11};
+  CHECK_EQ(0, m.DoCall(inputs, ARRAY_SIZE(inputs)));
+}
+
+
+TEST(RunRandomQuicksort) {
+  QuicksortHelper m;
+
+  v8::base::RandomNumberGenerator rng;
+  static const int kMaxLength = 40;
+  int32_t inputs[kMaxLength];
+
+  for (int length = 1; length < kMaxLength; length++) {
+    for (int i = 0; i < 70; i++) {
+      // Randomize inputs.
+      for (int j = 0; j < length; j++) {
+        inputs[j] = rng.NextInt(10) - 5;
+      }
+      CHECK_EQ(0, m.DoCall(inputs, length));
+    }
+  }
+}
+
+
+TEST(MultipleScopes) {
+  StructuredMachineAssemblerTester<int32_t> m;
+  for (int i = 0; i < 10; i++) {
+    IfBuilder b(&m);
+    b.If(m.Int32Constant(0)).Then();
+    m.NewVariable(m.Int32Constant(0));
+  }
+  m.Return(m.Int32Constant(0));
+  CHECK_EQ(0, m.Call());
+}
+
+#endif
diff --git a/test/cctest/compiler/value-helper.h b/test/cctest/compiler/value-helper.h
new file mode 100644 (file)
index 0000000..7b8fcc6
--- /dev/null
@@ -0,0 +1,122 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_VALUE_HELPER_H_
+#define V8_CCTEST_COMPILER_VALUE_HELPER_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-matchers.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "test/cctest/cctest.h"
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A collection of utilities related to numerical and heap values, including
+// example input values of various types (int32_t, uint32_t, double, etc.).
+class ValueHelper {
+ public:
+  Isolate* isolate_;
+
+  ValueHelper() : isolate_(CcTest::InitIsolateOnce()) {}
+
+  template <typename T>
+  void CheckConstant(T expected, Node* node) {
+    CHECK_EQ(expected, ValueOf<T>(node->op()));
+  }
+
+  void CheckFloat64Constant(double expected, Node* node) {
+    CHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
+    CHECK_EQ(expected, ValueOf<double>(node->op()));
+  }
+
+  void CheckNumberConstant(double expected, Node* node) {
+    CHECK_EQ(IrOpcode::kNumberConstant, node->opcode());
+    CHECK_EQ(expected, ValueOf<double>(node->op()));
+  }
+
+  void CheckInt32Constant(int32_t expected, Node* node) {
+    CHECK_EQ(IrOpcode::kInt32Constant, node->opcode());
+    CHECK_EQ(expected, ValueOf<int32_t>(node->op()));
+  }
+
+  void CheckUint32Constant(uint32_t expected, Node* node) {
+    CHECK_EQ(IrOpcode::kInt32Constant, node->opcode());
+    CHECK_EQ(expected, ValueOf<uint32_t>(node->op()));
+  }
+
+  void CheckHeapConstant(Object* expected, Node* node) {
+    CHECK_EQ(IrOpcode::kHeapConstant, node->opcode());
+    CHECK_EQ(expected, *ValueOf<Handle<Object> >(node->op()));
+  }
+
+  void CheckTrue(Node* node) {
+    CheckHeapConstant(isolate_->heap()->true_value(), node);
+  }
+
+  void CheckFalse(Node* node) {
+    CheckHeapConstant(isolate_->heap()->false_value(), node);
+  }
+
+  static std::vector<double> float64_vector() {
+    static const double nan = v8::base::OS::nan_value();
+    static const double values[] = {
+        0.125,           0.25,            0.375,          0.5,
+        1.25,            -1.75,           2,              5.125,
+        6.25,            0.0,             -0.0,           982983.25,
+        888,             2147483647.0,    -999.75,        3.1e7,
+        -2e66,           3e-88,           -2147483648.0,  V8_INFINITY,
+        -V8_INFINITY,    nan,             2147483647.375, 2147483647.75,
+        2147483648.0,    2147483648.25,   2147483649.25,  -2147483647.0,
+        -2147483647.125, -2147483647.875, -2147483648.25, -2147483649.5};
+    return std::vector<double>(&values[0], &values[ARRAY_SIZE(values)]);
+  }
+
+  static std::vector<int32_t> int32_vector() {
+    std::vector<uint32_t> values = uint32_vector();
+    return std::vector<int32_t>(values.begin(), values.end());
+  }
+
+  static std::vector<uint32_t> uint32_vector() {
+    static const uint32_t kValues[] = {
+        0x00000000, 0x00000001, 0xffffffff, 0x1b09788b, 0x04c5fce8, 0xcc0de5bf,
+        0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
+        0x0000009e, 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
+        0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
+        0x761c4761, 0x80000000, 0x88888888, 0xa0000000, 0xdddddddd, 0xe0000000,
+        0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
+        0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
+        0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff};
+    return std::vector<uint32_t>(&kValues[0], &kValues[ARRAY_SIZE(kValues)]);
+  }
+
+  static std::vector<double> nan_vector(size_t limit = 0) {
+    static const double nan = v8::base::OS::nan_value();
+    static const double values[] = {-nan,               -V8_INFINITY * -0.0,
+                                    -V8_INFINITY * 0.0, V8_INFINITY * -0.0,
+                                    V8_INFINITY * 0.0,  nan};
+    return std::vector<double>(&values[0], &values[ARRAY_SIZE(values)]);
+  }
+};
+
+// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... }
+// Watch out, these macros aren't hygienic; they pollute your scope. Thanks STL.
+#define FOR_INPUTS(ctype, itype, var)                           \
+  std::vector<ctype> var##_vec = ValueHelper::itype##_vector(); \
+  for (std::vector<ctype>::iterator var = var##_vec.begin();    \
+       var != var##_vec.end(); ++var)
+
+#define FOR_INT32_INPUTS(var) FOR_INPUTS(int32_t, int32, var)
+#define FOR_UINT32_INPUTS(var) FOR_INPUTS(uint32_t, uint32, var)
+#define FOR_FLOAT64_INPUTS(var) FOR_INPUTS(double, float64, var)
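+
+// A minimal usage sketch (hypothetical test body): each macro declares the
+// backing vector plus an iterator named 'var', so values are read by
+// dereferencing the loop variable. Nesting is fine since the hidden vectors
+// get distinct names (i_vec, j_vec):
+//
+//   FOR_INT32_INPUTS(i) {
+//     FOR_INT32_INPUTS(j) {
+//       int32_t sum = *i + *j;
+//       // ... use 'sum' as an expected value ...
+//     }
+//   }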
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_VALUE_HELPER_H_
index f26c4615a73401ccb027c9beb1a3d5577cd4c587..bbe9ebef899c825d0476d331e323692ed172254d 100644 (file)
@@ -201,7 +201,7 @@ TEST(CodeRange) {
   code_range.SetUp(code_range_size);
   size_t current_allocated = 0;
   size_t total_allocated = 0;
-  List<Block> blocks(1000);
+  List< ::Block> blocks(1000);
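+  // (Presumably '::Block' disambiguates from another Block type now visible
+  // in this file, and the space avoids the pre-C++11 "<:" digraph.)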
 
   while (total_allocated < 5 * code_range_size) {
     if (current_allocated < code_range_size / 10) {
@@ -218,7 +218,7 @@ TEST(CodeRange) {
                                                   requested,
                                                   &allocated);
       CHECK(base != NULL);
-      blocks.Add(Block(base, static_cast<int>(allocated)));
+      blocks.Add(::Block(base, static_cast<int>(allocated)));
       current_allocated += static_cast<int>(allocated);
       total_allocated += static_cast<int>(allocated);
     } else {
index 089f0b8a069067580998c99c733189ea8cebf04f..6183e65ee08af93a6bac8da72da1929839229d55 100644 (file)
@@ -32,6 +32,7 @@
 #include "src/arm/simulator-arm.h"
 #include "src/disassembler.h"
 #include "src/factory.h"
+#include "src/ostreams.h"
 
 using namespace v8::internal;
 
index 3ac3669750242fdf405e135e7afdef47cf8215cb..1e1b99e37494d5135c0e69f23972c61088260693 100644 (file)
@@ -10382,7 +10382,7 @@ TEST(process_nan_float) {
 
 static void ProcessNaNsHelper(double n, double m, double expected) {
   ASSERT(std::isnan(n) || std::isnan(m));
-  ASSERT(isnan(expected));
+  ASSERT(std::isnan(expected));
 
   SETUP();
   START();
@@ -10454,7 +10454,7 @@ TEST(process_nans_double) {
 
 static void ProcessNaNsHelper(float n, float m, float expected) {
   ASSERT(std::isnan(n) || std::isnan(m));
-  ASSERT(isnan(expected));
+  ASSERT(std::isnan(expected));
 
   SETUP();
   START();
@@ -10525,7 +10525,7 @@ TEST(process_nans_float) {
 
 
 static void DefaultNaNHelper(float n, float m, float a) {
-  ASSERT(std::isnan(n) || std::isnan(m) || isnan(a));
+  ASSERT(std::isnan(n) || std::isnan(m) || std::isnan(a));
 
   bool test_1op = std::isnan(n);
   bool test_2op = std::isnan(n) || std::isnan(m);
@@ -10653,7 +10653,7 @@ TEST(default_nan_float) {
 
 
 static void DefaultNaNHelper(double n, double m, double a) {
-  ASSERT(std::isnan(n) || std::isnan(m) || isnan(a));
+  ASSERT(std::isnan(n) || std::isnan(m) || std::isnan(a));
 
   bool test_1op = std::isnan(n);
   bool test_2op = std::isnan(n) || std::isnan(m);
index 4846efedcd81de4ab9a97ab55bf673037d5692e8..e8c7f951feb9b0cc73d2eb08191ab4535a0913d1 100644 (file)
@@ -33,6 +33,7 @@
 #include "src/disassembler.h"
 #include "src/factory.h"
 #include "src/macro-assembler.h"
+#include "src/ostreams.h"
 #include "src/serialize.h"
 #include "test/cctest/cctest.h"
 
index 2fcb6fbc6d1d7f4e750ecfca6b29e6c9f1edb4f8..3d305b650e807280961b334b502042911aad103c 100644 (file)
@@ -32,6 +32,7 @@
 #include "src/base/platform/platform.h"
 #include "src/factory.h"
 #include "src/macro-assembler.h"
+#include "src/ostreams.h"
 #include "src/serialize.h"
 #include "test/cctest/cctest.h"
 
diff --git a/test/cctest/test-checks.cc b/test/cctest/test-checks.cc
new file mode 100644 (file)
index 0000000..a49a7db
--- /dev/null
@@ -0,0 +1,26 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/checks.h"
+
+#include "test/cctest/cctest.h"
+
+
+TEST(CheckEqualsZeroAndMinusZero) {
+  CHECK_EQ(0.0, 0.0);
+  CHECK_NE(0.0, -0.0);
+  CHECK_NE(-0.0, 0.0);
+  CHECK_EQ(-0.0, -0.0);
+}
+
+
+TEST(CheckEqualsReflexivity) {
+  double inf = V8_INFINITY;
+  double nan = v8::base::OS::nan_value();
+  double constants[] = {-nan, -inf, -3.1415, -1.0,   -0.1, -0.0,
+                        0.0,  0.1,  1.0,     3.1415, inf,  nan};
+  for (size_t i = 0; i < ARRAY_SIZE(constants); ++i) {
+    CHECK_EQ(constants[i], constants[i]);
+  }
+}
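+
+// Note: the NaN and signed-zero cases above can only pass if CHECK_EQ and
+// CHECK_NE compare doubles at the bit level rather than with C++ '==',
+// which is neither reflexive for NaN nor able to tell -0.0 from 0.0;
+// presumably that is what src/checks.h implements for these to hold.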
index 794a6310e55881e5e7163291f81b502bdefed89a..8436df7c5ab23c7de3afc5b8c97d4f5362601489 100644 (file)
@@ -168,6 +168,11 @@ TEST(DisasmIa320) {
 
   __ nop();
   __ idiv(edx);
+  __ idiv(Operand(edx, ecx, times_1, 1));
+  __ idiv(Operand(esp, 12));
+  __ div(edx);
+  __ div(Operand(edx, ecx, times_1, 1));
+  __ div(Operand(esp, 12));
   __ mul(edx);
   __ neg(edx);
   __ not_(edx);
@@ -175,7 +180,9 @@ TEST(DisasmIa320) {
 
   __ imul(edx, Operand(ebx, ecx, times_4, 10000));
   __ imul(edx, ecx, 12);
+  __ imul(edx, Operand(edx, eax, times_2, 42), 8);
   __ imul(edx, ecx, 1000);
+  __ imul(edx, Operand(ebx, ecx, times_4, 1), 9000);
 
   __ inc(edx);
   __ inc(Operand(ebx, ecx, times_4, 10000));
@@ -197,15 +204,24 @@ TEST(DisasmIa320) {
   __ sar(edx, 1);
   __ sar(edx, 6);
   __ sar_cl(edx);
+  __ sar(Operand(ebx, ecx, times_4, 10000), 1);
+  __ sar(Operand(ebx, ecx, times_4, 10000), 6);
+  __ sar_cl(Operand(ebx, ecx, times_4, 10000));
   __ sbb(edx, Operand(ebx, ecx, times_4, 10000));
   __ shld(edx, Operand(ebx, ecx, times_4, 10000));
   __ shl(edx, 1);
   __ shl(edx, 6);
   __ shl_cl(edx);
+  __ shl(Operand(ebx, ecx, times_4, 10000), 1);
+  __ shl(Operand(ebx, ecx, times_4, 10000), 6);
+  __ shl_cl(Operand(ebx, ecx, times_4, 10000));
   __ shrd(edx, Operand(ebx, ecx, times_4, 10000));
   __ shr(edx, 1);
   __ shr(edx, 7);
   __ shr_cl(edx);
+  __ shr(Operand(ebx, ecx, times_4, 10000), 1);
+  __ shr(Operand(ebx, ecx, times_4, 10000), 6);
+  __ shr_cl(Operand(ebx, ecx, times_4, 10000));
 
 
   // Immediates
@@ -441,6 +457,14 @@ TEST(DisasmIa320) {
     }
   }
 
+  // xchg.
+  {
+    __ xchg(eax, eax);
+    __ xchg(eax, ebx);
+    __ xchg(ebx, ebx);
+    __ xchg(ebx, Operand(esp, 12));
+  }
+
   // Nop instructions
   for (int i = 0; i < 16; i++) {
     __ Nop(i);
index 5f6312bd147519c87a26efc0b7647d67007022fd..4778b04bb71d2883ad5046792a5e1a3c6efe62a9 100644 (file)
@@ -420,6 +420,14 @@ TEST(DisasmX64) {
     }
   }
 
+  // xchg.
+  {
+    __ xchgq(rax, rax);
+    __ xchgq(rax, rbx);
+    __ xchgq(rbx, rbx);
+    __ xchgq(rbx, Operand(rsp, 12));
+  }
+
   // Nop instructions
   for (int i = 0; i < 16; i++) {
     __ Nop(i);
index f55a625d6b6b5ff72a18525c6ff2667d26c9025b..b82b7c44ac184ac143357001135ab96e3cc466d9 100644 (file)
@@ -42,6 +42,7 @@
 #include "src/scanner-character-streams.h"
 #include "src/token.h"
 #include "src/utils.h"
+
 #include "test/cctest/cctest.h"
 
 TEST(ScanKeywords) {
@@ -2852,6 +2853,167 @@ TEST(RegressionLazyFunctionWithErrorWithArg) {
 }
 
 
+TEST(SerializationOfMaybeAssignmentFlag) {
+  i::Isolate* isolate = CcTest::i_isolate();
+  i::Factory* factory = isolate->factory();
+  i::HandleScope scope(isolate);
+  LocalContext env;
+
+  const char* src =
+      "function h() {"
+      "  var result = [];"
+      "  function f() {"
+      "    result.push(2);"
+      "  }"
+      "  function assertResult(r) {"
+      "    f();"
+      "    result = [];"
+      "  }"
+      "  assertResult([2]);"
+      "  assertResult([2]);"
+      "  return f;"
+      "};"
+      "h();";
+
+  i::ScopedVector<char> program(Utf8LengthHelper(src) + 1);
+  i::SNPrintF(program, "%s", src);
+  i::Handle<i::String> source = factory->InternalizeUtf8String(program.start());
+  source->PrintOn(stdout);
+  printf("\n");
+  i::Zone zone(isolate);
+  v8::Local<v8::Value> v = CompileRun(src);
+  i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
+  i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
+  i::Context* context = f->context();
+  i::AstValueFactory avf(&zone, isolate->heap()->HashSeed());
+  avf.Internalize(isolate);
+  const i::AstRawString* name = avf.GetOneByteString("result");
+  i::Handle<i::String> str = name->string();
+  CHECK(str->IsInternalizedString());
+  i::Scope* global_scope =
+      new (&zone) i::Scope(NULL, i::GLOBAL_SCOPE, &avf, &zone);
+  global_scope->Initialize();
+  i::Scope* s = i::Scope::DeserializeScopeChain(context, global_scope, &zone);
+  ASSERT(s != global_scope);
+  ASSERT(name != NULL);
+
+  // Get result from h's function context (that is f's context)
+  i::Variable* var = s->Lookup(name);
+
+  CHECK(var != NULL);
+  // Maybe assigned should survive deserialization
+  CHECK(var->maybe_assigned() == i::kMaybeAssigned);
+  // TODO(sigurds) Figure out if is_used should survive context serialization.
+}
+
+
+TEST(IfArgumentsArrayAccessedThenParametersMaybeAssigned) {
+  i::Isolate* isolate = CcTest::i_isolate();
+  i::Factory* factory = isolate->factory();
+  i::HandleScope scope(isolate);
+  LocalContext env;
+
+
+  const char* src =
+      "function f(x) {"
+      "    var a = arguments;"
+      "    function g(i) {"
+      "      ++a[0];"
+      "    };"
+      "    return g;"
+      "  }"
+      "f(0);";
+
+  i::ScopedVector<char> program(Utf8LengthHelper(src) + 1);
+  i::SNPrintF(program, "%s", src);
+  i::Handle<i::String> source = factory->InternalizeUtf8String(program.start());
+  source->PrintOn(stdout);
+  printf("\n");
+  i::Zone zone(isolate);
+  v8::Local<v8::Value> v = CompileRun(src);
+  i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
+  i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
+  i::Context* context = f->context();
+  i::AstValueFactory avf(&zone, isolate->heap()->HashSeed());
+  avf.Internalize(isolate);
+
+  i::Scope* global_scope =
+      new (&zone) i::Scope(NULL, i::GLOBAL_SCOPE, &avf, &zone);
+  global_scope->Initialize();
+  i::Scope* s = i::Scope::DeserializeScopeChain(context, global_scope, &zone);
+  ASSERT(s != global_scope);
+  const i::AstRawString* name_x = avf.GetOneByteString("x");
+
+  // Get result from f's function context (that is g's outer context)
+  i::Variable* var_x = s->Lookup(name_x);
+  CHECK(var_x != NULL);
+  CHECK(var_x->maybe_assigned() == i::kMaybeAssigned);
+}
+
+
+TEST(ExportsMaybeAssigned) {
+  i::FLAG_use_strict = true;
+  i::FLAG_harmony_scoping = true;
+  i::FLAG_harmony_modules = true;
+
+  i::Isolate* isolate = CcTest::i_isolate();
+  i::Factory* factory = isolate->factory();
+  i::HandleScope scope(isolate);
+  LocalContext env;
+
+  const char* src =
+      "module A {"
+      "  export var x = 1;"
+      "  export function f() { return x };"
+      "  export const y = 2;"
+      "  module B {}"
+      "  export module C {}"
+      "};"
+      "A.f";
+
+  i::ScopedVector<char> program(Utf8LengthHelper(src) + 1);
+  i::SNPrintF(program, "%s", src);
+  i::Handle<i::String> source = factory->InternalizeUtf8String(program.start());
+  source->PrintOn(stdout);
+  printf("\n");
+  i::Zone zone(isolate);
+  v8::Local<v8::Value> v = CompileRun(src);
+  i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
+  i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
+  i::Context* context = f->context();
+  i::AstValueFactory avf(&zone, isolate->heap()->HashSeed());
+  avf.Internalize(isolate);
+
+  i::Scope* global_scope =
+      new (&zone) i::Scope(NULL, i::GLOBAL_SCOPE, &avf, &zone);
+  global_scope->Initialize();
+  i::Scope* s = i::Scope::DeserializeScopeChain(context, global_scope, &zone);
+  ASSERT(s != global_scope);
+  const i::AstRawString* name_x = avf.GetOneByteString("x");
+  const i::AstRawString* name_f = avf.GetOneByteString("f");
+  const i::AstRawString* name_y = avf.GetOneByteString("y");
+  const i::AstRawString* name_B = avf.GetOneByteString("B");
+  const i::AstRawString* name_C = avf.GetOneByteString("C");
+
+  // Look up module A's exports via f's deserialized context chain.
+  i::Variable* var_x = s->Lookup(name_x);
+  CHECK(var_x != NULL);
+  CHECK(var_x->maybe_assigned() == i::kMaybeAssigned);
+  i::Variable* var_f = s->Lookup(name_f);
+  CHECK(var_f != NULL);
+  CHECK(var_f->maybe_assigned() == i::kMaybeAssigned);
+  i::Variable* var_y = s->Lookup(name_y);
+  CHECK(var_y != NULL);
+  CHECK(var_y->maybe_assigned() == i::kNotAssigned);
+  i::Variable* var_B = s->Lookup(name_B);
+  CHECK(var_B != NULL);
+  CHECK(var_B->maybe_assigned() == i::kNotAssigned);
+  i::Variable* var_C = s->Lookup(name_C);
+  CHECK(var_C != NULL);
+  CHECK(var_C->maybe_assigned() == i::kNotAssigned);
+}
+
+
 TEST(InnerAssignment) {
   i::Isolate* isolate = CcTest::i_isolate();
   i::Factory* factory = isolate->factory();
@@ -2940,6 +3102,10 @@ TEST(InnerAssignment) {
     { "(function(x) { eval(''); })", true, false },
   };
 
+  // Padding comment used to make functions long enough to trigger lazy
+  // compilation.
+  int comment_len = 2048;
+  i::ScopedVector<char> comment(comment_len + 1);
+  i::SNPrintF(comment, "/*%0*d*/", comment_len - 4, 0);
   int prefix_len = Utf8LengthHelper(prefix);
   int midfix_len = Utf8LengthHelper(midfix);
   int suffix_len = Utf8LengthHelper(suffix);
@@ -2947,36 +3113,48 @@ TEST(InnerAssignment) {
     const char* outer = outers[i].source;
     int outer_len = Utf8LengthHelper(outer);
     for (unsigned j = 0; j < ARRAY_SIZE(inners); ++j) {
-      if (outers[i].strict && inners[j].with) continue;
-      const char* inner = inners[j].source;
-      int inner_len = Utf8LengthHelper(inner);
-      int len = prefix_len + outer_len + midfix_len + inner_len + suffix_len;
-      i::ScopedVector<char> program(len + 1);
-      i::SNPrintF(program, "%s%s%s%s%s", prefix, outer, midfix, inner, suffix);
-      i::Handle<i::String> source =
-          factory->InternalizeUtf8String(program.start());
-      source->PrintOn(stdout);
-      printf("\n");
-
-      i::Handle<i::Script> script = factory->NewScript(source);
-      i::CompilationInfoWithZone info(script);
-      i::Parser parser(&info);
-      parser.set_allow_harmony_scoping(true);
-      CHECK(parser.Parse());
-      CHECK(i::Rewriter::Rewrite(&info));
-      CHECK(i::Scope::Analyze(&info));
-      CHECK(info.function() != NULL);
-
-      i::Scope* scope = info.function()->scope();
-      CHECK_EQ(scope->inner_scopes()->length(), 1);
-      i::Scope* inner_scope = scope->inner_scopes()->at(0);
-      const i::AstRawString* var_name =
-          info.ast_value_factory()->GetOneByteString("x");
-      i::Variable* var = inner_scope->Lookup(var_name);
-      bool expected = outers[i].assigned || inners[j].assigned;
-      CHECK(var != NULL);
-      CHECK(var->is_used() || !expected);
-      CHECK(var->maybe_assigned() == expected);
+      for (unsigned outer_lazy = 0; outer_lazy < 2; ++outer_lazy) {
+        for (unsigned inner_lazy = 0; inner_lazy < 2; ++inner_lazy) {
+          if (outers[i].strict && inners[j].with) continue;
+          const char* inner = inners[j].source;
+          int inner_len = Utf8LengthHelper(inner);
+
+          int outer_comment_len = outer_lazy ? comment_len : 0;
+          int inner_comment_len = inner_lazy ? comment_len : 0;
+          const char* outer_comment = outer_lazy ? comment.start() : "";
+          const char* inner_comment = inner_lazy ? comment.start() : "";
+          int len = prefix_len + outer_comment_len + outer_len + midfix_len +
+                    inner_comment_len + inner_len + suffix_len;
+          i::ScopedVector<char> program(len + 1);
+
+          i::SNPrintF(program, "%s%s%s%s%s%s%s", prefix, outer_comment, outer,
+                      midfix, inner_comment, inner, suffix);
+          i::Handle<i::String> source =
+              factory->InternalizeUtf8String(program.start());
+          source->PrintOn(stdout);
+          printf("\n");
+
+          i::Handle<i::Script> script = factory->NewScript(source);
+          i::CompilationInfoWithZone info(script);
+          i::Parser parser(&info);
+          parser.set_allow_harmony_scoping(true);
+          CHECK(parser.Parse());
+          CHECK(i::Rewriter::Rewrite(&info));
+          CHECK(i::Scope::Analyze(&info));
+          CHECK(info.function() != NULL);
+
+          i::Scope* scope = info.function()->scope();
+          CHECK_EQ(scope->inner_scopes()->length(), 1);
+          i::Scope* inner_scope = scope->inner_scopes()->at(0);
+          const i::AstRawString* var_name =
+              info.ast_value_factory()->GetOneByteString("x");
+          i::Variable* var = inner_scope->Lookup(var_name);
+          bool expected = outers[i].assigned || inners[j].assigned;
+          CHECK(var != NULL);
+          CHECK(var->is_used() || !expected);
+          CHECK((var->maybe_assigned() == i::kMaybeAssigned) == expected);
+        }
+      }
     }
   }
 }
index 13ba8c6cc022e5bc58e8e437459191125d9163aa..71227b59f71f14558ea802b4a32e6b6656b5b0f7 100644 (file)
@@ -33,6 +33,7 @@
 #include "src/ast.h"
 #include "src/char-predicates-inl.h"
 #include "src/jsregexp.h"
+#include "src/ostreams.h"
 #include "src/parser.h"
 #include "src/regexp-macro-assembler.h"
 #include "src/regexp-macro-assembler-irregexp.h"
index 9ab023fe1dd8281b6196086336ad9743ecb1e895..066c99703763dd2cc94e6dd6d5b00799350e49f6 100644 (file)
@@ -8,6 +8,7 @@
 #include "src/v8.h"
 
 #include "src/objects.h"
+#include "src/ostreams.h"
 #include "test/cctest/cctest.h"
 
 using namespace v8::internal;
index ae8f5189725c549463851c1a76d5c020095b5043..7165c3845a25e8be4427683f83a96272ce501ef3 100644 (file)
   "PushWithContext": [SKIP],
   "PushCatchContext": [SKIP],
   "PushModuleContext": [SKIP],
+  "LoadLookupSlot": [SKIP],
+  "LoadLookupSlotNoReferenceError": [SKIP],
+  "ResolvePossiblyDirectEval": [SKIP],
+  "ForInInit": [SKIP],
+  "ForInNext": [SKIP],
 
   # TODO(jkummerow): Figure out what to do about inlined functions.
   "_GeneratorNext": [SKIP],
index d0caafa27cd140f8b649112395057224f70f638f..e9aba1d3c92d0f7a4ecf47df08f091fc210fe7a4 100644 (file)
@@ -137,7 +137,7 @@ OptTracker.prototype.DisableAsserts_ = function(func) {
     case OptTracker.OptimizationState.NEVER:
       return true;
   }
-  return false;
+  return true;
 }
 // (End of class OptTracker.)
 
index f329deb3609173915461fcd4d3690ffb21b5b8fb..a4f6eb0009c4c549520ca95181fcacd53dfff873 100644 (file)
   # Issue 3389: deopt_every_n_garbage_collections is unsafe
   'regress/regress-2653': [SKIP],
 
+  ##############################################################################
+  # TurboFan compiler failures.
+
+  # TODO(mstarzinger): An arguments object materialized in the prologue can't
+  # be accessed indirectly. Either we drop that requirement or wait for support
+  # from the deoptimizer to do that.
+  'arguments-indirect': [PASS, NO_VARIANTS],
+
+  # TODO(mstarzinger): Sometimes the try-catch blacklist fails.
+  'debug-references': [PASS, NO_VARIANTS],
+  'regress/regress-263': [PASS, NO_VARIANTS],
+
+  # Some tests are over-restrictive about object layout.
+  'array-constructor-feedback': [PASS, NO_VARIANTS],
+  'array-feedback': [PASS, NO_VARIANTS],
+  'fast-non-keyed': [PASS, NO_VARIANTS],
+  'track-fields': [PASS, NO_VARIANTS],
+
+  # Some tests are just too slow to run for now.
+  'array-store-and-grow': [PASS, NO_VARIANTS],
+  'big-object-literal': [PASS, NO_VARIANTS],
+  'bit-not': [PASS, NO_VARIANTS],
+  'elements-kind': [PASS, NO_VARIANTS],
+  'elements-transition': [PASS, NO_VARIANTS],
+  'json2': [PASS, NO_VARIANTS],
+  'packed-elements': [PASS, NO_VARIANTS],
+  'unbox-double-arrays': [PASS, NO_VARIANTS],
+  'whitespaces': [PASS, NO_VARIANTS],
+  'compiler/optimized-for-in': [PASS, NO_VARIANTS],
+  'compiler/osr-assert': [PASS, NO_VARIANTS],
+  'compiler/osr-regress-max-locals': [PASS, NO_VARIANTS],
+  'es7/object-observe': [PASS, NO_VARIANTS],
+  'regress/regress-1167': [PASS, NO_VARIANTS],
+  'regress/regress-201': [PASS, NO_VARIANTS],
+  'regress/regress-2185-2': [PASS, NO_VARIANTS],
+  'regress/regress-284': [PASS, NO_VARIANTS],
+  'regress/regress-91008': [PASS, NO_VARIANTS],
+  'regress/string-set-char-deopt': [PASS, NO_VARIANTS],
+  'tools/profviz': [PASS, NO_VARIANTS],
+
+  # Support for breakpoints requires special relocation info for DebugBreak.
+  'debug-clearbreakpointgroup': [PASS, NO_VARIANTS],
+  'debug-step-2': [PASS, NO_VARIANTS],
+  'regress/regress-debug-deopt-while-recompile': [PASS, NO_VARIANTS],
+  'regress/regress-opt-after-debug-deopt': [PASS, NO_VARIANTS],
+
+  # Support for %GetFrameDetails is missing and requires checkpoints.
+  'debug-backtrace-text': [PASS, NO_VARIANTS],
+  'debug-break-inline': [PASS, NO_VARIANTS],
+  'debug-evaluate-arguments': [PASS, NO_VARIANTS],
+  'debug-evaluate-bool-constructor': [PASS, NO_VARIANTS],
+  'debug-evaluate-closure': [PASS, NO_VARIANTS],
+  'debug-evaluate-const': [PASS, NO_VARIANTS],
+  'debug-evaluate-locals-optimized-double': [PASS, NO_VARIANTS],
+  'debug-evaluate-locals-optimized': [PASS, NO_VARIANTS],
+  'debug-evaluate-locals': [PASS, NO_VARIANTS],
+  'debug-evaluate-with-context': [PASS, NO_VARIANTS],
+  'debug-evaluate-with': [PASS, NO_VARIANTS],
+  'debug-liveedit-double-call': [PASS, NO_VARIANTS],
+  'debug-liveedit-restart-frame': [PASS, NO_VARIANTS],
+  'debug-receiver': [PASS, NO_VARIANTS],
+  'debug-return-value': [PASS, NO_VARIANTS],
+  'debug-scopes': [PASS, NO_VARIANTS],
+  'debug-set-variable-value': [PASS, NO_VARIANTS],
+  'debug-step-stub-callfunction': [PASS, NO_VARIANTS],
+  'debug-stepin-accessor': [PASS, NO_VARIANTS],
+  'debug-stepin-builtin': [PASS, NO_VARIANTS],
+  'debug-stepin-constructor': [PASS, NO_VARIANTS],
+  'debug-stepin-function-call': [PASS, NO_VARIANTS],
+  'debug-stepnext-do-while': [PASS, NO_VARIANTS],
+  'debug-stepout-recursive-function': [PASS, NO_VARIANTS],
+  'debug-stepout-scope-part1': [PASS, NO_VARIANTS],
+  'debug-stepout-scope-part2': [PASS, NO_VARIANTS],
+  'debug-stepout-scope-part3': [PASS, NO_VARIANTS],
+  'debug-stepout-scope-part7': [PASS, NO_VARIANTS],
+  'debug-stepout-to-builtin': [PASS, NO_VARIANTS],
+  'es6/debug-promises-throw-in-constructor': [PASS, NO_VARIANTS],
+  'es6/debug-promises-throw-in-reject': [PASS, NO_VARIANTS],
+  'es6/debug-promises-uncaught-all': [PASS, NO_VARIANTS],
+  'es6/debug-promises-uncaught-uncaught': [PASS, NO_VARIANTS],
+  'harmony/debug-blockscopes': [PASS, NO_VARIANTS],
+  'harmony/generators-debug-scopes': [PASS, NO_VARIANTS],
+  'regress/regress-1081309': [PASS, NO_VARIANTS],
+  'regress/regress-1170187': [PASS, NO_VARIANTS],
+  'regress/regress-119609': [PASS, NO_VARIANTS],
+  'regress/regress-131994': [PASS, NO_VARIANTS],
+  'regress/regress-269': [PASS, NO_VARIANTS],
+  'regress/regress-325676': [PASS, NO_VARIANTS],
+  'regress/regress-crbug-107996': [PASS, NO_VARIANTS],
+  'regress/regress-crbug-171715': [PASS, NO_VARIANTS],
+  'regress/regress-crbug-222893': [PASS, NO_VARIANTS],
+  'regress/regress-crbug-259300': [PASS, NO_VARIANTS],
+  'regress/regress-frame-details-null-receiver': [PASS, NO_VARIANTS],
+
+  # Support for ES6 generators is missing.
+  'regress-3225': [PASS, NO_VARIANTS],
+  'harmony/generators-debug-liveedit': [PASS, NO_VARIANTS],
+  'harmony/generators-iteration': [PASS, NO_VARIANTS],
+  'harmony/generators-parsing': [PASS, NO_VARIANTS],
+  'harmony/generators-poisoned-properties': [PASS, NO_VARIANTS],
+  'harmony/generators-relocation': [PASS, NO_VARIANTS],
+  'harmony/regress/regress-2681': [PASS, NO_VARIANTS],
+  'harmony/regress/regress-2691': [PASS, NO_VARIANTS],
+  'harmony/regress/regress-3280': [PASS, NO_VARIANTS],
+
+  # Support for ES6 for-of iteration is missing.
+  'harmony/array-iterator': [PASS, NO_VARIANTS],
+  'harmony/iteration-semantics': [PASS, NO_VARIANTS],
+  'harmony/string-iterator': [PASS, NO_VARIANTS],
+  'harmony/typed-array-iterator': [PASS, NO_VARIANTS],
+
   ##############################################################################
   # Too slow in debug mode with --stress-opt mode.
   'compiler/regress-stacktrace-methods': [PASS, ['mode == debug', SKIP]],
diff --git a/test/mjsunit/runtime-gen/classof.js b/test/mjsunit/runtime-gen/classof.js
deleted file mode 100644 (file)
index 59fdde8..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-%_ClassOf(_obj);
index 00a4158d3e78496c4e92521b336982a5c43bf7f3..a22ac16251476b724e5e3140f79a33579d6c7522 100755 (executable)
@@ -47,9 +47,9 @@ EXPAND_MACROS = [
 # that the parser doesn't bit-rot. Change the values as needed when you add,
 # remove or change runtime functions, but make sure we don't lose our ability
 # to parse them!
-EXPECTED_FUNCTION_COUNT = 422
-EXPECTED_FUZZABLE_COUNT = 336
-EXPECTED_CCTEST_COUNT = 8
+EXPECTED_FUNCTION_COUNT = 427
+EXPECTED_FUZZABLE_COUNT = 338
+EXPECTED_CCTEST_COUNT = 11
 EXPECTED_UNKNOWN_COUNT = 4
 EXPECTED_BUILTINS_COUNT = 816
 
index d7afa3be4005be300eff1c434d24f86c7177ad4d..550bb6896ccfdb3226259dae30461c8fe13682f2 100644 (file)
         '../../src/codegen.h',
         '../../src/compilation-cache.cc',
         '../../src/compilation-cache.h',
+        '../../src/compiler/ast-graph-builder.cc',
+        '../../src/compiler/ast-graph-builder.h',
+        '../../src/compiler/code-generator-impl.h',
+        '../../src/compiler/code-generator.cc',
+        '../../src/compiler/code-generator.h',
+        '../../src/compiler/common-node-cache.h',
+        '../../src/compiler/control-builders.cc',
+        '../../src/compiler/control-builders.h',
+        '../../src/compiler/frame.h',
+        '../../src/compiler/gap-resolver.cc',
+        '../../src/compiler/gap-resolver.h',
+        '../../src/compiler/generic-algorithm-inl.h',
+        '../../src/compiler/generic-algorithm.h',
+        '../../src/compiler/generic-graph.h',
+        '../../src/compiler/generic-node-inl.h',
+        '../../src/compiler/generic-node.h',
+        '../../src/compiler/graph-builder.cc',
+        '../../src/compiler/graph-builder.h',
+        '../../src/compiler/graph-inl.h',
+        '../../src/compiler/graph-reducer.cc',
+        '../../src/compiler/graph-reducer.h',
+        '../../src/compiler/graph-replay.cc',
+        '../../src/compiler/graph-replay.h',
+        '../../src/compiler/graph-visualizer.cc',
+        '../../src/compiler/graph-visualizer.h',
+        '../../src/compiler/graph.cc',
+        '../../src/compiler/graph.h',
+        '../../src/compiler/instruction-codes.h',
+        '../../src/compiler/instruction-selector-impl.h',
+        '../../src/compiler/instruction-selector.cc',
+        '../../src/compiler/instruction-selector.h',
+        '../../src/compiler/instruction.cc',
+        '../../src/compiler/instruction.h',
+        '../../src/compiler/js-context-specialization.cc',
+        '../../src/compiler/js-context-specialization.h',
+        '../../src/compiler/js-generic-lowering.cc',
+        '../../src/compiler/js-generic-lowering.h',
+        '../../src/compiler/js-graph.cc',
+        '../../src/compiler/js-graph.h',
+        '../../src/compiler/js-operator.h',
+        '../../src/compiler/js-typed-lowering.cc',
+        '../../src/compiler/js-typed-lowering.h',
+        '../../src/compiler/linkage-impl.h',
+        '../../src/compiler/linkage.cc',
+        '../../src/compiler/linkage.h',
+        '../../src/compiler/lowering-builder.cc',
+        '../../src/compiler/lowering-builder.h',
+        '../../src/compiler/machine-node-factory.h',
+        '../../src/compiler/machine-operator-reducer.cc',
+        '../../src/compiler/machine-operator-reducer.h',
+        '../../src/compiler/machine-operator.h',
+        '../../src/compiler/node-aux-data-inl.h',
+        '../../src/compiler/node-aux-data.h',
+        '../../src/compiler/node-cache.cc',
+        '../../src/compiler/node-cache.h',
+        '../../src/compiler/node-matchers.h',
+        '../../src/compiler/node-properties-inl.h',
+        '../../src/compiler/node-properties.h',
+        '../../src/compiler/node.cc',
+        '../../src/compiler/node.h',
+        '../../src/compiler/operator-properties-inl.h',
+        '../../src/compiler/operator-properties.h',
+        '../../src/compiler/operator.h',
+        '../../src/compiler/phi-reducer.h',
+        '../../src/compiler/pipeline.cc',
+        '../../src/compiler/pipeline.h',
+        '../../src/compiler/raw-machine-assembler.cc',
+        '../../src/compiler/raw-machine-assembler.h',
+        '../../src/compiler/register-allocator.cc',
+        '../../src/compiler/register-allocator.h',
+        '../../src/compiler/representation-changer.h',
+        '../../src/compiler/schedule.cc',
+        '../../src/compiler/schedule.h',
+        '../../src/compiler/scheduler.cc',
+        '../../src/compiler/scheduler.h',
+        '../../src/compiler/simplified-lowering.cc',
+        '../../src/compiler/simplified-lowering.h',
+        '../../src/compiler/simplified-node-factory.h',
+        '../../src/compiler/simplified-operator.h',
+        '../../src/compiler/source-position.cc',
+        '../../src/compiler/source-position.h',
+        '../../src/compiler/structured-machine-assembler.cc',
+        '../../src/compiler/structured-machine-assembler.h',
+        '../../src/compiler/typer.cc',
+        '../../src/compiler/typer.h',
+        '../../src/compiler/verifier.cc',
+        '../../src/compiler/verifier.h',
         '../../src/compiler.cc',
         '../../src/compiler.h',
         '../../src/contexts.cc',
         '../../src/lithium-codegen.h',
         '../../src/lithium.cc',
         '../../src/lithium.h',
+        '../../src/lithium-inl.h',
         '../../src/liveedit.cc',
         '../../src/liveedit.h',
         '../../src/log-inl.h',
             '../../src/arm/regexp-macro-assembler-arm.h',
             '../../src/arm/simulator-arm.cc',
             '../../src/arm/stub-cache-arm.cc',
+            '../../src/compiler/arm/code-generator-arm.cc',
+            '../../src/compiler/arm/instruction-codes-arm.h',
+            '../../src/compiler/arm/instruction-selector-arm.cc',
+            '../../src/compiler/arm/linkage-arm.cc',
           ],
         }],
         ['v8_target_arch=="arm64"', {
             '../../src/arm64/stub-cache-arm64.cc',
             '../../src/arm64/utils-arm64.cc',
             '../../src/arm64/utils-arm64.h',
+            '../../src/compiler/arm64/code-generator-arm64.cc',
+            '../../src/compiler/arm64/instruction-codes-arm64.h',
+            '../../src/compiler/arm64/instruction-selector-arm64.cc',
+            '../../src/compiler/arm64/linkage-arm64.cc',
           ],
         }],
         ['v8_target_arch=="ia32"', {
             '../../src/ia32/regexp-macro-assembler-ia32.cc',
             '../../src/ia32/regexp-macro-assembler-ia32.h',
             '../../src/ia32/stub-cache-ia32.cc',
+            '../../src/compiler/ia32/code-generator-ia32.cc',
+            '../../src/compiler/ia32/instruction-codes-ia32.h',
+            '../../src/compiler/ia32/instruction-selector-ia32.cc',
+            '../../src/compiler/ia32/linkage-ia32.cc',
           ],
         }],
         ['v8_target_arch=="x87"', {
             '../../src/x64/regexp-macro-assembler-x64.cc',
             '../../src/x64/regexp-macro-assembler-x64.h',
             '../../src/x64/stub-cache-x64.cc',
+            '../../src/compiler/x64/code-generator-x64.cc',
+            '../../src/compiler/x64/instruction-codes-x64.h',
+            '../../src/compiler/x64/instruction-selector-x64.cc',
+            '../../src/compiler/x64/linkage-x64.cc',
           ],
         }],
         ['OS=="linux"', {
index d1eb3a339171f3281171c21ac495da890d3f3553..cbc6116f892035f6c8392444618154df25350081 100755 (executable)
@@ -59,6 +59,7 @@ TIMEOUT_SCALEFACTOR = {"debug"   : 4,
 VARIANT_FLAGS = {
     "default": [],
     "stress": ["--stress-opt", "--always-opt"],
+    "turbofan": ["--turbo-filter=*", "--always-opt"],
     "nocrankshaft": ["--nocrankshaft"]}
 
 VARIANTS = ["default", "stress", "nocrankshaft"]