Update V8 to version 3.0.
Author:    kasperl@chromium.org <kasperl@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Date:      Tue, 7 Dec 2010 09:11:56 +0000 (09:11 +0000)
Committer: kasperl@chromium.org <kasperl@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Date:      Tue, 7 Dec 2010 09:11:56 +0000 (09:11 +0000)
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5920 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

331 files changed:
SConstruct
include/v8-debug.h [changed mode: 0755->0644]
include/v8-testing.h [new file with mode: 0644]
samples/shell.cc
src/SConscript
src/accessors.cc
src/accessors.h
src/api.cc
src/api.h
src/arm/assembler-arm-inl.h
src/arm/assembler-arm.cc
src/arm/assembler-arm.h
src/arm/builtins-arm.cc
src/arm/code-stubs-arm.cc
src/arm/code-stubs-arm.h
src/arm/codegen-arm.cc
src/arm/codegen-arm.h
src/arm/cpu-arm.cc
src/arm/deoptimizer-arm.cc [new file with mode: 0644]
src/arm/frames-arm.cc
src/arm/frames-arm.h
src/arm/full-codegen-arm.cc
src/arm/ic-arm.cc
src/arm/lithium-arm.cc [new file with mode: 0644]
src/arm/lithium-arm.h [new file with mode: 0644]
src/arm/lithium-codegen-arm.cc [new file with mode: 0644]
src/arm/lithium-codegen-arm.h [new file with mode: 0644]
src/arm/macro-assembler-arm.cc
src/arm/macro-assembler-arm.h
src/arm/simulator-arm.cc
src/arm/simulator-arm.h
src/arm/stub-cache-arm.cc
src/assembler.cc
src/assembler.cc.rej [new file with mode: 0644]
src/assembler.h
src/ast-inl.h
src/ast.cc
src/ast.h
src/atomicops.h [new file with mode: 0644]
src/atomicops_internals_arm_gcc.h [new file with mode: 0644]
src/atomicops_internals_x86_gcc.cc [new file with mode: 0644]
src/atomicops_internals_x86_gcc.h [new file with mode: 0644]
src/atomicops_internals_x86_macosx.h [new file with mode: 0644]
src/atomicops_internals_x86_msvc.h [new file with mode: 0644]
src/bootstrapper.cc
src/builtins.cc
src/builtins.h
src/checks.h
src/code-stubs.cc
src/code-stubs.h
src/codegen.cc
src/codegen.h
src/compilation-cache.cc
src/compilation-cache.h
src/compiler.cc
src/compiler.h
src/contexts.cc
src/contexts.h
src/cpu-profiler.cc
src/cpu-profiler.h
src/d8.h
src/data-flow.cc
src/data-flow.h
src/debug.cc
src/deoptimizer.cc [new file with mode: 0644]
src/deoptimizer.h [new file with mode: 0644]
src/disassembler.cc
src/execution.cc
src/execution.h
src/factory.cc
src/factory.h
src/flag-definitions.h
src/flags.cc
src/frame-element.h
src/frames.cc
src/frames.h
src/full-codegen.cc
src/full-codegen.h
src/global-handles.cc
src/globals.h
src/graph-codegen.cc.rej [new file with mode: 0644]
src/graph-codegen.h.rej [new file with mode: 0644]
src/handles.cc
src/handles.h
src/heap-inl.h
src/heap.cc
src/heap.h
src/hydrogen-instructions.cc [new file with mode: 0644]
src/hydrogen-instructions.h [new file with mode: 0644]
src/hydrogen.cc [new file with mode: 0644]
src/hydrogen.h [new file with mode: 0644]
src/ia32/assembler-ia32-inl.h
src/ia32/assembler-ia32.cc
src/ia32/assembler-ia32.h
src/ia32/builtins-ia32.cc
src/ia32/code-stubs-ia32.cc
src/ia32/code-stubs-ia32.h
src/ia32/codegen-ia32.cc
src/ia32/codegen-ia32.h
src/ia32/cpu-ia32.cc
src/ia32/deoptimizer-ia32.cc [new file with mode: 0644]
src/ia32/disasm-ia32.cc
src/ia32/frames-ia32.h
src/ia32/full-codegen-ia32.cc
src/ia32/ic-ia32.cc
src/ia32/lithium-codegen-ia32.cc [new file with mode: 0644]
src/ia32/lithium-codegen-ia32.h [new file with mode: 0644]
src/ia32/lithium-ia32.cc [new file with mode: 0644]
src/ia32/lithium-ia32.h [new file with mode: 0644]
src/ia32/macro-assembler-ia32.cc
src/ia32/macro-assembler-ia32.h
src/ia32/stub-cache-ia32.cc
src/ic-inl.h
src/ic.cc
src/ic.h
src/jump-target-light.h
src/list-inl.h
src/list.h
src/lithium-allocator.cc [new file with mode: 0644]
src/lithium-allocator.h [new file with mode: 0644]
src/liveedit-debugger.js
src/liveedit.cc
src/liveedit.h
src/log.cc
src/log.h
src/mark-compact.cc
src/math.js
src/memory.h
src/messages.js
src/objects-debug.cc
src/objects-inl.h
src/objects-visiting.h
src/objects.cc
src/objects.cc.rej [new file with mode: 0644]
src/objects.h
src/parser.cc
src/platform-freebsd.cc
src/platform-linux.cc
src/platform-macos.cc
src/platform-nullos.cc
src/platform-openbsd.cc
src/platform-solaris.cc
src/platform-win32.cc
src/platform.h
src/prettyprinter.h
src/profile-generator.cc
src/property.h
src/rewriter.cc
src/runtime-profiler.cc [new file with mode: 0644]
src/runtime-profiler.h [new file with mode: 0644]
src/runtime.cc
src/runtime.h
src/safepoint-table.cc [new file with mode: 0644]
src/safepoint-table.h [new file with mode: 0644]
src/scopeinfo.h
src/scopes.cc
src/scopes.h
src/serialize.cc
src/serialize.h
src/spaces-inl.h
src/spaces.cc
src/spaces.h
src/string-stream.h
src/stub-cache.cc
src/stub-cache.h
src/token.h
src/top.cc
src/top.h
src/type-info.cc
src/type-info.h
src/utils.cc
src/utils.h
src/v8-counters.h
src/v8.cc
src/v8.h
src/v8globals.h
src/v8utils.h
src/variables.cc
src/variables.h
src/version.cc
src/vm-state-inl.h
src/vm-state.h
src/win32-headers.h [new file with mode: 0644]
src/x64/assembler-x64-inl.h
src/x64/assembler-x64.cc
src/x64/assembler-x64.h
src/x64/builtins-x64.cc
src/x64/code-stubs-x64.cc
src/x64/code-stubs-x64.h
src/x64/codegen-x64.cc
src/x64/codegen-x64.h
src/x64/cpu-x64.cc
src/x64/deoptimizer-x64.cc [new file with mode: 0644]
src/x64/frames-x64.h
src/x64/full-codegen-x64.cc
src/x64/ic-x64.cc
src/x64/lithium-codegen-x64.h [new file with mode: 0644]
src/x64/lithium-x64.h [new file with mode: 0644]
src/x64/macro-assembler-x64.cc
src/x64/macro-assembler-x64.h
src/x64/stub-cache-x64.cc
src/zone.h
test/cctest/SConscript
test/cctest/cctest.status
test/cctest/test-alloc.cc
test/cctest/test-api.cc
test/cctest/test-dataflow.cc
test/cctest/test-debug.cc
test/cctest/test-deoptimization.cc [new file with mode: 0644]
test/cctest/test-heap.cc
test/cctest/test-log-stack-tracer.cc
test/cctest/test-log.cc
test/cctest/test-mark-compact.cc
test/cctest/test-profile-generator.cc
test/cctest/test-spaces.cc
test/cctest/test-utils.cc
test/cctest/test-version.cc
test/es5conform/es5conform.status
test/message/message.status
test/message/try-catch-finally-return-in-finally.js
test/message/try-catch-finally-return-in-finally.out
test/message/try-finally-return-in-finally.js
test/message/try-finally-return-in-finally.out
test/mjsunit/accessors-on-global-object.js [new file with mode: 0644]
test/mjsunit/apply-arguments-gc-safepoint.js [new file with mode: 0644]
test/mjsunit/array-functions-prototype.js
test/mjsunit/codegen-coverage.js
test/mjsunit/compiler/alloc-number.js [new file with mode: 0644]
test/mjsunit/compiler/array-access.js [new file with mode: 0644]
test/mjsunit/compiler/array-length.js [new file with mode: 0644]
test/mjsunit/compiler/assignment-deopt.js [new file with mode: 0644]
test/mjsunit/compiler/assignment.js
test/mjsunit/compiler/binary-ops.js [new file with mode: 0644]
test/mjsunit/compiler/call-keyed.js [new file with mode: 0644]
test/mjsunit/compiler/compare.js [new file with mode: 0644]
test/mjsunit/compiler/complex-for-in.js [new file with mode: 0644]
test/mjsunit/compiler/control-flow-0.js [new file with mode: 0644]
test/mjsunit/compiler/control-flow-1.js [new file with mode: 0644]
test/mjsunit/compiler/control-flow-2.js [new file with mode: 0644]
test/mjsunit/compiler/count-deopt.js [new file with mode: 0644]
test/mjsunit/compiler/countoperation.js
test/mjsunit/compiler/delete.js [new file with mode: 0644]
test/mjsunit/compiler/deopt-args.js [new file with mode: 0644]
test/mjsunit/compiler/deopt-inlined-smi.js [new file with mode: 0644]
test/mjsunit/compiler/expression-trees.js [new file with mode: 0644]
test/mjsunit/compiler/for-stmt.js [new file with mode: 0644]
test/mjsunit/compiler/globals.js
test/mjsunit/compiler/inline-compare.js [new file with mode: 0644]
test/mjsunit/compiler/inline-conditional.js [new file with mode: 0644]
test/mjsunit/compiler/inline-global-access.js [new file with mode: 0644]
test/mjsunit/compiler/inline-param.js [new file with mode: 0644]
test/mjsunit/compiler/inline-two.js [new file with mode: 0644]
test/mjsunit/compiler/logical-and.js [new file with mode: 0644]
test/mjsunit/compiler/logical-or.js [new file with mode: 0644]
test/mjsunit/compiler/loops.js
test/mjsunit/compiler/null-compare.js [new file with mode: 0644]
test/mjsunit/compiler/optimized-function-calls.js [new file with mode: 0644]
test/mjsunit/compiler/pic.js [new file with mode: 0644]
test/mjsunit/compiler/property-calls.js [new file with mode: 0644]
test/mjsunit/compiler/property-refs.js [new file with mode: 0644]
test/mjsunit/compiler/property-stores.js [new file with mode: 0644]
test/mjsunit/compiler/recursive-deopt.js [new file with mode: 0644]
test/mjsunit/compiler/regress-0.js [new file with mode: 0644]
test/mjsunit/compiler/regress-1.js [new file with mode: 0644]
test/mjsunit/compiler/regress-2.js [new file with mode: 0644]
test/mjsunit/compiler/regress-3.js [new file with mode: 0644]
test/mjsunit/compiler/regress-3136962.js [new file with mode: 0644]
test/mjsunit/compiler/regress-3185901.js [new file with mode: 0644]
test/mjsunit/compiler/regress-3218915.js [new file with mode: 0644]
test/mjsunit/compiler/regress-3249650.js [new file with mode: 0644]
test/mjsunit/compiler/regress-4.js [new file with mode: 0644]
test/mjsunit/compiler/regress-5.js [new file with mode: 0644]
test/mjsunit/compiler/regress-6.js [new file with mode: 0644]
test/mjsunit/compiler/regress-7.js [new file with mode: 0644]
test/mjsunit/compiler/regress-8.js [new file with mode: 0644]
test/mjsunit/compiler/regress-arguments.js [new file with mode: 0644]
test/mjsunit/compiler/regress-arrayliteral.js [new file with mode: 0644]
test/mjsunit/compiler/regress-funarguments.js [new file with mode: 0644]
test/mjsunit/compiler/regress-funcaller.js [new file with mode: 0644]
test/mjsunit/compiler/regress-gap.js [new file with mode: 0644]
test/mjsunit/compiler/regress-gvn.js [new file with mode: 0644]
test/mjsunit/compiler/regress-loop-deopt.js [new file with mode: 0644]
test/mjsunit/compiler/regress-max.js [new file with mode: 0644]
test/mjsunit/compiler/regress-or.js [new file with mode: 0644]
test/mjsunit/compiler/regress-rep-change.js [new file with mode: 0644]
test/mjsunit/compiler/regress-stacktrace-methods.js [new file with mode: 0644]
test/mjsunit/compiler/regress-stacktrace.js [new file with mode: 0644]
test/mjsunit/compiler/safepoint.js [new file with mode: 0644]
test/mjsunit/compiler/simple-bailouts.js
test/mjsunit/compiler/simple-binary-op.js
test/mjsunit/compiler/simple-deopt.js [new file with mode: 0644]
test/mjsunit/compiler/simple-global-access.js
test/mjsunit/compiler/simple-inlining.js [new file with mode: 0644]
test/mjsunit/compiler/simple-osr.js [new file with mode: 0644]
test/mjsunit/compiler/switch-bailout.js [new file with mode: 0644]
test/mjsunit/compiler/this-property-refs.js
test/mjsunit/compiler/thisfunction.js
test/mjsunit/compiler/variables.js [new file with mode: 0644]
test/mjsunit/debug-changebreakpoint.js
test/mjsunit/debug-clearbreakpoint.js
test/mjsunit/debug-clearbreakpointgroup.js
test/mjsunit/debug-liveedit-2.js
test/mjsunit/debug-liveedit-breakpoints.js
test/mjsunit/debug-liveedit-patch-positions.js
test/mjsunit/debug-stepout-recursive-function.js
test/mjsunit/fuzz-natives.js
test/mjsunit/mirror-object.js
test/mjsunit/mjsunit.status
test/mjsunit/regress/regress-3006390.js [new file with mode: 0644]
test/mjsunit/regress/regress-3185905.js [new file with mode: 0644]
test/mjsunit/regress/regress-3199913.js [new file with mode: 0644]
test/mjsunit/regress/regress-3218530.js [new file with mode: 0644]
test/mjsunit/regress/regress-3218915.js [new file with mode: 0644]
test/mjsunit/regress/regress-3230771.js [new file with mode: 0644]
test/mjsunit/regress/regress-3247124.js [new file with mode: 0644]
test/mjsunit/regress/regress-3252443.js [new file with mode: 0644]
test/mjsunit/regress/regress-52801.js
test/mjsunit/regress/regress-580.js
test/mjsunit/smi-ops-inlined.js [new file with mode: 0644]
test/mjsunit/smi-ops.js
test/mjsunit/string-replace-gc.js
test/mjsunit/sum-0-plus-undefined-is-NaN.js [new file with mode: 0644]
test/mjsunit/typeof.js
test/mozilla/mozilla.status
test/sputnik/sputnik.status
tools/gyp/v8.gyp
tools/ll_prof.py [changed mode: 0755->0644]
tools/test.py
tools/visual_studio/v8_base.vcproj
tools/visual_studio/v8_base_arm.vcproj
tools/visual_studio/v8_base_x64.vcproj

diff --git a/SConstruct b/SConstruct
index ca63c29..63fc33c 100644 (file)
@@ -523,7 +523,8 @@ SAMPLE_FLAGS = {
       'CCFLAGS':      ['-O2']
     },
     'mode:debug': {
-      'CCFLAGS':      ['-g', '-O0']
+      'CCFLAGS':      ['-g', '-O0'],
+      'CPPDEFINES':   ['DEBUG']
     },
     'prof:oprofile': {
       'LIBPATH': ['/usr/lib32', '/usr/lib32/oprofile'],
@@ -578,13 +579,14 @@ SAMPLE_FLAGS = {
       'LINKFLAGS': ['/MACHINE:X64', '/STACK:2091752']
     },
     'mode:debug': {
-      'CCFLAGS':   ['/Od'],
-      'LINKFLAGS': ['/DEBUG'],
+      'CCFLAGS':    ['/Od'],
+      'LINKFLAGS':  ['/DEBUG'],
+      'CPPDEFINES': ['DEBUG'],
       'msvcrt:static': {
-        'CCFLAGS': ['/MTd']
+        'CCFLAGS':  ['/MTd']
       },
       'msvcrt:shared': {
-        'CCFLAGS': ['/MDd']
+        'CCFLAGS':  ['/MDd']
       }
     }
   }
diff --git a/include/v8-debug.h b/include/v8-debug.h
old mode 100755 (executable)
new mode 100644 (file)
diff --git a/include/v8-testing.h b/include/v8-testing.h
new file mode 100644 (file)
index 0000000..4db30a4
--- /dev/null
@@ -0,0 +1,99 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_V8_TEST_H_
+#define V8_V8_TEST_H_
+
+#include "v8.h"
+
+#ifdef _WIN32
+// Setup for Windows DLL export/import. See v8.h in this directory for
+// information on how to build/use V8 as a DLL.
+#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
+#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
+  build configuration to ensure that at most one of these is set
+#endif
+
+#ifdef BUILDING_V8_SHARED
+#define V8EXPORT __declspec(dllexport)
+#elif USING_V8_SHARED
+#define V8EXPORT __declspec(dllimport)
+#else
+#define V8EXPORT
+#endif
+
+#else  // _WIN32
+
+// Setup for Linux shared library export. See v8.h in this directory for
+// information on how to build/use V8 as shared library.
+#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
+#define V8EXPORT __attribute__ ((visibility("default")))
+#else  // defined(__GNUC__) && (__GNUC__ >= 4)
+#define V8EXPORT
+#endif  // defined(__GNUC__) && (__GNUC__ >= 4)
+
+#endif  // _WIN32
+
+
+/**
+ * Testing support for the V8 JavaScript engine.
+ */
+namespace v8 {
+
+class V8EXPORT Testing {
+ public:
+  enum StressType {
+    kStressTypeOpt,
+    kStressTypeDeopt
+  };
+
+  /**
+   * Set the type of stressing to do. The default if not set is kStressTypeOpt.
+   */
+  static void SetStressRunType(StressType type);
+
+  /**
+   * Get the number of runs of a given test that is required to get the full
+   * stress coverage.
+   */
+  static int GetStressRuns();
+
+  /**
+   * Indicate the number of the run which is about to start. The value of run
+   * should be between 0 and one less than the result from GetStressRuns().
+   */
+  static void PrepareStressRun(int run);
+};
+
+
+}  // namespace v8
+
+
+#undef V8EXPORT
+
+
+#endif  // V8_V8_TEST_H_
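
A minimal sketch of how an embedder might drive the stress API declared above; the loop mirrors the samples/shell.cc change later in this commit. RunScript is a hypothetical stand-in for the embedder's own entry point:

    #include <v8-testing.h>

    extern int RunScript();  // hypothetical embedder entry point

    int RunWithStress() {
      // Stress optimization (kStressTypeOpt) or deoptimization (kStressTypeDeopt).
      v8::Testing::SetStressRunType(v8::Testing::kStressTypeOpt);
      int result = 0;
      int stress_runs = v8::Testing::GetStressRuns();
      for (int i = 0; i < stress_runs && result == 0; i++) {
        v8::Testing::PrepareStressRun(i);  // runs are numbered 0 .. stress_runs - 1
        result = RunScript();
      }
      return result;
    }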
diff --git a/samples/shell.cc b/samples/shell.cc
index 1a13f5f..4604575 100644 (file)
@@ -26,6 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <v8.h>
+#include <v8-testing.h>
 #include <fcntl.h>
 #include <string.h>
 #include <stdio.h>
@@ -44,10 +45,10 @@ v8::Handle<v8::Value> Quit(const v8::Arguments& args);
 v8::Handle<v8::Value> Version(const v8::Arguments& args);
 v8::Handle<v8::String> ReadFile(const char* name);
 void ReportException(v8::TryCatch* handler);
+void SetFlagsFromString(const char* flags);
 
 
 int RunMain(int argc, char* argv[]) {
-  v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
   v8::HandleScope handle_scope;
   // Create a template for the global object.
   v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
@@ -63,11 +64,11 @@ int RunMain(int argc, char* argv[]) {
   global->Set(v8::String::New("version"), v8::FunctionTemplate::New(Version));
   // Create a new execution environment containing the built-in
   // functions
-  v8::Handle<v8::Context> context = v8::Context::New(NULL, global);
-  // Enter the newly created execution environment.
-  v8::Context::Scope context_scope(context);
+  v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
   bool run_shell = (argc == 1);
   for (int i = 1; i < argc; i++) {
+    // Enter the execution environment before evaluating any code.
+    v8::Context::Scope context_scope(context);
     const char* str = argv[i];
     if (strcmp(str, "--shell") == 0) {
       run_shell = true;
@@ -99,12 +100,48 @@ int RunMain(int argc, char* argv[]) {
     }
   }
   if (run_shell) RunShell(context);
+  context.Dispose();
   return 0;
 }
 
 
 int main(int argc, char* argv[]) {
-  int result = RunMain(argc, argv);
+  // Figure out if we're requested to stress the optimization
+  // infrastructure by running tests multiple times and forcing
+  // optimization in the last run.
+  bool FLAG_stress_opt = false;
+  bool FLAG_stress_deopt = false;
+  for (int i = 0; i < argc; i++) {
+    if (strcmp(argv[i], "--stress-opt") == 0) {
+      FLAG_stress_opt = true;
+      argv[i] = NULL;
+    } else if (strcmp(argv[i], "--stress-deopt") == 0) {
+      FLAG_stress_deopt = true;
+      argv[i] = NULL;
+    } else if (strcmp(argv[i], "--noalways-opt") == 0) {
+      // No support for stressing if we can't use --always-opt.
+      FLAG_stress_opt = false;
+      FLAG_stress_deopt = false;
+      break;
+    }
+  }
+
+  v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+  int result = 0;
+  if (FLAG_stress_opt || FLAG_stress_deopt) {
+    v8::Testing::SetStressRunType(FLAG_stress_opt
+                                  ? v8::Testing::kStressTypeOpt
+                                  : v8::Testing::kStressTypeDeopt);
+    int stress_runs = v8::Testing::GetStressRuns();
+    for (int i = 0; i < stress_runs && result == 0; i++) {
+      printf("============ Stress %d/%d ============\n",
+             i + 1, stress_runs);
+      v8::Testing::PrepareStressRun(i);
+      result = RunMain(argc, argv);
+    }
+  } else {
+    result = RunMain(argc, argv);
+  }
   v8::V8::Dispose();
   return result;
 }
@@ -221,6 +258,8 @@ v8::Handle<v8::String> ReadFile(const char* name) {
 void RunShell(v8::Handle<v8::Context> context) {
   printf("V8 version %s\n", v8::V8::GetVersion());
   static const int kBufferSize = 256;
+  // Enter the execution environment before evaluating any code.
+  v8::Context::Scope context_scope(context);
   while (true) {
     char buffer[kBufferSize];
     printf("> ");
@@ -306,3 +345,8 @@ void ReportException(v8::TryCatch* try_catch) {
     }
   }
 }
+
+
+void SetFlagsFromString(const char* flags) {
+  v8::V8::SetFlagsFromString(flags, strlen(flags));
+}
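
With this change the sample shell accepts --stress-opt and --stress-deopt (e.g. ./shell --stress-opt script.js), running the script GetStressRuns() times with progressively more aggressive optimization settings; passing --noalways-opt disables stressing entirely, since the scheme depends on forcing --always-opt in the final run.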
diff --git a/src/SConscript b/src/SConscript
index 8953698..8ab781e 100755 (executable)
@@ -40,6 +40,7 @@ SOURCES = {
     api.cc
     assembler.cc
     ast.cc
+    atomicops_internals_x86_gcc.cc
     bignum.cc
     bignum-dtoa.cc
     bootstrapper.cc
@@ -59,6 +60,7 @@ SOURCES = {
     dateparser.cc
     debug-agent.cc
     debug.cc
+    deoptimizer.cc
     disassembler.cc
     diy-fp.cc
     dtoa.cc
@@ -76,10 +78,13 @@ SOURCES = {
     hashmap.cc
     heap-profiler.cc
     heap.cc
+    hydrogen.cc
+    hydrogen-instructions.cc
     ic.cc
     interpreter-irregexp.cc
     jsregexp.cc
     jump-target.cc
+    lithium-allocator.cc
     liveedit.cc
     log-utils.cc
     log.cc
@@ -99,6 +104,8 @@ SOURCES = {
     register-allocator.cc
     rewriter.cc
     runtime.cc
+    runtime-profiler.cc
+    safepoint-table.cc
     scanner-base.cc
     scanner.cc
     scopeinfo.cc
@@ -134,11 +141,14 @@ SOURCES = {
     arm/constants-arm.cc
     arm/cpu-arm.cc
     arm/debug-arm.cc
+    arm/deoptimizer-arm.cc
     arm/disasm-arm.cc
     arm/frames-arm.cc
     arm/full-codegen-arm.cc
     arm/ic-arm.cc
     arm/jump-target-arm.cc
+    arm/lithium-arm.cc
+    arm/lithium-codegen-arm.cc
     arm/macro-assembler-arm.cc
     arm/regexp-macro-assembler-arm.cc
     arm/register-allocator-arm.cc
@@ -172,11 +182,14 @@ SOURCES = {
     ia32/codegen-ia32.cc
     ia32/cpu-ia32.cc
     ia32/debug-ia32.cc
+    ia32/deoptimizer-ia32.cc
     ia32/disasm-ia32.cc
     ia32/frames-ia32.cc
     ia32/full-codegen-ia32.cc
     ia32/ic-ia32.cc
     ia32/jump-target-ia32.cc
+    ia32/lithium-codegen-ia32.cc
+    ia32/lithium-ia32.cc
     ia32/macro-assembler-ia32.cc
     ia32/regexp-macro-assembler-ia32.cc
     ia32/register-allocator-ia32.cc
@@ -192,6 +205,7 @@ SOURCES = {
     x64/codegen-x64.cc
     x64/cpu-x64.cc
     x64/debug-x64.cc
+    x64/deoptimizer-x64.cc
     x64/disasm-x64.cc
     x64/frames-x64.cc
     x64/full-codegen-x64.cc
diff --git a/src/accessors.cc b/src/accessors.cc
index 08ef41b..43d54fe 100644 (file)
 #include "v8.h"
 
 #include "accessors.h"
+#include "ast.h"
+#include "deoptimizer.h"
 #include "execution.h"
 #include "factory.h"
+#include "safepoint-table.h"
 #include "scopeinfo.h"
 #include "top.h"
 
@@ -503,11 +506,9 @@ MaybeObject* Accessors::FunctionGetLength(Object* object, void*) {
     // If the function isn't compiled yet, the length is not computed
     // correctly yet. Compile it now and return the right length.
     HandleScope scope;
-    Handle<SharedFunctionInfo> shared(function->shared());
-    if (!CompileLazyShared(shared, KEEP_EXCEPTION)) {
-      return Failure::Exception();
-    }
-    return Smi::FromInt(shared->length());
+    Handle<JSFunction> handle(function);
+    if (!CompileLazy(handle, KEEP_EXCEPTION)) return Failure::Exception();
+    return Smi::FromInt(handle->shared()->length());
   } else {
     return Smi::FromInt(function->shared()->length());
   }
@@ -545,6 +546,208 @@ const AccessorDescriptor Accessors::FunctionName = {
 // Accessors::FunctionArguments
 //
 
+static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
+  if (slot_index >= 0) {
+    const int offset = JavaScriptFrameConstants::kLocal0Offset;
+    return frame->fp() + offset - (slot_index * kPointerSize);
+  } else {
+    const int offset = JavaScriptFrameConstants::kReceiverOffset;
+    return frame->caller_sp() + offset + (slot_index * kPointerSize);
+  }
+}
+
+
+// We can't intermix stack decoding and allocations because
+// deoptimization infrastructure is not GC safe.
+// Thus we build a temporary structure in malloced space.
+class SlotRef BASE_EMBEDDED {
+ public:
+  enum SlotRepresentation {
+    UNKNOWN,
+    TAGGED,
+    INT32,
+    DOUBLE,
+    LITERAL
+  };
+
+  SlotRef()
+      : addr_(NULL), representation_(UNKNOWN) { }
+
+  SlotRef(Address addr, SlotRepresentation representation)
+      : addr_(addr), representation_(representation) { }
+
+  explicit SlotRef(Object* literal)
+      : literal_(literal), representation_(LITERAL) { }
+
+  Handle<Object> GetValue() {
+    switch (representation_) {
+      case TAGGED:
+        return Handle<Object>(Memory::Object_at(addr_));
+
+      case INT32: {
+        int value = Memory::int32_at(addr_);
+        if (Smi::IsValid(value)) {
+          return Handle<Object>(Smi::FromInt(value));
+        } else {
+          return Factory::NewNumberFromInt(value);
+        }
+      }
+
+      case DOUBLE: {
+        double value = Memory::double_at(addr_);
+        return Factory::NewNumber(value);
+      }
+
+      case LITERAL:
+        return literal_;
+
+      default:
+        UNREACHABLE();
+        return Handle<Object>::null();
+    }
+  }
+
+ private:
+  Address addr_;
+  Handle<Object> literal_;
+  SlotRepresentation representation_;
+};
+
+
+static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
+                                          DeoptimizationInputData* data,
+                                          JavaScriptFrame* frame) {
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+
+  switch (opcode) {
+    case Translation::BEGIN:
+    case Translation::FRAME:
+      // Peeled off before getting here.
+      break;
+
+    case Translation::ARGUMENTS_OBJECT:
+      // This can only be emitted for local slots, not for argument slots.
+      break;
+
+    case Translation::REGISTER:
+    case Translation::INT32_REGISTER:
+    case Translation::DOUBLE_REGISTER:
+    case Translation::DUPLICATE:
+      // We are at a safepoint which corresponds to a call.  All registers
+      // are saved by the caller, so there are no live registers at this
+      // point. Thus these translation commands should not be used.
+      break;
+
+    case Translation::STACK_SLOT: {
+      int slot_index = iterator->Next();
+      Address slot_addr = SlotAddress(frame, slot_index);
+      return SlotRef(slot_addr, SlotRef::TAGGED);
+    }
+
+    case Translation::INT32_STACK_SLOT: {
+      int slot_index = iterator->Next();
+      Address slot_addr = SlotAddress(frame, slot_index);
+      return SlotRef(slot_addr, SlotRef::INT32);
+    }
+
+    case Translation::DOUBLE_STACK_SLOT: {
+      int slot_index = iterator->Next();
+      Address slot_addr = SlotAddress(frame, slot_index);
+      return SlotRef(slot_addr, SlotRef::DOUBLE);
+    }
+
+    case Translation::LITERAL: {
+      int literal_index = iterator->Next();
+      return SlotRef(data->LiteralArray()->get(literal_index));
+    }
+  }
+
+  UNREACHABLE();
+  return SlotRef();
+}
+
+
+
+
+
+static void ComputeSlotMappingForArguments(JavaScriptFrame* frame,
+                                           int inlined_frame_index,
+                                           Vector<SlotRef>* args_slots) {
+  AssertNoAllocation no_gc;
+
+  int deopt_index = AstNode::kNoNumber;
+
+  DeoptimizationInputData* data =
+      static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
+
+  TranslationIterator it(data->TranslationByteArray(),
+                         data->TranslationIndex(deopt_index)->value());
+
+  Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+  ASSERT(opcode == Translation::BEGIN);
+  int frame_count = it.Next();
+
+  USE(frame_count);
+  ASSERT(frame_count > inlined_frame_index);
+
+  int frames_to_skip = inlined_frame_index;
+  while (true) {
+    opcode = static_cast<Translation::Opcode>(it.Next());
+
+    // Skip over operands to advance to the next opcode.
+    it.Skip(Translation::NumberOfOperandsFor(opcode));
+
+    if (opcode == Translation::FRAME) {
+      if (frames_to_skip == 0) {
+        // We reached the frame corresponding to the inlined function in question.
+        // Process translation commands for arguments.
+
+        // Skip translation command for receiver.
+        it.Skip(Translation::NumberOfOperandsFor(
+            static_cast<Translation::Opcode>(it.Next())));
+
+        // Compute slots for arguments.
+        for (int i = 0; i < args_slots->length(); ++i) {
+          (*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
+        }
+
+        return;
+      }
+
+      frames_to_skip--;
+    }
+  }
+
+  UNREACHABLE();
+}
+
+
+static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
+    JavaScriptFrame* frame,
+    Handle<JSFunction> inlined_function,
+    int inlined_frame_index) {
+
+  int args_count = inlined_function->shared()->formal_parameter_count();
+
+  ScopedVector<SlotRef> args_slots(args_count);
+
+  ComputeSlotMappingForArguments(frame, inlined_frame_index, &args_slots);
+
+  Handle<JSObject> arguments =
+      Factory::NewArgumentsObject(inlined_function, args_count);
+
+  Handle<FixedArray> array = Factory::NewFixedArray(args_count);
+  for (int i = 0; i < args_count; ++i) {
+    Handle<Object> value = args_slots[i].GetValue();
+    array->set(i, *value);
+  }
+  arguments->set_elements(*array);
+
+  // Return the freshly allocated arguments object.
+  return *arguments;
+}
+
 
 MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
   HandleScope scope;
@@ -554,38 +757,50 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
   Handle<JSFunction> function(holder);
 
   // Find the top invocation of the function by traversing frames.
+  List<JSFunction*> functions(2);
   for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
-    // Skip all frames that aren't invocations of the given function.
     JavaScriptFrame* frame = it.frame();
-    if (frame->function() != *function) continue;
-
-    // If there is an arguments variable in the stack, we return that.
-    int index = function->shared()->scope_info()->
-        StackSlotIndex(Heap::arguments_symbol());
-    if (index >= 0) {
-      Handle<Object> arguments = Handle<Object>(frame->GetExpression(index));
-      if (!arguments->IsTheHole()) return *arguments;
+    frame->GetFunctions(&functions);
+    for (int i = functions.length() - 1; i >= 0; i--) {
+      // Skip all frames that aren't invocations of the given function.
+      if (functions[i] != *function) continue;
+
+      if (i > 0) {
+        // Function in question was inlined.
+        return ConstructArgumentsObjectForInlinedFunction(frame, function, i);
+      } else {
+        // If there is an arguments variable in the stack, we return that.
+        int index = function->shared()->scope_info()->
+            StackSlotIndex(Heap::arguments_symbol());
+        if (index >= 0) {
+          Handle<Object> arguments =
+              Handle<Object>(frame->GetExpression(index));
+          if (!arguments->IsTheHole()) return *arguments;
+        }
+
+        // If there isn't an arguments variable in the stack, we need to
+        // find the frame that holds the actual arguments passed to the
+        // function on the stack.
+        it.AdvanceToArgumentsFrame();
+        frame = it.frame();
+
+        // Get the number of arguments and construct an arguments object
+        // mirror for the right frame.
+        const int length = frame->GetProvidedParametersCount();
+        Handle<JSObject> arguments = Factory::NewArgumentsObject(function,
+                                                                 length);
+        Handle<FixedArray> array = Factory::NewFixedArray(length);
+
+        // Copy the parameters to the arguments object.
+        ASSERT(array->length() == length);
+        for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
+        arguments->set_elements(*array);
+
+        // Return the freshly allocated arguments object.
+        return *arguments;
+      }
     }
-
-    // If there isn't an arguments variable in the stack, we need to
-    // find the frame that holds the actual arguments passed to the
-    // function on the stack.
-    it.AdvanceToArgumentsFrame();
-    frame = it.frame();
-
-    // Get the number of arguments and construct an arguments object
-    // mirror for the right frame.
-    const int length = frame->GetProvidedParametersCount();
-    Handle<JSObject> arguments = Factory::NewArgumentsObject(function, length);
-    Handle<FixedArray> array = Factory::NewFixedArray(length);
-
-    // Copy the parameters to the arguments object.
-    ASSERT(array->length() == length);
-    for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
-    arguments->set_elements(*array);
-
-    // Return the freshly allocated arguments object.
-    return *arguments;
+    functions.Rewind(0);
   }
 
   // No frame corresponding to the given function found. Return null.
@@ -613,19 +828,34 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
   if (!found_it) return Heap::undefined_value();
   Handle<JSFunction> function(holder);
 
-  // Find the top invocation of the function by traversing frames.
+  List<JSFunction*> functions(2);
   for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
-    // Skip all frames that aren't invocations of the given function.
-    if (it.frame()->function() != *function) continue;
-    // Once we have found the frame, we need to go to the caller
-    // frame. This may require skipping through a number of top-level
-    // frames, e.g. frames for scripts not functions.
-    while (true) {
-      it.Advance();
-      if (it.done()) return Heap::null_value();
-      JSFunction* caller = JSFunction::cast(it.frame()->function());
-      if (!caller->shared()->is_toplevel()) return caller;
+    JavaScriptFrame* frame = it.frame();
+    frame->GetFunctions(&functions);
+    for (int i = functions.length() - 1; i >= 0; i--) {
+      if (functions[i] == *function) {
+        // Once we have found the frame, we need to go to the caller
+        // frame. This may require skipping through a number of top-level
+        // frames, e.g. frames for scripts not functions.
+        if (i > 0) {
+          ASSERT(!functions[i - 1]->shared()->is_toplevel());
+          return functions[i - 1];
+        } else {
+          for (it.Advance(); !it.done(); it.Advance()) {
+            frame = it.frame();
+            functions.Rewind(0);
+            frame->GetFunctions(&functions);
+            if (!functions.last()->shared()->is_toplevel()) {
+              return functions.last();
+            }
+            ASSERT(functions.length() == 1);
+          }
+          if (it.done()) return Heap::null_value();
+          break;
+        }
+      }
     }
+    functions.Rewind(0);
   }
 
   // No frame corresponding to the given function found. Return null.
diff --git a/src/accessors.h b/src/accessors.h
index 96d742e..14ccc8f 100644 (file)
@@ -78,13 +78,14 @@ class Accessors : public AllStatic {
   MUST_USE_RESULT static MaybeObject* FunctionGetPrototype(Object* object,
                                                            void*);
   MUST_USE_RESULT static MaybeObject* FunctionSetPrototype(JSObject* object,
-                                                           Object* value,
-                                                           void*);
+                                                      Object* value,
+                                                      void*);
+  static MaybeObject* FunctionGetArguments(Object* object, void*);
+
  private:
   // Accessor functions only used through the descriptor.
   static MaybeObject* FunctionGetLength(Object* object, void*);
   static MaybeObject* FunctionGetName(Object* object, void*);
-  static MaybeObject* FunctionGetArguments(Object* object, void*);
   static MaybeObject* FunctionGetCaller(Object* object, void*);
   MUST_USE_RESULT static MaybeObject* ArraySetLength(JSObject* object,
                                                      Object* value, void*);
diff --git a/src/api.cc b/src/api.cc
index 42c1db4..0ec8cf1 100644 (file)
@@ -33,6 +33,7 @@
 #include "bootstrapper.h"
 #include "compiler.h"
 #include "debug.h"
+#include "deoptimizer.h"
 #include "execution.h"
 #include "global-handles.h"
 #include "heap-profiler.h"
 #include "parser.h"
 #include "platform.h"
 #include "profile-generator-inl.h"
+#include "runtime-profiler.h"
 #include "serialize.h"
 #include "snapshot.h"
 #include "top.h"
 #include "v8threads.h"
 #include "version.h"
+#include "vm-state-inl.h"
 
 #include "../include/v8-profiler.h"
+#include "../include/v8-testing.h"
 
 #define LOG_API(expr) LOG(ApiEntryCall(expr))
 
 #ifdef ENABLE_VMSTATE_TRACKING
-#define ENTER_V8 i::VMState __state__(i::OTHER)
+#define ENTER_V8 ASSERT(i::V8::IsRunning()); i::VMState __state__(i::OTHER)
 #define LEAVE_V8 i::VMState __state__(i::EXTERNAL)
 #else
 #define ENTER_V8 ((void) 0)
@@ -97,6 +101,7 @@ namespace v8 {
     }                                                                          \
   } while (false)
 
+
 // --- D a t a   t h a t   i s   s p e c i f i c   t o   a   t h r e a d ---
 
 
@@ -2312,6 +2317,11 @@ bool v8::Object::ForceDelete(v8::Handle<Value> key) {
   HandleScope scope;
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
   i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+
+  // When turning on access checks for a global object deoptimize all functions
+  // as optimized code does not always handle access checks.
+  i::Deoptimizer::DeoptimizeGlobalObject(*self);
+
   EXCEPTION_PREAMBLE();
   i::Handle<i::Object> obj = i::ForceDeleteProperty(self, key_obj);
   has_pending_exception = obj.is_null();
@@ -2598,6 +2608,10 @@ void v8::Object::TurnOnAccessCheck() {
   HandleScope scope;
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
 
+  // When turning on access checks for a global object deoptimize all functions
+  // as optimized code does not always handle access checks.
+  i::Deoptimizer::DeoptimizeGlobalObject(*obj);
+
   i::Handle<i::Map> new_map =
     i::Factory::CopyMapDropTransitions(i::Handle<i::Map>(obj->map()));
   new_map->set_is_access_check_needed(true);
@@ -3262,7 +3276,6 @@ void v8::Object::SetPointerInInternalField(int index, void* value) {
 
 bool v8::V8::Initialize() {
   if (i::V8::IsRunning()) return true;
-  ENTER_V8;
   HandleScope scope;
   if (i::Snapshot::Initialize()) return true;
   return i::V8::Initialize(NULL);
@@ -3386,6 +3399,7 @@ Persistent<Context> v8::Context::New(
       global_constructor->set_needs_access_check(
           proxy_constructor->needs_access_check());
     }
+    i::RuntimeProfiler::Reset();
   }
   // Leave V8.
 
@@ -4945,6 +4959,66 @@ const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
 #endif  // ENABLE_LOGGING_AND_PROFILING
 
 
+v8::Testing::StressType internal::Testing::stress_type_ =
+    v8::Testing::kStressTypeOpt;
+
+
+void Testing::SetStressRunType(Testing::StressType type) {
+  internal::Testing::set_stress_type(type);
+}
+
+int Testing::GetStressRuns() {
+#ifdef DEBUG
+  // In debug mode the code runs much slower, so stressing only makes two
+  // runs.
+  return 2;
+#else
+  return 5;
+#endif
+}
+
+
+static void SetFlagsFromString(const char* flags) {
+  V8::SetFlagsFromString(flags, i::StrLength(flags));
+}
+
+
+void Testing::PrepareStressRun(int run) {
+  static const char* kLazyOptimizations =
+      "--prepare-always-opt --nolimit-inlining "
+      "--noalways-opt --noopt-eagerly";
+  static const char* kEagerOptimizations = "--opt-eagerly";
+  static const char* kForcedOptimizations = "--always-opt";
+
+  // If stressing deoptimization, turn on frequent deoptimization. If no
+  // value is specified through --deopt-every-n-times, use a default value.
+  static const char* kDeoptEvery13Times = "--deopt-every-n-times=13";
+  if (internal::Testing::stress_type() == Testing::kStressTypeDeopt &&
+      internal::FLAG_deopt_every_n_times == 0) {
+    SetFlagsFromString(kDeoptEvery13Times);
+  }
+
+#ifdef DEBUG
+  // As stressing in debug mode only makes two runs, skip the deopt
+  // stressing here.
+  if (run == GetStressRuns() - 1) {
+    SetFlagsFromString(kForcedOptimizations);
+  } else {
+    SetFlagsFromString(kEagerOptimizations);
+    SetFlagsFromString(kLazyOptimizations);
+  }
+#else
+  if (run == GetStressRuns() - 1) {
+    SetFlagsFromString(kForcedOptimizations);
+  } else if (run == GetStressRuns() - 2) {
+    SetFlagsFromString(kEagerOptimizations);
+  } else {
+    SetFlagsFromString(kLazyOptimizations);
+  }
+#endif
+}
+
+
 namespace internal {
 
 
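
The run schedule implemented by Testing::PrepareStressRun above can be restated as a sketch; FlagsForRun is a hypothetical helper, and the branches mirror the release-mode code (debug builds make only two runs, with the non-final run getting both the eager and lazy flag sets):

    // Release-mode flag schedule: runs 0..n-3 use the lazy-optimization
    // flags, run n-2 the eager flags, and the final run forces --always-opt.
    const char* FlagsForRun(int run, int stress_runs) {
      if (run == stress_runs - 1) return "--always-opt";
      if (run == stress_runs - 2) return "--opt-eagerly";
      return "--prepare-always-opt --nolimit-inlining "
             "--noalways-opt --noopt-eagerly";
    }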
diff --git a/src/api.h b/src/api.h
index e36160c..d07d75b 100644 (file)
--- a/src/api.h
+++ b/src/api.h
@@ -31,6 +31,8 @@
 #include "apiutils.h"
 #include "factory.h"
 
+#include "../include/v8-testing.h"
+
 namespace v8 {
 
 // Constants used in the implementation of the API.  The most natural thing
@@ -489,6 +491,18 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
          (!blocks_.is_empty() && prev_limit != NULL));
 }
 
+
+class Testing {
+ public:
+  static v8::Testing::StressType stress_type() { return stress_type_; }
+  static void set_stress_type(v8::Testing::StressType stress_type) {
+    stress_type_ = stress_type;
+  }
+
+ private:
+  static v8::Testing::StressType stress_type_;
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_API_H_
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 15720c9..68d32f1 100644 (file)
@@ -110,6 +110,30 @@ Address* RelocInfo::target_reference_address() {
 }
 
 
+Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = Memory::Address_at(pc_);
+  return Handle<JSGlobalPropertyCell>(
+      reinterpret_cast<JSGlobalPropertyCell**>(address));
+}
+
+
+JSGlobalPropertyCell* RelocInfo::target_cell() {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = Memory::Address_at(pc_);
+  Object* object = HeapObject::FromAddress(
+      address - JSGlobalPropertyCell::kValueOffset);
+  return reinterpret_cast<JSGlobalPropertyCell*>(object);
+}
+
+
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+  Memory::Address_at(pc_) = address;
+}
+
+
 Address RelocInfo::call_address() {
   // The 2 instructions offset assumes patched debug break slot or return
   // sequence.
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index cfdd164..8fdcf18 100644 (file)
@@ -70,7 +70,7 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
 #endif  // def __arm__
 
 
-void CpuFeatures::Probe() {
+void CpuFeatures::Probe(bool portable) {
 #ifndef __arm__
   // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
   if (FLAG_enable_vfp3) {
@@ -81,7 +81,7 @@ void CpuFeatures::Probe() {
     supported_ |= 1u << ARMv7;
   }
 #else  // def __arm__
-  if (Serializer::enabled()) {
+  if (portable && Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     supported_ |= CpuFeaturesImpliedByCompiler();
     return;  // No features if we might serialize.
@@ -98,6 +98,8 @@ void CpuFeatures::Probe() {
     supported_ |= 1u << ARMv7;
     found_by_runtime_probing_ |= 1u << ARMv7;
   }
+
+  if (!portable) found_by_runtime_probing_ = 0;
 #endif
 }
 
@@ -318,7 +320,10 @@ static const int kMinimalBufferSize = 4*KB;
 static byte* spare_buffer_ = NULL;
 
 Assembler::Assembler(void* buffer, int buffer_size)
-    : positions_recorder_(this) {
+    : positions_recorder_(this),
+      allow_peephole_optimization_(false) {
+  // BUG(3245989): disable peephole optimization if crankshaft is enabled.
+  allow_peephole_optimization_ = FLAG_peephole_optimization;
   if (buffer == NULL) {
     // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
@@ -987,6 +992,7 @@ void Assembler::b(int branch_offset, Condition cond) {
 
 
 void Assembler::bl(int branch_offset, Condition cond) {
+  positions_recorder()->WriteRecordedPositions();
   ASSERT((branch_offset & 3) == 0);
   int imm24 = branch_offset >> 2;
   ASSERT(is_int24(imm24));
@@ -1650,9 +1656,10 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
   emit(reinterpret_cast<Instr>(msg));
 #else  // def __arm__
 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
+  ASSERT(cond == al);
   bkpt(0);
 #else  // ndef CAN_USE_ARMV5_INSTRUCTIONS
-  svc(0x9f0001);
+  svc(0x9f0001, cond);
 #endif  // ndef CAN_USE_ARMV5_INSTRUCTIONS
 #endif  // def __arm__
 }
@@ -1826,13 +1833,18 @@ void Assembler::vldr(const DwVfpRegister dst,
                      const Condition cond) {
   // Ddst = MEM(Rbase + offset).
   // Instruction details available in ARM DDI 0406A, A8-628.
-  // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
+  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
   // Vdst(15-12) | 1011(11-8) | offset
   ASSERT(CpuFeatures::IsEnabled(VFP3));
+  int u = 1;
+  if (offset < 0) {
+    offset = -offset;
+    u = 0;
+  }
   ASSERT(offset % 4 == 0);
   ASSERT((offset / 4) < 256);
   ASSERT(offset >= 0);
-  emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
+  emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
        0xB*B8 | ((offset / 4) & 255));
 }
 
@@ -1843,15 +1855,20 @@ void Assembler::vldr(const SwVfpRegister dst,
                      const Condition cond) {
   // Sdst = MEM(Rbase + offset).
   // Instruction details available in ARM DDI 0406A, A8-628.
-  // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
+  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
   // Vdst(15-12) | 1010(11-8) | offset
   ASSERT(CpuFeatures::IsEnabled(VFP3));
+  int u = 1;
+  if (offset < 0) {
+    offset = -offset;
+    u = 0;
+  }
   ASSERT(offset % 4 == 0);
   ASSERT((offset / 4) < 256);
   ASSERT(offset >= 0);
   int sd, d;
   dst.split_code(&sd, &d);
-  emit(cond | d*B22 | 0xD9*B20 | base.code()*B16 | sd*B12 |
+  emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
        0xA*B8 | ((offset / 4) & 255));
 }
 
@@ -1862,13 +1879,18 @@ void Assembler::vstr(const DwVfpRegister src,
                      const Condition cond) {
   // MEM(Rbase + offset) = Dsrc.
   // Instruction details available in ARM DDI 0406A, A8-786.
-  // cond(31-28) | 1101(27-24)| 1000(23-20) | | Rbase(19-16) |
+  // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
   // Vsrc(15-12) | 1011(11-8) | (offset/4)
   ASSERT(CpuFeatures::IsEnabled(VFP3));
+  int u = 1;
+  if (offset < 0) {
+    offset = -offset;
+    u = 0;
+  }
   ASSERT(offset % 4 == 0);
   ASSERT((offset / 4) < 256);
   ASSERT(offset >= 0);
-  emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
+  emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
        0xB*B8 | ((offset / 4) & 255));
 }
 
@@ -1879,15 +1901,20 @@ void Assembler::vstr(const SwVfpRegister src,
                      const Condition cond) {
   // MEM(Rbase + offset) = SSrc.
   // Instruction details available in ARM DDI 0406A, A8-786.
-  // cond(31-28) | 1101(27-24)| 1000(23-20) | Rbase(19-16) |
+  // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
   // Vdst(15-12) | 1010(11-8) | (offset/4)
   ASSERT(CpuFeatures::IsEnabled(VFP3));
+  int u = 1;
+  if (offset < 0) {
+    offset = -offset;
+    u = 0;
+  }
   ASSERT(offset % 4 == 0);
   ASSERT((offset / 4) < 256);
   ASSERT(offset >= 0);
   int sd, d;
   src.split_code(&sd, &d);
-  emit(cond | d*B22 | 0xD8*B20 | base.code()*B16 | sd*B12 |
+  emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
        0xA*B8 | ((offset / 4) & 255));
 }
 
@@ -2411,7 +2438,7 @@ void Assembler::RecordDebugBreakSlot() {
 
 
 void Assembler::RecordComment(const char* msg) {
-  if (FLAG_debug_code) {
+  if (FLAG_code_comments) {
     CheckBuffer();
     RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
   }
@@ -2469,6 +2496,20 @@ void Assembler::GrowBuffer() {
 }
 
 
+void Assembler::db(uint8_t data) {
+  CheckBuffer();
+  *reinterpret_cast<uint8_t*>(pc_) = data;
+  pc_ += sizeof(uint8_t);
+}
+
+
+void Assembler::dd(uint32_t data) {
+  CheckBuffer();
+  *reinterpret_cast<uint32_t*>(pc_) = data;
+  pc_ += sizeof(uint32_t);
+}
+
+
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
   if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
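
The vldr/vstr changes above add negative-offset support by folding the sign of the offset into the U bit (bit 23) of the instruction word. A standalone sketch of the double-register vldr encoding, under the assumption that B8, B12, and so on abbreviate 1 << 8, 1 << 12, etc., as in the V8 sources:

    #include <cassert>
    #include <cstdint>

    // Mirrors the vldr(DwVfpRegister, ...) emit above: a negative offset
    // clears U and negates the immediate; the offset must be a multiple
    // of 4 that fits the 8-bit immediate field.
    uint32_t EncodeVldrD(uint32_t cond, int vd, int rbase, int offset) {
      const uint32_t B8 = 1u << 8, B12 = 1u << 12, B16 = 1u << 16,
                     B20 = 1u << 20, B23 = 1u << 23;
      int u = 1;
      if (offset < 0) {
        offset = -offset;
        u = 0;
      }
      assert(offset % 4 == 0 && (offset / 4) < 256);
      return cond | u * B23 | 0xD1 * B20 | rbase * B16 | vd * B12 |
             0xB * B8 | ((offset / 4) & 255);
    }
    // E.g. an offset of -8 encodes with u == 0 and immediate field 2.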
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index ee4c9aa..36f7507 100644 (file)
@@ -69,7 +69,39 @@ namespace internal {
 //
 // Core register
 struct Register {
-  bool is_valid() const { return 0 <= code_ && code_ < 16; }
+  static const int kNumRegisters = 16;
+  static const int kNumAllocatableRegisters = 8;
+
+  static int ToAllocationIndex(Register reg) {
+    return reg.code();
+  }
+
+  static Register FromAllocationIndex(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    return from_code(index);
+  }
+
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "r0",
+      "r1",
+      "r2",
+      "r3",
+      "r4",
+      "r5",
+      "r6",
+      "r7",
+    };
+    return names[index];
+  }
+
+  static Register from_code(int code) {
+    Register r = { code };
+    return r;
+  }
+
+  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(Register reg) const { return code_ == reg.code_; }
   int code() const {
     ASSERT(is_valid());
@@ -132,6 +164,48 @@ struct SwVfpRegister {
 
 // Double word VFP register.
 struct DwVfpRegister {
+  // d0 has been excluded from allocation. This follows ia32, where
+  // xmm0 is excluded. This should be revisited.
+  static const int kNumRegisters = 16;
+  static const int kNumAllocatableRegisters = 15;
+
+  static int ToAllocationIndex(DwVfpRegister reg) {
+    ASSERT(reg.code() != 0);
+    return reg.code() - 1;
+  }
+
+  static DwVfpRegister FromAllocationIndex(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    return from_code(index + 1);
+  }
+
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "d1",
+      "d2",
+      "d3",
+      "d4",
+      "d5",
+      "d6",
+      "d7",
+      "d8",
+      "d9",
+      "d10",
+      "d11",
+      "d12",
+      "d13",
+      "d14",
+      "d15"
+    };
+    return names[index];
+  }
+
+  static DwVfpRegister from_code(int code) {
+    DwVfpRegister r = { code };
+    return r;
+  }
+
   // Supporting d0 to d15, can be later extended to d31.
   bool is_valid() const { return 0 <= code_ && code_ < 16; }
   bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
@@ -167,6 +241,9 @@ struct DwVfpRegister {
 };
 
 
+typedef DwVfpRegister DoubleRegister;
+
+
 // Support for the VFP registers s0 to s31 (d0 to d15).
 // Note that "s(N):s(N+1)" is the same as "d(N/2)".
 const SwVfpRegister s0  = {  0 };
@@ -286,6 +363,9 @@ enum Coprocessor {
 
 // Condition field in instructions.
 enum Condition {
+  // any value < 0 is considered no_condition
+  no_condition  = -1,
+
   eq =  0 << 28,  // Z set            equal.
   ne =  1 << 28,  // Z clear          not equal.
   nz =  1 << 28,  // Z clear          not zero.
@@ -527,7 +607,7 @@ class CpuFeatures : public AllStatic {
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  static void Probe();
+  static void Probe(bool portable);
 
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(CpuFeature f) {
@@ -1148,15 +1228,20 @@ class Assembler : public Malloced {
   void RecordDebugBreakSlot();
 
   // Record a comment relocation entry that can be used by a disassembler.
-  // Use --debug_code to enable.
+  // Use --code-comments to enable.
   void RecordComment(const char* msg);
 
+  // Writes a single byte or word of data in the code stream.  Used for
+  // inline tables, e.g., jump-tables.
+  void db(uint8_t data);
+  void dd(uint32_t data);
+
   int pc_offset() const { return pc_ - buffer_; }
 
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
 
   bool can_peephole_optimize(int instructions) {
-    if (!FLAG_peephole_optimization) return false;
+    if (!allow_peephole_optimization_) return false;
     if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
     return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
   }
@@ -1185,6 +1270,8 @@ class Assembler : public Malloced {
   static bool IsLdrPcImmediateOffset(Instr instr);
   static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
 
+  // Check if it is time to emit a constant pool for pending reloc info entries
+  void CheckConstPool(bool force_emit, bool require_jump);
 
  protected:
   int buffer_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1201,9 +1288,6 @@ class Assembler : public Malloced {
   // Patch branch instruction at pos to branch to given branch target pos
   void target_at_put(int pos, int target_pos);
 
-  // Check if is time to emit a constant pool for pending reloc info entries
-  void CheckConstPool(bool force_emit, bool require_jump);
-
   // Block the emission of the constant pool before pc_offset
   void BlockConstPoolBefore(int pc_offset) {
     if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
@@ -1317,6 +1401,7 @@ class Assembler : public Malloced {
   friend class BlockConstPoolScope;
 
   PositionsRecorder positions_recorder_;
+  bool allow_peephole_optimization_;
   friend class PositionsRecorder;
   friend class EnsureSpace;
 };
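
The new allocation-index mappings above exist so the register allocator can work with dense indices: core registers r0..r7 map to indices 0..7 directly, while double registers shift by one because d0 is reserved. A sketch of the double-register round trip, with plain ints standing in for register codes:

    // d1..d15 (codes 1..15) map to allocation indices 0..14 and back,
    // matching DwVfpRegister::ToAllocationIndex / FromAllocationIndex above.
    int DoubleToAllocationIndex(int code) { return code - 1; }      // d1 -> 0
    int DoubleFromAllocationIndex(int index) { return index + 1; }  // 0 -> d1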
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 862ef39..6480a91 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,6 +31,8 @@
 
 #include "codegen-inl.h"
 #include "debug.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
 #include "runtime.h"
 
 namespace v8 {
@@ -1089,6 +1091,80 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
 }
 
 
+void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Preserve the function.
+  __ push(r1);
+
+  // Push the function on the stack as the argument to the runtime function.
+  __ push(r1);
+  __ CallRuntime(Runtime::kLazyRecompile, 1);
+  // Calculate the entry point.
+  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  // Restore saved function.
+  __ pop(r1);
+
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ Jump(r2);
+}
+
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  __ EnterInternalFrame();
+  // Pass the function and deoptimization type to the runtime system.
+  __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
+  __ push(r0);
+  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+  __ LeaveInternalFrame();
+
+  // Get the full codegen state from the stack and untag it -> r6.
+  __ ldr(r6, MemOperand(sp, 0 * kPointerSize));
+  __ SmiUntag(r6);
+  // Switch on the state.
+  Label with_tos_register, unknown_state;
+  __ cmp(r6, Operand(FullCodeGenerator::NO_REGISTERS));
+  __ b(ne, &with_tos_register);
+  __ add(sp, sp, Operand(1 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&with_tos_register);
+  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
+  __ cmp(r6, Operand(FullCodeGenerator::TOS_REG));
+  __ b(ne, &unknown_state);
+  __ add(sp, sp, Operand(2 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&unknown_state);
+  __ stop("no cases left");
+}
+
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
+  __ stop("builtins-arm.cc: NotifyOSR");
+}
+
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  __ stop("builtins-arm.cc: OnStackReplacement");
+}
+
+
 void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // 1. Make sure we have at least one argument.
   // r0: actual number of arguments
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index b42d627..3670765 100644 (file)
@@ -82,12 +82,15 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // write barrier because the allocated object is in new space.
   __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
   __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
+  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
   __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
   __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
   __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
   __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
   __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
   __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
+  __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
+
 
   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
@@ -1088,6 +1091,10 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
   Label not_heap_number;
   Register scratch = r7;
 
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(tos_, ip);
+  __ b(eq, &false_result);
+
   // HeapNumber => false iff +0, -0, or NaN.
   __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
@@ -2200,6 +2207,14 @@ Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
 }
 
 
+Handle<Code> GetTypeRecordingBinaryOpStub(int key,
+    TRBinaryOpIC::TypeInfo type_info,
+    TRBinaryOpIC::TypeInfo result_type_info) {
+  UNIMPLEMENTED();
+  return Handle<Code>::null();
+}
+
+
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   // Argument is a number and is on stack and in r0.
   Label runtime_call;
@@ -2641,7 +2656,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
   // r0:r1: result
   // sp: stack pointer
   // fp: frame pointer
-  __ LeaveExitFrame();
+  __ LeaveExitFrame(save_doubles_);
 
   // check if we should retry or throw exception
   Label retry;
@@ -2690,7 +2705,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
   // builtin once.
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame();
+  __ EnterExitFrame(save_doubles_);
 
   // r4: number of arguments (C callee-saved)
   // r5: pointer to builtin function (C callee-saved)
@@ -2778,6 +2793,15 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   // Setup frame pointer for the frame to be pushed.
   __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
 
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If this is the outermost JS call, set js_entry_sp value.
+  ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
+  __ ldr(r6, MemOperand(r5));
+  __ cmp(r6, Operand(0, RelocInfo::NONE));
+  __ str(fp, MemOperand(r5), eq);
+#endif
+
   // Call a faked try-block that does the invoke.
   __ bl(&invoke);
 
@@ -2840,6 +2864,15 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   // No need to restore registers
   __ add(sp, sp, Operand(StackHandlerConstants::kSize));
 
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If current FP value is the same as js_entry_sp value, it means that
+  // the current function is the outermost.
+  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
+  __ ldr(r6, MemOperand(r5));
+  __ cmp(fp, Operand(r6));
+  __ mov(r6, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+  __ str(r6, MemOperand(r5), eq);
+#endif
 
   __ bind(&exit);  // r0 holds result
   // Restore the top frame descriptors from the stack.
@@ -3430,6 +3463,95 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
 }
 
 
+void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
+  const int kMaxInlineLength = 100;
+  Label slowcase;
+  Label done;
+  __ ldr(r1, MemOperand(sp, kPointerSize * 2));
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(ne, &slowcase);
+  __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
+  __ b(hi, &slowcase);
+  // Smi-tagging is equivalent to multiplying by 2.
+  // Allocate RegExpResult followed by FixedArray with the total size in r2.
+  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
+  // Elements:  [Map][Length][..elements..]
+  // Size of JSArray with two in-object properties and the header of a
+  // FixedArray.
+  int objects_size =
+      (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
+  __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
+  __ add(r2, r5, Operand(objects_size));
+  __ AllocateInNewSpace(
+      r2,  // In: Size, in words.
+      r0,  // Out: Start of allocation (tagged).
+      r3,  // Scratch register.
+      r4,  // Scratch register.
+      &slowcase,
+      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+  // r0: Start of allocated area, object-tagged.
+  // r1: Number of elements in array, as smi.
+  // r5: Number of elements, untagged.
+
+  // Set JSArray map to global.regexp_result_map().
+  // Set empty properties FixedArray.
+  // Set elements to point to FixedArray allocated right after the JSArray.
+  // Interleave operations for better latency.
+  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ add(r3, r0, Operand(JSRegExpResult::kSize));
+  __ mov(r4, Operand(Factory::empty_fixed_array()));
+  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+  __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
+  __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
+  __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+
+  // Set input, index and length fields from arguments.
+  __ ldr(r1, MemOperand(sp, kPointerSize * 0));
+  __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
+  __ ldr(r1, MemOperand(sp, kPointerSize * 1));
+  __ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
+  __ ldr(r1, MemOperand(sp, kPointerSize * 2));
+  __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
+
+  // Fill out the elements FixedArray.
+  // r0: JSArray, tagged.
+  // r3: FixedArray, tagged.
+  // r5: Number of elements in array, untagged.
+
+  // Set map.
+  __ mov(r2, Operand(Factory::fixed_array_map()));
+  __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+  // Set FixedArray length.
+  __ mov(r6, Operand(r5, LSL, kSmiTagSize));
+  __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
+  // Fill contents of fixed-array with the-hole.
+  __ mov(r2, Operand(Factory::the_hole_value()));
+  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // Fill fixed array elements with hole.
+  // r0: JSArray, tagged.
+  // r2: the hole.
+  // r3: Start of elements in FixedArray.
+  // r5: Number of elements to fill.
+  Label loop;
+  __ tst(r5, Operand(r5));
+  __ bind(&loop);
+  __ b(le, &done);  // Jump if r5 is negative or zero.
+  __ sub(r5, r5, Operand(1), SetCC);
+  __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
+  __ jmp(&loop);
+
+  __ bind(&done);
+  __ add(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  __ bind(&slowcase);
+  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
+}
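
A worked example of the allocation arithmetic above, in plain C++ with
assumed sizes standing in for JSRegExpResult::kSize and
FixedArray::kHeaderSize. On 32-bit ARM a smi is the value shifted left by
one, so untagging is a single right shift:

    const int kPointerSizeSketch = 4;           // 32-bit ARM
    const int kJSRegExpResultSizeSketch = 24;   // assumed, in bytes
    const int kFixedArrayHeaderSizeSketch = 8;  // assumed, in bytes

    int AllocationSizeInWords(int smi_tagged_length) {
      int length = smi_tagged_length >> 1;  // untag: tagging doubled it
      int objects_size =
          (kJSRegExpResultSizeSketch + kFixedArrayHeaderSizeSketch) /
          kPointerSizeSketch;
      return objects_size + length;  // fixed header words + one per element
    }
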
+
+
 void CallFunctionStub::Generate(MacroAssembler* masm) {
   Label slow;
 
@@ -4722,6 +4844,123 @@ void StringAddStub::Generate(MacroAssembler* masm) {
 }
 
 
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::SMIS);
+  Label miss;
+  __ orr(r2, r1, r0);
+  __ tst(r2, Operand(kSmiTagMask));
+  __ b(ne, &miss);
+
+  if (GetCondition() == eq) {
+    // For equality we do not care about the sign of the result.
+    __ sub(r0, r0, r1, SetCC);
+  } else {
+    __ sub(r1, r1, r0, SetCC);
+    // Correct sign of result in case of overflow.
+    __ rsb(r1, r1, Operand(0), SetCC, vs);
+    __ mov(r0, r1);
+  }
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
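
The sub/rsb pair above is an overflow-corrected three-way comparison: the
32-bit difference has the wrong sign exactly when the subtraction overflows,
and the conditional reverse-subtract flips it back. The same logic in plain
C++ (a sketch using the GCC/Clang __builtin_sub_overflow intrinsic, not V8
source):

    #include <cstdint>

    int32_t CompareSmisLikeStub(int32_t left, int32_t right) {
      int32_t diff;
      if (__builtin_sub_overflow(left, right, &diff)) {
        diff = -diff;  // rsb: correct the sign when V was set
      }
      return diff;  // < 0, == 0, > 0 correspond to lt, eq, gt
    }
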
+
+
+void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+
+  Label generic_stub;
+  Label unordered;
+  Label miss;
+  __ and_(r2, r1, Operand(r0));
+  __ tst(r2, Operand(kSmiTagMask));
+  __ b(eq, &generic_stub);
+
+  __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
+  __ b(ne, &miss);
+  __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
+  __ b(ne, &miss);
+
+  // Inlining the double comparison and falling back to the general compare
+  // stub if NaN is involved or VFP3 is unsupported.
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+
+    // Load left and right operand
+    __ sub(r2, r1, Operand(kHeapObjectTag));
+    __ vldr(d0, r2, HeapNumber::kValueOffset);
+    __ sub(r2, r0, Operand(kHeapObjectTag));
+    __ vldr(d1, r2, HeapNumber::kValueOffset);
+
+    // Compare operands
+    __ vcmp(d0, d1);
+    __ vmrs(pc);  // Move vector status bits to normal status bits.
+
+    // Don't base result on status bits when a NaN is involved.
+    __ b(vs, &unordered);
+
+    // Return a result of -1, 0, or 1, based on status bits.
+    __ mov(r0, Operand(EQUAL), LeaveCC, eq);
+    __ mov(r0, Operand(LESS), LeaveCC, lt);
+    __ mov(r0, Operand(GREATER), LeaveCC, gt);
+    __ Ret();
+
+    __ bind(&unordered);
+  }
+
+  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
+  __ bind(&generic_stub);
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
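
The three conditional movs after vcmp/vmrs collapse the VFP flags into a
single integer result. The equivalent mapping in plain C++, assuming LESS,
EQUAL and GREATER are -1, 0 and 1 and that the NaN case has already branched
to the unordered label:

    int DoubleCompareResult(double lhs, double rhs) {
      if (lhs == rhs) return 0;  // eq -> EQUAL
      if (lhs < rhs) return -1;  // lt -> LESS
      return 1;                  // gt -> GREATER (NaN handled earlier)
    }
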
+
+
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::OBJECTS);
+  Label miss;
+  __ and_(r2, r1, Operand(r0));
+  __ tst(r2, Operand(kSmiTagMask));
+  __ b(eq, &miss);
+
+  __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
+  __ b(ne, &miss);
+  __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
+  __ b(ne, &miss);
+
+  ASSERT(GetCondition() == eq);
+  __ sub(r0, r0, Operand(r1));
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+  __ Push(r1, r0);
+  __ push(lr);
+
+  // Call the runtime system in a fresh internal frame.
+  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
+  __ EnterInternalFrame();
+  __ Push(r1, r0);
+  __ mov(ip, Operand(Smi::FromInt(op_)));
+  __ push(ip);
+  __ CallExternalReference(miss, 3);
+  __ LeaveInternalFrame();
+  // Compute the entry point of the rewritten stub.
+  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  // Restore registers.
+  __ pop(lr);
+  __ pop(r0);
+  __ pop(r1);
+  __ Jump(r2);
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 2e07e3b..8ffca77 100644
@@ -106,9 +106,9 @@ class GenericBinaryOpStub : public CodeStub {
   // Minor key encoding in 17 bits.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 6> {};
-  class TypeInfoBits: public BitField<int, 8, 2> {};
-  class RegisterBits: public BitField<bool, 10, 1> {};
-  class KnownIntBits: public BitField<int, 11, kKnownRhsKeyBits> {};
+  class TypeInfoBits: public BitField<int, 8, 3> {};
+  class RegisterBits: public BitField<bool, 11, 1> {};
+  class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {};
 
   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
@@ -196,6 +196,10 @@ class GenericBinaryOpStub : public CodeStub {
 
   const char* GetName();
 
+  virtual void FinishCode(Code* code) {
+    code->set_binary_op_type(runtime_operands_type_);
+  }
+
 #ifdef DEBUG
   void Print() {
     if (!specialized_on_rhs_) {
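
The repacking above widens TypeInfoBits from two to three bits and shifts
the following fields up by one. A minimal sketch of the BitField pattern
these declarations rely on, assuming it matches V8's template (a value
packed into bits [shift, shift + size) of the minor key):

    #include <cstdint>

    template <class T, int shift, int size>
    struct BitFieldSketch {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t key) {
        return static_cast<T>((key & kMask) >> shift);
      }
    };

With TypeInfoBits occupying bits 8-10, RegisterBits must start at bit 11 and
KnownIntBits at bit 12, exactly as in the new declarations.
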
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index ea57d2d..2b0b324 100644
@@ -36,7 +36,7 @@
 #include "debug.h"
 #include "ic-inl.h"
 #include "jsregexp.h"
-#include "jump-target-light-inl.h"
+#include "jump-target-inl.h"
 #include "parser.h"
 #include "regexp-macro-assembler.h"
 #include "regexp-stack.h"
@@ -79,12 +79,12 @@ void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
 }
 
 
-void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterInternalFrame();
 }
 
 
-void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
   masm->LeaveInternalFrame();
 }
 
@@ -165,6 +165,9 @@ void CodeGenerator::Generate(CompilationInfo* info) {
 
   int slots = scope()->num_parameters() + scope()->num_stack_slots();
   ScopedVector<TypeInfo> type_info_array(slots);
+  for (int i = 0; i < slots; i++) {
+    type_info_array[i] = TypeInfo::Unknown();
+  }
   type_info_ = &type_info_array;
 
   ASSERT(allocator_ == NULL);
@@ -5416,97 +5419,14 @@ void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
 
 
 void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
-  // No stub. This code only occurs a few times in regexp.js.
-  const int kMaxInlineLength = 100;
   ASSERT_EQ(3, args->length());
+
   Load(args->at(0));  // Size of array, smi.
   Load(args->at(1));  // "index" property value.
   Load(args->at(2));  // "input" property value.
-  {
-    VirtualFrame::SpilledScope spilled_scope(frame_);
-    Label slowcase;
-    Label done;
-    __ ldr(r1, MemOperand(sp, kPointerSize * 2));
-    STATIC_ASSERT(kSmiTag == 0);
-    STATIC_ASSERT(kSmiTagSize == 1);
-    __ tst(r1, Operand(kSmiTagMask));
-    __ b(ne, &slowcase);
-    __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
-    __ b(hi, &slowcase);
-    // Smi-tagging is equivalent to multiplying by 2.
-    // Allocate RegExpResult followed by FixedArray with size in ebx.
-    // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
-    // Elements:  [Map][Length][..elements..]
-    // Size of JSArray with two in-object properties and the header of a
-    // FixedArray.
-    int objects_size =
-        (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
-    __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
-    __ add(r2, r5, Operand(objects_size));
-    __ AllocateInNewSpace(
-        r2,  // In: Size, in words.
-        r0,  // Out: Start of allocation (tagged).
-        r3,  // Scratch register.
-        r4,  // Scratch register.
-        &slowcase,
-        static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-    // r0: Start of allocated area, object-tagged.
-    // r1: Number of elements in array, as smi.
-    // r5: Number of elements, untagged.
-
-    // Set JSArray map to global.regexp_result_map().
-    // Set empty properties FixedArray.
-    // Set elements to point to FixedArray allocated right after the JSArray.
-    // Interleave operations for better latency.
-    __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
-    __ add(r3, r0, Operand(JSRegExpResult::kSize));
-    __ mov(r4, Operand(Factory::empty_fixed_array()));
-    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
-    __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
-    __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
-    __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
-    __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
-
-    // Set input, index and length fields from arguments.
-    __ ldm(ia_w, sp, static_cast<RegList>(r2.bit() | r4.bit()));
-    __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
-    __ add(sp, sp, Operand(kPointerSize));
-    __ str(r4, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
-    __ str(r2, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
-
-    // Fill out the elements FixedArray.
-    // r0: JSArray, tagged.
-    // r3: FixedArray, tagged.
-    // r5: Number of elements in array, untagged.
-
-    // Set map.
-    __ mov(r2, Operand(Factory::fixed_array_map()));
-    __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
-    // Set FixedArray length.
-    __ mov(r6, Operand(r5, LSL, kSmiTagSize));
-    __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
-    // Fill contents of fixed-array with the-hole.
-    __ mov(r2, Operand(Factory::the_hole_value()));
-    __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    // Fill fixed array elements with hole.
-    // r0: JSArray, tagged.
-    // r2: the hole.
-    // r3: Start of elements in FixedArray.
-    // r5: Number of elements to fill.
-    Label loop;
-    __ tst(r5, Operand(r5));
-    __ bind(&loop);
-    __ b(le, &done);  // Jump if r1 is negative or zero.
-    __ sub(r5, r5, Operand(1), SetCC);
-    __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
-    __ jmp(&loop);
-
-    __ bind(&slowcase);
-    __ CallRuntime(Runtime::kRegExpConstructResult, 3);
-
-    __ bind(&done);
-  }
-  frame_->Forget(3);
+  RegExpConstructResultStub stub;
+  frame_->SpillAll();
+  frame_->CallStub(&stub, 3);
   frame_->EmitPush(r0);
 }
 
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 4e672b2..589e704 100644
@@ -209,6 +209,9 @@ class CodeGenerator: public AstVisitor {
                                        Code::Flags flags,
                                        CompilationInfo* info);
 
+  // Print the code after compiling it.
+  static void PrintCode(Handle<Code> code, CompilationInfo* info);
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
 #endif
@@ -305,8 +308,9 @@ class CodeGenerator: public AstVisitor {
   // Node visitors.
   void VisitStatements(ZoneList<Statement*>* statements);
 
+  virtual void VisitSlot(Slot* node);
 #define DEF_VISIT(type) \
-  void Visit##type(type* node);
+  virtual void Visit##type(type* node);
   AST_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
 
@@ -579,6 +583,7 @@ class CodeGenerator: public AstVisitor {
   friend class FastCodeGenerator;
   friend class FullCodeGenerator;
   friend class FullCodeGenSyntaxChecker;
+  friend class LCodeGen;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index e998b6f..b359dce 100644
@@ -42,7 +42,10 @@ namespace v8 {
 namespace internal {
 
 void CPU::Setup() {
-  CpuFeatures::Probe();
+  CpuFeatures::Probe(true);
+  if (!CpuFeatures::IsSupported(VFP3) || Serializer::enabled()) {
+    V8::DisableCrankshaft();
+  }
 }
 
 
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
new file mode 100644
index 0000000..e7a669d
--- /dev/null
+++ b/src/arm/deoptimizer-arm.cc
@@ -0,0 +1,503 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+int Deoptimizer::table_entry_size_ = 16;
+
+void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+  AssertNoAllocation no_allocation;
+
+  if (!function->IsOptimized()) return;
+
+  // Get the optimized code.
+  Code* code = function->code();
+
+  // Invalidate the relocation information, as it will become invalid by the
+  // code patching below, and is not needed any more.
+  code->InvalidateRelocation();
+
+  // For each return after a safepoint insert an absolute call to the
+  // corresponding deoptimization entry.
+  unsigned last_pc_offset = 0;
+  SafepointTable table(function->code());
+  for (unsigned i = 0; i < table.length(); i++) {
+    unsigned pc_offset = table.GetPcOffset(i);
+    int deoptimization_index = table.GetDeoptimizationIndex(i);
+    int gap_code_size = table.GetGapCodeSize(i);
+    // Check that we did not shoot past the next safepoint.
+    // TODO(srdjan): How do we guarantee that safepoint code does not
+    // overlap other safepoint patching code?
+    CHECK(pc_offset >= last_pc_offset);
+#ifdef DEBUG
+    // Destroy the code which is not supposed to be run again.
+    int instructions = (pc_offset - last_pc_offset) / Assembler::kInstrSize;
+    CodePatcher destroyer(code->instruction_start() + last_pc_offset,
+                          instructions);
+    for (int x = 0; x < instructions; x++) {
+      destroyer.masm()->bkpt(0);
+    }
+#endif
+    last_pc_offset = pc_offset;
+    if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
+      const int kCallInstructionSizeInWords = 3;
+      CodePatcher patcher(code->instruction_start() + pc_offset + gap_code_size,
+                          kCallInstructionSizeInWords);
+      Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry(
+          deoptimization_index, Deoptimizer::LAZY);
+      patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE);
+      last_pc_offset +=
+          gap_code_size + kCallInstructionSizeInWords * Assembler::kInstrSize;
+    }
+  }
+
+
+#ifdef DEBUG
+  // Destroy the code which is not supposed to be run again.
+  int instructions =
+      (code->safepoint_table_start() - last_pc_offset) / Assembler::kInstrSize;
+  CodePatcher destroyer(code->instruction_start() + last_pc_offset,
+                        instructions);
+  for (int x = 0; x < instructions; x++) {
+    destroyer.masm()->bkpt(0);
+  }
+#endif
+
+  // Add the deoptimizing code to the list.
+  DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
+  node->set_next(deoptimizing_code_list_);
+  deoptimizing_code_list_ = node;
+
+  // Set the code for the function to non-optimized version.
+  function->ReplaceCode(function->shared()->code());
+
+  if (FLAG_trace_deopt) {
+    PrintF("[forced deoptimization: ");
+    function->PrintName();
+    PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
+  }
+}
+
+
+void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
+                                      Code* replacement_code) {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::DoComputeOsrOutputFrame() {
+  UNIMPLEMENTED();
+}
+
+
+// This code is very similar to ia32 code, but relies on register names (fp, sp)
+// and how the frame is laid out.
+void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
+                                 int frame_index) {
+  // Read the ast node id, function, and frame height for this output frame.
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  USE(opcode);
+  ASSERT(Translation::FRAME == opcode);
+  int node_id = iterator->Next();
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  unsigned height = iterator->Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  if (FLAG_trace_deopt) {
+    PrintF("  translating ");
+    function->PrintName();
+    PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+  }
+
+  // The 'fixed' part of the frame consists of the incoming parameters and
+  // the part described by JavaScriptFrameConstants.
+  unsigned fixed_frame_size = ComputeFixedSize(function);
+  unsigned input_frame_size = input_->GetFrameSize();
+  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+  // Allocate and store the output frame description.
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, function);
+
+  bool is_bottommost = (0 == frame_index);
+  bool is_topmost = (output_count_ - 1 == frame_index);
+  ASSERT(frame_index >= 0 && frame_index < output_count_);
+  ASSERT(output_[frame_index] == NULL);
+  output_[frame_index] = output_frame;
+
+  // The top address for the bottommost output frame can be computed from
+  // the input frame pointer and the output frame's height.  For all
+  // subsequent output frames, it can be computed from the previous one's
+  // top address and the current frame's size.
+  uint32_t top_address;
+  if (is_bottommost) {
+    // 2 = context and function in the frame.
+    top_address =
+        input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
+  } else {
+    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  }
+  output_frame->SetTop(top_address);
+
+  // Compute the incoming parameter translation.
+  int parameter_count = function->shared()->formal_parameter_count() + 1;
+  unsigned output_offset = output_frame_size;
+  unsigned input_offset = input_frame_size;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+  input_offset -= (parameter_count * kPointerSize);
+
+  // There are no translation commands for the caller's pc and fp, the
+  // context, and the function.  Synthesize their values and set them up
+  // explicitly.
+  //
+  // The caller's pc for the bottommost output frame is the same as in the
+  // input frame.  For all subsequent output frames, it can be read from the
+  // previous one.  This frame's pc can be computed from the non-optimized
+  // function code and AST id of the bailout.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  uint32_t value;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = output_[frame_index - 1]->GetPc();
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The caller's frame pointer for the bottommost output frame is the same
+  // as in the input frame.  For all subsequent output frames, it can be
+  // read from the previous one.  Also compute and set this frame's frame
+  // pointer.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = output_[frame_index - 1]->GetFp();
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  unsigned fp_value = top_address + output_offset;
+  ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
+  output_frame->SetFp(fp_value);
+  if (is_topmost) {
+    output_frame->SetRegister(fp.code(), fp_value);
+  }
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+           fp_value, output_offset, value);
+  }
+
+  // The context can be read from the function as long as we don't
+  // optimize functions that need local contexts.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  value = reinterpret_cast<uint32_t>(function->context());
+  // The context for the bottommost output frame should also agree with the
+  // input frame.
+  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (is_topmost) {
+    output_frame->SetRegister(cp.code(), value);
+  }
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The function was mentioned explicitly in the BEGIN_FRAME.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  value = reinterpret_cast<uint32_t>(function);
+  // The function for the bottommost output frame should also agree with the
+  // input frame.
+  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // Translate the rest of the frame.
+  for (unsigned i = 0; i < height; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+  ASSERT(0 == output_offset);
+
+  // Compute this frame's PC, state, and continuation.
+  Code* non_optimized_code = function->shared()->code();
+  FixedArray* raw_data = non_optimized_code->deoptimization_data();
+  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+  Address start = non_optimized_code->instruction_start();
+  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
+  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+  uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
+  output_frame->SetPc(pc_value);
+  if (is_topmost) {
+    output_frame->SetRegister(pc.code(), pc_value);
+  }
+
+  FullCodeGenerator::State state =
+      FullCodeGenerator::StateField::decode(pc_and_state);
+  output_frame->SetState(Smi::FromInt(state));
+
+  // Set the continuation for the topmost frame.
+  if (is_topmost) {
+    Code* continuation = (bailout_type_ == EAGER)
+        ? Builtins::builtin(Builtins::NotifyDeoptimized)
+        : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
+    output_frame->SetContinuation(
+        reinterpret_cast<uint32_t>(continuation->entry()));
+  }
+
+  if (output_count_ - 1 == frame_index) iterator->Done();
+}
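
A worked example of the sizes computed above, under the frame layout the
comments describe (a sketch; the real slot count comes from
ComputeFixedSize): for a function with two formal parameters and an
expression-stack height of three, the output frame holds three parameter
slots (arguments plus receiver), the caller's pc and fp, the context and the
function, plus three expression slots, i.e. ten words or 40 bytes on 32-bit
ARM:

    int OutputFrameSizeInBytes(int formal_parameter_count, int height) {
      const int kPointerSizeSketch = 4;  // 32-bit ARM
      int fixed_slots = (formal_parameter_count + 1)  // args + receiver
                        + 4;  // caller pc, caller fp, context, function
      return (fixed_slots + height) * kPointerSizeSketch;
    }
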
+
+
+#define __ masm()->
+
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Deoptimizer::EntryGenerator::Generate() {
+  GeneratePrologue();
+  // TOS: bailout-id; TOS+1: return address if not EAGER.
+  CpuFeatures::Scope scope(VFP3);
+  // Save all general purpose registers before messing with them.
+  const int kNumberOfRegisters = Register::kNumRegisters;
+
+  // Everything but pc, lr and ip which will be saved but not restored.
+  RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
+
+  const int kDoubleRegsSize =
+      kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
+
+  // Save all general purpose registers before messing with them.
+  __ sub(sp, sp, Operand(kDoubleRegsSize));
+  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
+    DwVfpRegister vfp_reg = DwVfpRegister::FromAllocationIndex(i);
+    int offset = i * kDoubleSize;
+    __ vstr(vfp_reg, sp, offset);
+  }
+
+  // Push all 16 registers (needed to populate FrameDescription::registers_).
+  __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
+
+  const int kSavedRegistersAreaSize =
+      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+  // Get the bailout id from the stack.
+  __ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));
+
+  // Get the address of the location in the code object, if available (r3);
+  // this is the return address used for lazy deoptimization. Also compute
+  // the fp-to-sp delta in register r4.
+  if (type() == EAGER) {
+    __ mov(r3, Operand(0));
+    // Correct one word for bailout id.
+    __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+  } else {
+    __ mov(r3, lr);
+    // Correct two words for bailout id and return address.
+    __ add(r4, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
+  }
+  __ sub(r4, fp, r4);
+
+  // Allocate a new deoptimizer object.
+  // Pass four arguments in r0 to r3 and fifth argument on stack.
+  __ PrepareCallCFunction(5, r5);
+  __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(r1, Operand(type()));  // bailout type,
+  // r2: bailout id already loaded.
+  // r3: code address or 0 already loaded.
+  __ str(r4, MemOperand(sp, 0 * kPointerSize));  // Fp-to-sp delta.
+  // Call Deoptimizer::New().
+  __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
+
+  // Preserve "deoptimizer" object in register r0 and get the input
+  // frame descriptor pointer to r1 (deoptimizer->input_);
+  __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
+
+
+  // Copy core registers into FrameDescription::registers_[kNumRegisters].
+  ASSERT(Register::kNumRegisters == kNumberOfRegisters);
+  for (int i = 0; i < kNumberOfRegisters; i++) {
+    int offset = (i * kIntSize) + FrameDescription::registers_offset();
+    __ ldr(r2, MemOperand(sp, i * kPointerSize));
+    __ str(r2, MemOperand(r1, offset));
+  }
+
+  // Copy VFP registers to
+  // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+  int double_regs_offset = FrameDescription::double_registers_offset();
+  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
+    int dst_offset = i * kDoubleSize + double_regs_offset;
+    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+    __ vldr(d0, sp, src_offset);
+    __ vstr(d0, r1, dst_offset);
+  }
+
+  // Remove the bailout id, the return address (if present), and the saved
+  // registers from the stack.
+  if (type() == EAGER) {
+    __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+  } else {
+    __ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
+  }
+
+  // Compute a pointer to the unwinding limit in register r2; that is
+  // the first stack slot not part of the input frame.
+  __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
+  __ add(r2, r2, sp);
+
+  // Unwind the stack down to - but not including - the unwinding
+  // limit and copy the contents of the activation frame to the input
+  // frame description.
+  __ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
+  Label pop_loop;
+  __ bind(&pop_loop);
+  __ pop(r4);
+  __ str(r4, MemOperand(r3, 0));
+  __ add(r3, r3, Operand(sizeof(uint32_t)));
+  __ cmp(r2, sp);
+  __ b(ne, &pop_loop);
+
+  // Compute the output frame in the deoptimizer.
+  __ push(r0);  // Preserve deoptimizer object across call.
+  // r0: deoptimizer object; r1: scratch.
+  __ PrepareCallCFunction(1, r1);
+  // Call Deoptimizer::ComputeOutputFrames().
+  __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+  __ pop(r0);  // Restore deoptimizer object (class Deoptimizer).
+
+  // Replace the current (input) frame with the output frames.
+  Label outer_push_loop, inner_push_loop;
+  // Outer loop state: r0 = current "FrameDescription** output_",
+  // r1 = one past the last FrameDescription**.
+  __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
+  __ ldr(r0, MemOperand(r0, Deoptimizer::output_offset()));  // r0 is output_.
+  __ add(r1, r0, Operand(r1, LSL, 2));
+  __ bind(&outer_push_loop);
+  // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
+  __ ldr(r2, MemOperand(r0, 0));  // output_[ix]
+  __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
+  __ bind(&inner_push_loop);
+  __ sub(r3, r3, Operand(sizeof(uint32_t)));
+  // __ add(r6, r2, Operand(r3, LSL, 1));
+  __ add(r6, r2, Operand(r3));
+  __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
+  __ push(r7);
+  __ cmp(r3, Operand(0));
+  __ b(ne, &inner_push_loop);  // test for gt?
+  __ add(r0, r0, Operand(kPointerSize));
+  __ cmp(r0, r1);
+  __ b(lt, &outer_push_loop);
+
+  // In case of OSR, we have to restore the VFP registers.
+  if (type() == OSR) {
+    UNIMPLEMENTED();
+  }
+
+  // Push state, pc, and continuation from the last output frame.
+  if (type() != OSR) {
+    __ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
+    __ push(r6);
+  }
+
+  __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
+  __ push(r6);
+  __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
+  __ push(r6);
+
+  // Push the registers from the last output frame.
+  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+    int offset = (i * kIntSize) + FrameDescription::registers_offset();
+    __ ldr(r6, MemOperand(r2, offset));
+    __ push(r6);
+  }
+
+  // Restore the registers from the stack.
+  __ ldm(ia_w, sp, restored_regs);  // all but pc registers.
+  __ pop(ip);  // remove sp
+  __ pop(ip);  // remove lr
+
+  // Set up the roots register.
+  ExternalReference roots_address = ExternalReference::roots_address();
+  __ mov(r10, Operand(roots_address));
+
+  __ pop(ip);  // remove pc
+  __ pop(r7);  // get continuation, leave pc on stack
+  __ pop(lr);
+  __ Jump(r7);
+  __ stop("Unreachable.");
+}
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+  // Create a sequence of deoptimization entries. Note that any
+  // registers may be still live.
+  Label done;
+  for (int i = 0; i < count(); i++) {
+    int start = masm()->pc_offset();
+    USE(start);
+    if (type() == EAGER) {
+      __ nop();
+    } else {
+      // Emulate an ia32-style call by pushing the return address.
+      __ push(lr);
+    }
+    __ mov(ip, Operand(i));
+    __ push(ip);
+    __ b(&done);
+    ASSERT(masm()->pc_offset() - start == table_entry_size_);
+  }
+  __ bind(&done);
+}
+
+#undef __
+
+} }  // namespace v8::internal
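
Since each entry emitted by TableEntryGenerator::GeneratePrologue is padded
to table_entry_size_ (16 bytes, i.e. four ARM instructions), a bailout id
maps to its table entry with a single multiply. A sketch, assuming a
byte-addressed table base:

    #include <cstdint>

    uint8_t* DeoptEntryAddress(uint8_t* table_base, int id) {
      const int kTableEntrySize = 16;  // matches table_entry_size_ above
      return table_base + id * kTableEntrySize;
    }
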
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index b0c0990..d2726cf 100644
@@ -38,7 +38,12 @@ namespace internal {
 
 
 Address ExitFrame::ComputeStackPointer(Address fp) {
-  return fp + ExitFrameConstants::kSPOffset;
+  Address marker = Memory::Address_at(fp + ExitFrameConstants::kMarkerOffset);
+  Address sp = fp + ExitFrameConstants::kSPOffset;
+  if (marker == NULL) {
+    sp -= DwVfpRegister::kNumRegisters * kDoubleSize + 2 * kPointerSize;
+  }
+  return sp;
 }
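
A sketch of the adjustment above in plain C++: when the marker slot is 0 the
exit frame also holds all VFP double registers plus two extra words, so the
computed stack pointer sits that much lower (register count and word sizes
are assumptions standing in for the real constants):

    #include <cstdint>

    const int kDoubleSizeSketch = 8;
    const int kPointerSizeSketch = 4;
    const int kNumVfpRegistersSketch = 16;  // DwVfpRegister::kNumRegisters

    uintptr_t ComputeSpSketch(uintptr_t sp_from_fp, bool doubles_saved) {
      if (doubles_saved) {
        sp_from_fp -= kNumVfpRegistersSketch * kDoubleSizeSketch +
                      2 * kPointerSizeSketch;
      }
      return sp_from_fp;
    }
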
 
 
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 5847a6a..00c20ef 100644
@@ -74,6 +74,18 @@ static const RegList kCalleeSaved =
 static const int kNumCalleeSaved = 7 + kR9Available;
 
 
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
+static const int kNumSafepointRegisters = 16;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+static const int kNumSafepointSavedRegisters =
+    kNumJSCallerSaved + kNumCalleeSaved;
+
 // ----------------------------------------------------
 
 
@@ -99,7 +111,9 @@ class ExitFrameConstants : public AllStatic {
   static const int kCodeOffset = -1 * kPointerSize;
   static const int kSPOffset = -1 * kPointerSize;
 
-  static const int kSavedRegistersOffset = 0 * kPointerSize;
+  // TODO(regis): Use a patched sp value on the stack instead.
+  // A marker of 0 indicates that double registers are saved.
+  static const int kMarkerOffset = -2 * kPointerSize;
 
   // The caller fields are below the frame pointer on the stack.
   static const int kCallerFPOffset = +0 * kPointerSize;
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 633b5b4..7e4a280 100644
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -171,21 +171,20 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
     }
   }
 
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+
   // Check the stack for overflow or break request.
   { Comment cmnt(masm_, "[ Stack check");
-    __ LoadRoot(r2, Heap::kStackLimitRootIndex);
-    __ cmp(sp, Operand(r2));
+    PrepareForBailout(info->function(), NO_REGISTERS);
+    Label ok;
+    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+    __ cmp(sp, Operand(ip));
+    __ b(hs, &ok);
     StackCheckStub stub;
-    __ mov(ip,
-           Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
-                   RelocInfo::CODE_TARGET),
-           LeaveCC,
-           lo);
-    __ Call(ip, lo);
-  }
-
-  if (FLAG_trace) {
-    __ CallRuntime(Runtime::kTraceEnter, 0);
+    __ CallStub(&stub);
+    __ bind(&ok);
   }
 
   { Comment cmnt(masm_, "[ Body");
@@ -200,6 +199,25 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
     __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
   }
   EmitReturnSequence();
+
+  // Force emit the constant pool, so it doesn't get emitted in the middle
+  // of the stack check table.
+  masm()->CheckConstPool(true, false);
+}
+
+
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+  Comment cmnt(masm_, "[ Stack check");
+  Label ok;
+  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+  __ cmp(sp, Operand(ip));
+  __ b(hs, &ok);
+  StackCheckStub stub;
+  __ CallStub(&stub);
+  __ bind(&ok);
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+  RecordStackCheck(stmt->OsrEntryId());
 }
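
The fast path emitted above is just an unsigned comparison of sp against the
stack limit; the hs (unsigned greater-or-equal) branch skips the stub call.
In plain C++ terms:

    #include <cstdint>

    bool NeedsStackCheckSlowPath(uintptr_t sp, uintptr_t stack_limit) {
      return sp < stack_limit;  // below the limit: call StackCheckStub
    }
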
 
 
@@ -275,6 +293,7 @@ void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
 void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
   // For simplicity we always test the accumulator register.
   codegen()->Move(result_register(), slot);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
@@ -297,12 +316,16 @@ void FullCodeGenerator::StackValueContext::Plug(
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
   if (index == Heap::kUndefinedValueRootIndex ||
       index == Heap::kNullValueRootIndex ||
       index == Heap::kFalseValueRootIndex) {
-    __ b(false_label_);
+    if (false_label_ != fall_through_) __ b(false_label_);
   } else if (index == Heap::kTrueValueRootIndex) {
-    __ b(true_label_);
+    if (true_label_ != fall_through_) __ b(true_label_);
   } else {
     __ LoadRoot(result_register(), index);
     codegen()->DoTest(true_label_, false_label_, fall_through_);
@@ -321,29 +344,34 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
 
 
 void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
-  // Immediates can be pushed directly.
+  // Immediates cannot be pushed directly.
   __ mov(result_register(), Operand(lit));
   __ push(result_register());
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
   ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
-    __ b(false_label_);
+    if (false_label_ != fall_through_) __ b(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
-    __ b(true_label_);
+    if (true_label_ != fall_through_) __ b(true_label_);
   } else if (lit->IsString()) {
     if (String::cast(*lit)->length() == 0) {
-      __ b(false_label_);
+      if (false_label_ != fall_through_) __ b(false_label_);
     } else {
-      __ b(true_label_);
+      if (true_label_ != fall_through_) __ b(true_label_);
     }
   } else if (lit->IsSmi()) {
     if (Smi::cast(*lit)->value() == 0) {
-      __ b(false_label_);
+      if (false_label_ != fall_through_) __ b(false_label_);
     } else {
-      __ b(true_label_);
+      if (true_label_ != fall_through_) __ b(true_label_);
     }
   } else {
     // For simplicity we always test the accumulator register.
@@ -383,13 +411,14 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
-  ASSERT_EQ(materialize_true, materialize_false);
+  ASSERT(materialize_true == materialize_false);
   __ bind(materialize_true);
 }
 
@@ -424,8 +453,8 @@ void FullCodeGenerator::StackValueContext::Plug(
 
 void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
                                           Label* materialize_false) const {
-  ASSERT(materialize_false == false_label_);
   ASSERT(materialize_true == true_label_);
+  ASSERT(materialize_false == false_label_);
 }
 
 
@@ -449,6 +478,10 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
   if (flag) {
     if (true_label_ != fall_through_) __ b(true_label_);
   } else {
@@ -529,6 +562,33 @@ void FullCodeGenerator::Move(Slot* dst,
 }
 
 
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+                                                     bool should_normalize,
+                                                     Label* if_true,
+                                                     Label* if_false) {
+  // Only prepare for bailouts before splits if we're in a test
+  // context. Otherwise, we let the Visit function deal with the
+  // preparation to avoid preparing with the same AST id twice.
+  if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+  Label skip;
+  if (should_normalize) __ b(&skip);
+
+  ForwardBailoutStack* current = forward_bailout_stack_;
+  while (current != NULL) {
+    PrepareForBailout(current->expr(), state);
+    current = current->parent();
+  }
+
+  if (should_normalize) {
+    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+    __ cmp(r0, ip);
+    Split(eq, if_true, if_false, NULL);
+    __ bind(&skip);
+  }
+}
+
+
 void FullCodeGenerator::EmitDeclaration(Variable* variable,
                                         Variable::Mode mode,
                                         FunctionLiteral* function) {
@@ -651,6 +711,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
 
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
   ZoneList<CaseClause*>* clauses = stmt->cases();
   CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
 
@@ -716,6 +778,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
   }
 
   __ bind(nested_statement.break_target());
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
 }
 
 
@@ -830,25 +893,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   EmitAssignment(stmt->each());
 
   // Generate code for the body of the loop.
-  Label stack_limit_hit, stack_check_done;
   Visit(stmt->body());
 
-  __ StackLimitCheck(&stack_limit_hit);
-  __ bind(&stack_check_done);
-
   // Generate code for the going to the next element by incrementing
   // the index (smi) stored on top of the stack.
   __ bind(loop_statement.continue_target());
   __ pop(r0);
   __ add(r0, r0, Operand(Smi::FromInt(1)));
   __ push(r0);
-  __ b(&loop);
 
-  // Slow case for the stack limit check.
-  StackCheckStub stack_check_stub;
-  __ bind(&stack_limit_hit);
-  __ CallStub(&stack_check_stub);
-  __ b(&stack_check_done);
+  EmitStackCheck(stmt);
+  __ b(&loop);
 
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_target());
@@ -1195,12 +1250,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
         if (key->handle()->IsSymbol()) {
-          VisitForAccumulatorValue(value);
-          __ mov(r2, Operand(key->handle()));
-          __ ldr(r1, MemOperand(sp));
           if (property->emit_store()) {
+            VisitForAccumulatorValue(value);
+            __ mov(r2, Operand(key->handle()));
+            __ ldr(r1, MemOperand(sp));
             Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
             EmitCallIC(ic, RelocInfo::CODE_TARGET);
+            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+          } else {
+            VisitForEffect(value);
           }
           break;
         }
@@ -1295,6 +1353,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
     // Update the write barrier for the array store with r0 as the scratch
     // register.
     __ RecordWrite(r1, Operand(offset), r2, result_register());
+
+    PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
 
   if (result_saved) {
@@ -1341,13 +1401,27 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
       break;
     case KEYED_PROPERTY:
       if (expr->is_compound()) {
-        VisitForStackValue(property->obj());
-        VisitForAccumulatorValue(property->key());
+        if (property->is_arguments_access()) {
+          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+          __ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
+          __ push(r0);
+          __ mov(r0, Operand(property->key()->AsLiteral()->handle()));
+        } else {
+          VisitForStackValue(property->obj());
+          VisitForAccumulatorValue(property->key());
+        }
         __ ldr(r1, MemOperand(sp, 0));
         __ push(r0);
       } else {
-        VisitForStackValue(property->obj());
-        VisitForStackValue(property->key());
+        if (property->is_arguments_access()) {
+          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+          __ ldr(r1, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
+          __ mov(r0, Operand(property->key()->AsLiteral()->handle()));
+          __ Push(r1, r0);
+        } else {
+          VisitForStackValue(property->obj());
+          VisitForStackValue(property->key());
+        }
       }
       break;
   }
@@ -1367,6 +1441,12 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
       }
     }
 
+    // For property compound assignments we need another deoptimization
+    // point after the property load.
+    if (property != NULL) {
+      PrepareForBailoutForId(expr->compound_bailout_id(), TOS_REG);
+    }
+
     Token::Value op = expr->binary_op();
     ConstantOperand constant = ShouldInlineSmiCase(op)
         ? GetConstantOperand(op, expr->target(), expr->value())
@@ -1392,6 +1472,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
     } else {
       EmitBinaryOp(op, mode);
     }
+
+    // Deoptimization point in case the binary operation may have side effects.
+    PrepareForBailout(expr->binary_operation(), TOS_REG);
   } else {
     VisitForAccumulatorValue(expr->value());
   }
@@ -1676,13 +1759,14 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
   if (key->IsPropertyName()) {
     VisitForAccumulatorValue(expr->obj());
     EmitNamedPropertyLoad(expr);
+    context()->Plug(r0);
   } else {
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
     __ pop(r1);
     EmitKeyedPropertyLoad(expr);
+    context()->Plug(r0);
   }
-  context()->Plug(r0);
 }
 
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
@@ -1703,6 +1787,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
   EmitCallIC(ic, mode);
+  RecordJSReturnSite(expr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   context()->Plug(r0);
@@ -1736,6 +1821,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
   Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
   __ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize));  // Key.
   EmitCallIC(ic, mode);
+  RecordJSReturnSite(expr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   context()->DropAndPlug(1, r0);  // Drop the key still on the stack.
@@ -1756,6 +1842,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
   __ CallStub(&stub);
+  RecordJSReturnSite(expr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   context()->DropAndPlug(1, r0);
@@ -1763,6 +1850,12 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
 
 
 void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+  // We want to verify that RecordJSReturnSite gets called on all paths
+  // through this function.  Avoid early returns.
+  expr->return_is_recorded_ = false;
+#endif
+
   Comment cmnt(masm_, "[ Call");
   Expression* fun = expr->expression();
   Variable* var = fun->AsVariableProxy()->AsVariable();
@@ -1814,6 +1907,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
     InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
     CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
     __ CallStub(&stub);
+    RecordJSReturnSite(expr);
     // Restore context register.
     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
     context()->DropAndPlug(1, r0);
@@ -1918,6 +2012,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
     // Emit function call.
     EmitCallWithStub(expr);
   }
+
+#ifdef DEBUG
+  // RecordJSReturnSite should have been called.
+  ASSERT(expr->return_is_recorded_);
+#endif
 }
 
 
@@ -1965,8 +2064,9 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ BranchOnSmi(r0, if_true);
-  __ b(if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  __ tst(r0, Operand(kSmiTagMask));
+  Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
@@ -1984,6 +2084,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ tst(r0, Operand(kSmiTagMask | 0x80000000));
   Split(eq, if_true, if_false, fall_through);
 
@@ -2016,6 +2117,7 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
   __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
   __ b(lt, if_false);
   __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(le, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2036,6 +2138,7 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
 
   __ BranchOnSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(ge, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2058,6 +2161,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
   __ tst(r1, Operand(1 << Map::kIsUndetectable));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(ne, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2081,6 +2185,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
   // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
   // used in a few functions in runtime.js which should not normally be hit by
   // this compiler.
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ jmp(if_false);
   context()->Plug(if_true, if_false);
 }
@@ -2100,6 +2205,7 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
 
   __ BranchOnSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2120,6 +2226,7 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
 
   __ BranchOnSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2140,6 +2247,7 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
 
   __ BranchOnSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2171,6 +2279,7 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
   __ bind(&check_frame_marker);
   __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
   __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2193,6 +2302,7 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
 
   __ pop(r1);
   __ cmp(r0, r1);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2646,11 +2756,12 @@ void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
 
 
 void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+  RegExpConstructResultStub stub;
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
-  __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+  __ CallStub(&stub);
   context()->Plug(r0);
 }
 
@@ -2769,9 +2880,8 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
 
   __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
   __ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
-
-  __ b(eq, if_true);
-  __ b(if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
@@ -2894,6 +3004,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
       // Notice that the labels are swapped.
       context()->PrepareTest(&materialize_true, &materialize_false,
                              &if_false, &if_true, &fall_through);
+      if (context()->IsTest()) ForwardBailoutToChild(expr);
       VisitForControl(expr->expression(), if_true, if_false, fall_through);
       context()->Plug(if_false, if_true);  // Labels swapped.
       break;
@@ -3013,14 +3124,25 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
       __ push(r0);
       EmitNamedPropertyLoad(prop);
     } else {
-      VisitForStackValue(prop->obj());
-      VisitForAccumulatorValue(prop->key());
+      if (prop->is_arguments_access()) {
+        VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
+        __ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
+        __ push(r0);
+        __ mov(r0, Operand(prop->key()->AsLiteral()->handle()));
+      } else {
+        VisitForStackValue(prop->obj());
+        VisitForAccumulatorValue(prop->key());
+      }
       __ ldr(r1, MemOperand(sp, 0));
       __ push(r0);
       EmitKeyedPropertyLoad(prop);
     }
   }
 
+  // We need a second deoptimization point after loading the value
+  // in case evaluating the property load may have a side effect.
+  PrepareForBailout(expr->increment(), TOS_REG);
+
   // Call ToNumber only if operand is not a smi.
   Label no_conversion;
   __ BranchOnSmi(r0, &no_conversion);
@@ -3063,6 +3185,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
     __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
   }
   __ mov(r1, Operand(Smi::FromInt(count_value)));
+
+  // Record position before stub call.
+  SetSourcePosition(expr->position());
+
   GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, r1, r0);
   __ CallStub(&stub);
   __ bind(&done);
@@ -3129,6 +3255,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
     EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    PrepareForBailout(expr, TOS_REG);
     context()->Plug(r0);
   } else if (proxy != NULL &&
              proxy->var()->AsSlot() != NULL &&
@@ -3144,12 +3271,13 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
     __ mov(r0, Operand(proxy->name()));
     __ Push(cp, r0);
     __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+    PrepareForBailout(expr, TOS_REG);
     __ bind(&done);
 
     context()->Plug(r0);
   } else {
     // This expression cannot throw a reference error at the top level.
-    Visit(expr);
+    context()->HandleExpression(expr);
   }
 }
 
@@ -3174,6 +3302,8 @@ bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
   { AccumulatorValueContext context(this);
     VisitForTypeofValue(left_unary->expression());
   }
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
   if (check->Equals(Heap::number_symbol())) {
     __ tst(r0, Operand(kSmiTagMask));
     __ b(eq, if_true);
@@ -3277,6 +3407,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_JS);
+      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
       __ LoadRoot(ip, Heap::kTrueValueRootIndex);
       __ cmp(r0, ip);
       Split(eq, if_true, if_false, fall_through);
@@ -3286,6 +3417,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
       VisitForStackValue(expr->right());
       InstanceofStub stub;
       __ CallStub(&stub);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       // The stub returns 0 for true.
       __ tst(r0, r0);
       Split(eq, if_true, if_false, fall_through);
@@ -3344,6 +3476,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
           : NO_COMPARE_FLAGS;
       CompareStub stub(cc, strict, flags, r1, r0);
       __ CallStub(&stub);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ cmp(r0, Operand(0, RelocInfo::NONE));
       Split(cc, if_true, if_false, fall_through);
     }
@@ -3365,6 +3498,7 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
                          &if_true, &if_false, &fall_through);
 
   VisitForAccumulatorValue(expr->expression());
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ LoadRoot(r1, Heap::kNullValueRootIndex);
   __ cmp(r0, r1);
   if (expr->is_strict()) {
index ef7cf6a..a75d96b 100644 (file)
@@ -907,6 +907,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
 // Returns the code marker, or 0 if the code is not marked.
 static inline int InlinedICSiteMarker(Address address,
                                       Address* inline_end_address) {
+  if (V8::UseCrankshaft()) return 0;
+
   // If the instruction after the call site is not the pseudo instruction nop1
   // then this is not related to an inlined in-object property load. The nop1
   // instruction is located just after the call to the IC in the deferred code
@@ -940,6 +942,8 @@ static inline int InlinedICSiteMarker(Address address,
 
 
 bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+  if (V8::UseCrankshaft()) return false;
+
   // Find the end of the inlined code for handling the load if this is an
   // inlined IC call site.
   Address inline_end_address;
@@ -1019,6 +1023,8 @@ bool LoadIC::PatchInlinedContextualLoad(Address address,
 
 
 bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
+  if (V8::UseCrankshaft()) return false;
+
   // Find the end of the inlined code for the store if there is an
   // inlined version of the store.
   Address inline_end_address;
@@ -1069,6 +1075,8 @@ bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
 
 
 bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+  if (V8::UseCrankshaft()) return false;
+
   Address inline_end_address;
   if (InlinedICSiteMarker(address, &inline_end_address)
       != Assembler::PROPERTY_ACCESS_INLINED) {
@@ -1087,6 +1095,8 @@ bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
 
 
 bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+  if (V8::UseCrankshaft()) return false;
+
   // Find the end of the inlined code for handling the store if this is an
   // inlined IC call site.
   Address inline_end_address;
@@ -1315,7 +1325,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
   char_at_generator.GenerateFast(masm);
   __ Ret();
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm, call_helper);
 
   __ bind(&miss);
@@ -2307,9 +2317,72 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
 }
 
 
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : receiver
+  //  -- r2    : name
+  //  -- lr    : return address
+  // -----------------------------------
+
+  __ Push(r1, r2, r0);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+}
+
+
 #undef __
 
 
+Condition CompareIC::ComputeCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return lt;
+    case Token::LTE:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return ge;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
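+
+// Note (illustrative, not part of the original change): for 'a > b' the
+// generated compare reverses the operands and tests 'b < a', matching the
+// abstract relational comparison order required by ECMA-262; this is why
+// GT maps to lt and LTE to ge above.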
+
+
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+  HandleScope scope;
+  Handle<Code> rewritten;
+#ifdef DEBUG
+  State previous_state = GetState();
+#endif
+  State state = TargetState(x, y);
+  if (state == GENERIC) {
+    CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
+    rewritten = stub.GetCode();
+  } else {
+    ICCompareStub stub(op_, state);
+    rewritten = stub.GetCode();
+  }
+  set_target(*rewritten);
+
+#ifdef DEBUG
+  if (FLAG_trace_ic) {
+    PrintF("[CompareIC (%s->%s)#%s]\n",
+           GetStateName(previous_state),
+           GetStateName(state),
+           Token::Name(op_));
+  }
+#endif
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
new file mode 100644 (file)
index 0000000..682c448
--- /dev/null
@@ -0,0 +1,2081 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "arm/lithium-arm.h"
+#include "arm/lithium-codegen-arm.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                            \
+  void L##type::CompileToNative(LCodeGen* generator) {  \
+    generator->Do##type(this);                          \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+LOsrEntry::LOsrEntry() {
+  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+    register_spills_[i] = NULL;
+  }
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+    double_register_spills_[i] = NULL;
+  }
+}
+
+
+void LOsrEntry::MarkSpilledRegister(int allocation_index,
+                                    LOperand* spill_operand) {
+  ASSERT(spill_operand->IsStackSlot());
+  ASSERT(register_spills_[allocation_index] == NULL);
+  register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
+                                          LOperand* spill_operand) {
+  ASSERT(spill_operand->IsDoubleStackSlot());
+  ASSERT(double_register_spills_[allocation_index] == NULL);
+  double_register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) const {
+  stream->Add("%s ", this->Mnemonic());
+  if (HasResult()) {
+    result()->PrintTo(stream);
+    stream->Add(" ");
+  }
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) const {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+bool LParallelMove::IsRedundant() const {
+  for (int i = 0; i < move_operands_.length(); ++i) {
+    if (!move_operands_[i].IsRedundant()) return false;
+  }
+  return true;
+}
+
+
+void LParallelMove::PrintDataTo(StringStream* stream) const {
+  for (int i = move_operands_.length() - 1; i >= 0; --i) {
+    if (!move_operands_[i].IsEliminated()) {
+      LOperand* from = move_operands_[i].from();
+      LOperand* to = move_operands_[i].to();
+      if (from->Equals(to)) {
+        to->PrintTo(stream);
+      } else {
+        to->PrintTo(stream);
+        stream->Add(" = ");
+        from->PrintTo(stream);
+      }
+      stream->Add("; ");
+    }
+  }
+}
+
+
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) const {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
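+
+// Note (illustrative): the four parallel moves iterated above correspond to
+// the gap's inner positions (BEFORE, START, END and AFTER); AddGapMove
+// below always adds moves at the START position.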
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-d";
+    case Token::SUB: return "sub-d";
+    case Token::MUL: return "mul-d";
+    case Token::DIV: return "div-d";
+    case Token::MOD: return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-t";
+    case Token::SUB: return "sub-t";
+    case Token::MUL: return "mul-t";
+    case Token::MOD: return "mod-t";
+    case Token::DIV: return "div-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+void LBinaryOperation::PrintDataTo(StringStream* stream) const {
+  stream->Add("= ");
+  left()->PrintTo(stream);
+  stream->Add(" ");
+  right()->PrintTo(stream);
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) const {
+  stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  input()->PrintTo(stream);
+}
+
+
+void LCmpIDAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if ");
+  left()->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  right()->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if ");
+  input()->PrintTo(stream);
+  stream->Add(is_strict() ? " === null" : " == null");
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if is_smi(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if has_instance_type(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if has_cached_array_index(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if class_of_test(");
+  input()->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d",
+              *hydrogen()->class_name(),
+              true_block_id(),
+              false_block_id());
+}
+
+
+void LTypeofIs::PrintDataTo(StringStream* stream) const {
+  input()->PrintTo(stream);
+  stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if typeof ");
+  input()->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              *hydrogen()->type_literal()->ToCString(),
+              true_block_id(), false_block_id());
+}
+
+
+void LCallConstantFunction::PrintDataTo(StringStream* stream) const {
+  stream->Add("#%d / ", arity());
+}
+
+
+void LUnaryMathOperation::PrintDataTo(StringStream* stream) const {
+  stream->Add("/%s ", hydrogen()->OpName());
+  input()->PrintTo(stream);
+}
+
+
+void LCallKeyed::PrintDataTo(StringStream* stream) const {
+  stream->Add("[r2] #%d / ", arity());
+}
+
+
+void LCallNamed::PrintDataTo(StringStream* stream) const {
+  SmartPointer<char> name_string = name()->ToCString();
+  stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallGlobal::PrintDataTo(StringStream* stream) const {
+  SmartPointer<char> name_string = name()->ToCString();
+  stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallKnownGlobal::PrintDataTo(StringStream* stream) const {
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) const {
+  LUnaryOperation::PrintDataTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LClassOfTest::PrintDataTo(StringStream* stream) const {
+  stream->Add("= class_of_test(");
+  input()->PrintTo(stream);
+  stream->Add(", \"%o\")", *hydrogen()->class_name());
+}
+
+
+void LUnaryOperation::PrintDataTo(StringStream* stream) const {
+  stream->Add("= ");
+  input()->PrintTo(stream);
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
+  arguments()->PrintTo(stream);
+
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+LChunk::LChunk(HGraph* graph)
+    : spill_slot_count_(0),
+      graph_(graph),
+      instructions_(32),
+      pointer_maps_(8),
+      inlined_closures_(1) {
+}
+
+
+void LChunk::Verify() const {
+  // TODO(twuerthinger): Implement verification for chunk.
+}
+
+
+int LChunk::GetNextSpillIndex(bool is_double) {
+  // Skip an extra slot when allocating a double-width spill slot.
+  if (is_double) spill_slot_count_++;
+  return spill_slot_count_++;
+}
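+
+// Example (illustrative): starting from spill_slot_count_ == 0, a
+// double-width request consumes indexes 0 and 1 and returns 1; a
+// following single-width request returns 2.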
+
+
+LOperand* LChunk::GetNextSpillSlot(bool is_double)  {
+  int index = GetNextSpillIndex(is_double);
+  if (is_double) {
+    return LDoubleStackSlot::Create(index);
+  } else {
+    return LStackSlot::Create(index);
+  }
+}
+
+
+void LChunk::MarkEmptyBlocks() {
+  HPhase phase("Mark empty blocks", this);
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    int first = block->first_instruction_index();
+    int last = block->last_instruction_index();
+    LInstruction* first_instr = instructions()->at(first);
+    LInstruction* last_instr = instructions()->at(last);
+
+    LLabel* label = LLabel::cast(first_instr);
+    if (last_instr->IsGoto()) {
+      LGoto* goto_instr = LGoto::cast(last_instr);
+      if (!goto_instr->include_stack_check() &&
+          label->IsRedundant() &&
+          !label->is_loop_header()) {
+        bool can_eliminate = true;
+        for (int j = first + 1; j < last && can_eliminate; ++j) {
+          LInstruction* cur = instructions()->at(j);
+          if (cur->IsGap()) {
+            LGap* gap = LGap::cast(cur);
+            if (!gap->IsRedundant()) {
+              can_eliminate = false;
+            }
+          } else {
+            can_eliminate = false;
+          }
+        }
+
+        if (can_eliminate) {
+          label->set_replacement(GetLabel(goto_instr->block_id()));
+        }
+      }
+    }
+  }
+}
+
+
+void LStoreNamed::PrintDataTo(StringStream* stream) const {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) const {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+  LGap* gap = new LGap(block);
+  int index = -1;
+  if (instr->IsControl()) {
+    instructions_.Add(gap);
+    index = instructions_.length();
+    instructions_.Add(instr);
+  } else {
+    index = instructions_.length();
+    instructions_.Add(instr);
+    instructions_.Add(gap);
+  }
+  if (instr->HasPointerMap()) {
+    pointer_maps_.Add(instr->pointer_map());
+    instr->pointer_map()->set_lithium_position(index);
+  }
+  return index;
+}
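+
+// Note (illustrative): every instruction is thus paired with an LGap,
+// placed before control instructions and after all others, giving the
+// register allocator a slot to insert parallel moves around each
+// instruction.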
+
+
+LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
+  return LConstantOperand::Create(constant->id());
+}
+
+
+int LChunk::GetParameterStackSlot(int index) const {
+  // The receiver is at index 0, the first parameter at index 1, so we
+  // shift all parameter indexes down by the number of parameters plus
+  // one, making them negative so they are distinguishable from spill
+  // slots.
+  int result = index - graph()->info()->scope()->num_parameters() - 1;
+  ASSERT(result < 0);
+  return result;
+}
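+
+// Example (illustrative): with two parameters, the receiver (index 0) maps
+// to slot -3, the first parameter (index 1) to -2 and the second to -1.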
+
+
+// A parameter relative to fp in the arguments stub.
+int LChunk::ParameterAt(int index) {
+  ASSERT(-1 <= index);  // -1 is the receiver.
+  return (1 + graph()->info()->scope()->num_parameters() - index) *
+      kPointerSize;
+}
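+
+// Example (illustrative, assuming kPointerSize == 4 on ARM): with two
+// parameters, the receiver (index -1) is at offset 16, parameter 0 at
+// offset 12 and parameter 1 at offset 8 from the frame pointer.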
+
+
+LGap* LChunk::GetGapAt(int index) const {
+  return LGap::cast(instructions_[index]);
+}
+
+
+bool LChunk::IsGapAt(int index) const {
+  return instructions_[index]->IsGap();
+}
+
+
+int LChunk::NearestGapPos(int index) const {
+  while (!IsGapAt(index)) index--;
+  return index;
+}
+
+
+int LChunk::NearestNextGapPos(int index) const {
+  while (!IsGapAt(index)) index++;
+  return index;
+}
+
+
+void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
+  GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
+}
+
+
+class LGapNode: public ZoneObject {
+ public:
+  explicit LGapNode(LOperand* operand)
+      : operand_(operand), resolved_(false), visited_id_(-1) { }
+
+  LOperand* operand() const { return operand_; }
+  bool IsResolved() const { return !IsAssigned() || resolved_; }
+  void MarkResolved() {
+    ASSERT(!IsResolved());
+    resolved_ = true;
+  }
+  int visited_id() const { return visited_id_; }
+  void set_visited_id(int id) {
+    ASSERT(id > visited_id_);
+    visited_id_ = id;
+  }
+
+  bool IsAssigned() const { return assigned_from_.is_set(); }
+  LGapNode* assigned_from() const { return assigned_from_.get(); }
+  void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
+
+ private:
+  LOperand* operand_;
+  SetOncePointer<LGapNode> assigned_from_;
+  bool resolved_;
+  int visited_id_;
+};
+
+
+LGapResolver::LGapResolver(const ZoneList<LMoveOperands>* moves,
+                           LOperand* marker_operand)
+    : nodes_(4),
+      identified_cycles_(4),
+      result_(4),
+      marker_operand_(marker_operand),
+      next_visited_id_(0) {
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) RegisterMove(move);
+  }
+}
+
+
+const ZoneList<LMoveOperands>* LGapResolver::ResolveInReverseOrder() {
+  for (int i = 0; i < identified_cycles_.length(); ++i) {
+    ResolveCycle(identified_cycles_[i]);
+  }
+
+  int unresolved_nodes;
+  do {
+    unresolved_nodes = 0;
+    for (int j = 0; j < nodes_.length(); j++) {
+      LGapNode* node = nodes_[j];
+      if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
+        AddResultMove(node->assigned_from(), node);
+        node->MarkResolved();
+      }
+      if (!node->IsResolved()) ++unresolved_nodes;
+    }
+  } while (unresolved_nodes > 0);
+  return &result_;
+}
+
+
+void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
+  AddResultMove(from->operand(), to->operand());
+}
+
+
+void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
+  result_.Add(LMoveOperands(from, to));
+}
+
+
+void LGapResolver::ResolveCycle(LGapNode* start) {
+  ZoneList<LOperand*> circle_operands(8);
+  circle_operands.Add(marker_operand_);
+  LGapNode* cur = start;
+  do {
+    cur->MarkResolved();
+    circle_operands.Add(cur->operand());
+    cur = cur->assigned_from();
+  } while (cur != start);
+  circle_operands.Add(marker_operand_);
+
+  for (int i = circle_operands.length() - 1; i > 0; --i) {
+    LOperand* from = circle_operands[i];
+    LOperand* to = circle_operands[i - 1];
+    AddResultMove(from, to);
+  }
+}
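+
+// Example (illustrative): for the cycle r1 = r0, r0 = r1 this records
+// [r0 = marker, r1 = r0, marker = r1]; emitted in reverse order that is
+//   marker = r1; r1 = r0; r0 = marker;
+// which swaps the two registers through the marker operand.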
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
+  ASSERT(a != b);
+  LGapNode* cur = a;
+  while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
+    cur->set_visited_id(visited_id);
+    cur = cur->assigned_from();
+  }
+
+  return cur == b;
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
+  ASSERT(a != b);
+  return CanReach(a, b, next_visited_id_++);
+}
+
+
+void LGapResolver::RegisterMove(LMoveOperands move) {
+  if (move.from()->IsConstantOperand()) {
+    // Constant moves should be last in the machine code. Since the result
+    // set is emitted in reverse order, add them first to the result set.
+    AddResultMove(move.from(), move.to());
+  } else {
+    LGapNode* from = LookupNode(move.from());
+    LGapNode* to = LookupNode(move.to());
+    if (to->IsAssigned() && to->assigned_from() == from) {
+      move.Eliminate();
+      return;
+    }
+    ASSERT(!to->IsAssigned());
+    if (CanReach(from, to)) {
+      // This introduces a cycle. Remember it so it can be resolved.
+      identified_cycles_.Add(from);
+    }
+    to->set_assigned_from(from);
+  }
+}
+
+
+LGapNode* LGapResolver::LookupNode(LOperand* operand) {
+  for (int i = 0; i < nodes_.length(); ++i) {
+    if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
+  }
+
+  // No node found => create a new one.
+  LGapNode* result = new LGapNode(operand);
+  nodes_.Add(result);
+  return result;
+}
+
+
+Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
+  return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
+}
+
+
+Representation LChunk::LookupLiteralRepresentation(
+    LConstantOperand* operand) const {
+  return graph_->LookupValue(operand->index())->representation();
+}
+
+
+LChunk* LChunkBuilder::Build() {
+  ASSERT(is_unused());
+  chunk_ = new LChunk(graph());
+  HPhase phase("Building chunk", chunk_);
+  status_ = BUILDING;
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* next = NULL;
+    if (i < blocks->length() - 1) next = blocks->at(i + 1);
+    DoBasicBlock(blocks->at(i), next);
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+void LChunkBuilder::Abort(const char* format, ...) {
+  if (FLAG_trace_bailout) {
+    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
+    PrintF("Aborting LChunk building in @\"%s\": ", *debug_name);
+    va_list arguments;
+    va_start(arguments, format);
+    OS::VPrint(format, arguments);
+    va_end(arguments);
+    PrintF("\n");
+  }
+  status_ = ABORTED;
+}
+
+
+LRegister* LChunkBuilder::ToOperand(Register reg) {
+  return LRegister::Create(Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new LUnallocated(LUnallocated::FIXED_REGISTER,
+                          Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+  return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                          DoubleRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
+  return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value,
+             new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                              LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::NONE,
+                                     LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  allocator_->RecordUse(value, operand);
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::Define(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::NONE));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(LInstruction* instr, int index) {
+  return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsAny(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_ANY_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(LInstruction* instr, Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(LInstruction* instr,
+                                               DoubleRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  instr->set_environment(CreateEnvironment(hydrogen_env));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
+    LInstruction* instr, int ast_id) {
+  ASSERT(instructions_pending_deoptimization_environment_ == NULL);
+  ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+  instructions_pending_deoptimization_environment_ = instr;
+  pending_deoptimization_ast_id_ = ast_id;
+  return instr;
+}
+
+
+void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
+  instructions_pending_deoptimization_environment_ = NULL;
+  pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+  allocator_->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  if (hinstr->HasSideEffects()) {
+    ASSERT(hinstr->next()->IsSimulate());
+    HSimulate* sim = HSimulate::cast(hinstr->next());
+    instr = SetInstructionPendingDeoptimizationEnvironment(
+        instr, sim->ast_id());
+  }
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
+  bool needs_environment =
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  ASSERT(!instr->HasPointerMap());
+  instr->set_pointer_map(new LPointerMap(position_));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::Define(LInstruction* instr, LUnallocated* result) {
+  allocator_->RecordDefinition(current_instruction_, result);
+  instr->set_result(result);
+  return instr;
+}
+
+
+LOperand* LChunkBuilder::Temp() {
+  LUnallocated* operand = new LUnallocated(LUnallocated::NONE);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  return new LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoBit(Token::Value op,
+                                   HBitwiseBinaryOperation* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().IsInteger32());
+  ASSERT(instr->right()->representation().IsInteger32());
+
+  LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+  LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+  return DefineSameAsFirst(new LBitI(op, left, right));
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->OperandAt(0)->representation().IsInteger32());
+  ASSERT(instr->OperandAt(1)->representation().IsInteger32());
+  LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
+
+  HValue* right_value = instr->OperandAt(1);
+  LOperand* right = NULL;
+  int constant_value = 0;
+  if (right_value->IsConstant()) {
+    HConstant* constant = HConstant::cast(right_value);
+    right = chunk_->DefineConstantOperand(constant);
+    constant_value = constant->Integer32Value() & 0x1f;
+  } else {
+    right = UseRegister(right_value);
+  }
+
+  // Shift operations can only deoptimize if we do a logical shift
+  // by 0 and the result cannot be truncated to int32.
+  bool can_deopt = (op == Token::SHR && constant_value == 0);
+  if (can_deopt) {
+    bool can_truncate = true;
+    for (int i = 0; i < instr->uses()->length(); i++) {
+      if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
+        can_truncate = false;
+        break;
+      }
+    }
+    can_deopt = !can_truncate;
+  }
+
+  LInstruction* result =
+      DefineSameAsFirst(new LShiftI(op, left, right, can_deopt));
+  if (can_deopt) AssignEnvironment(result);
+  return result;
+}
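+
+// Note (illustrative): the SHR-by-zero case above can deoptimize because,
+// e.g., -1 >>> 0 evaluates to 4294967295 in JavaScript, which does not fit
+// in a signed 32-bit integer; the result is only safe when every use
+// truncates it back to int32.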
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->left()->representation().IsDouble());
+  ASSERT(instr->right()->representation().IsDouble());
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LArithmeticD* result = new LArithmeticD(op, left, right);
+  return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  ASSERT(op == Token::ADD ||
+         op == Token::DIV ||
+         op == Token::MOD ||
+         op == Token::MUL ||
+         op == Token::SUB);
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+  ASSERT(left->representation().IsTagged());
+  ASSERT(right->representation().IsTagged());
+  LOperand* left_operand = UseFixed(left, r1);
+  LOperand* right_operand = UseFixed(right, r0);
+  LInstruction* result = new LArithmeticT(op, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  ASSERT(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    ASSERT(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    ASSERT(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      ASSERT(pred->end()->FirstSuccessor() == block);
+    } else {
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    ASSERT(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment, it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      last_environment->SetValueAt(phi->merged_index(), phi);
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                   graph_->GetConstantUndefined());
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    if (FLAG_trace_environment) {
+      PrintF("Process instruction %d\n", current->id());
+    }
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+  allocator_->BeginInstruction();
+  if (current->has_position()) position_ = current->position();
+  LInstruction* instr = current->CompileToLithium(this);
+
+  if (instr != NULL) {
+    if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+      instr = AssignPointerMap(instr);
+    }
+    if (FLAG_stress_environments && !instr->HasEnvironment()) {
+      instr = AssignEnvironment(instr);
+    }
+    if (current->IsBranch()) {
+      instr->set_hydrogen_value(HBranch::cast(current)->value());
+    } else {
+      instr->set_hydrogen_value(current);
+    }
+
+    int index = chunk_->AddInstruction(instr, current_block_);
+    allocator_->SummarizeInstruction(index);
+  } else {
+    // This instruction should be omitted.
+    allocator_->OmitInstruction();
+  }
+  current_instruction_ = old_current;
+}
+
+
+void LEnvironment::WriteTranslation(LCodeGen* cgen,
+                                    Translation* translation) const {
+  // Terminates the recursion over outer environments: the outermost call
+  // arrives here with a NULL receiver. (Calling a member function on a
+  // NULL pointer is technically undefined behavior in C++, but this code
+  // relies on it.)
+  if (this == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = values()->length();
+  // The output frame height does not include the parameters.
+  int height = translation_size - parameter_count();
+
+  outer()->WriteTranslation(cgen, translation);
+  int closure_id = cgen->DefineDeoptimizationLiteral(closure());
+  translation->BeginFrame(ast_id(), closure_id, height);
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = values()->at(i);
+    // spilled_registers_ and spilled_double_registers_ are either
+    // both NULL or both set.
+    if (spilled_registers_ != NULL && value != NULL) {
+      if (value->IsRegister() &&
+          spilled_registers_[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        cgen->AddToTranslation(translation,
+                               spilled_registers_[value->index()],
+                               HasTaggedValueAt(i));
+      } else if (value->IsDoubleRegister() &&
+                 spilled_double_registers_[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        cgen->AddToTranslation(translation,
+                               spilled_double_registers_[value->index()],
+                               false);
+      }
+    }
+
+    cgen->AddToTranslation(translation, value, HasTaggedValueAt(i));
+  }
+}
+
+
+void LEnvironment::PrintTo(StringStream* stream) const {
+  stream->Add("[id=%d|", ast_id());
+  stream->Add("[parameters=%d|", parameter_count());
+  stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
+  for (int i = 0; i < values_.length(); ++i) {
+    if (i != 0) stream->Add(";");
+    if (values_[i] == NULL) {
+      stream->Add("[hole]");
+    } else {
+      values_[i]->PrintTo(stream);
+    }
+  }
+  stream->Add("]");
+}
+
+
+LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+  if (hydrogen_env == NULL) return NULL;
+
+  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+  int ast_id = hydrogen_env->ast_id();
+  ASSERT(ast_id != AstNode::kNoNumber);
+  int value_count = hydrogen_env->values()->length();
+  LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
+                                          ast_id,
+                                          hydrogen_env->parameter_count(),
+                                          argument_count_,
+                                          value_count,
+                                          outer);
+  int argument_index = 0;
+  for (int i = 0; i < value_count; ++i) {
+    HValue* value = hydrogen_env->values()->at(i);
+    LOperand* op = NULL;
+    if (value->IsArgumentsObject()) {
+      op = NULL;
+    } else if (value->IsPushArgument()) {
+      op = new LArgument(argument_index++);
+    } else {
+      op = UseOrConstant(value);
+      if (op->IsUnallocated()) {
+        LUnallocated* unalloc = LUnallocated::cast(op);
+        unalloc->set_policy(LUnallocated::ANY);
+      }
+    }
+    result->AddValue(op, value->representation());
+  }
+
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  LInstruction* result = new LGoto(instr->FirstSuccessor()->block_id(),
+                                   instr->include_stack_check());
+  if (instr->include_stack_check()) result = AssignPointerMap(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* v = instr->value();
+  HBasicBlock* first = instr->FirstSuccessor();
+  HBasicBlock* second = instr->SecondSuccessor();
+  ASSERT(first != NULL && second != NULL);
+  int first_id = first->block_id();
+  int second_id = second->block_id();
+
+  if (v->EmitAtUses()) {
+    if (v->IsClassOfTest()) {
+      HClassOfTest* compare = HClassOfTest::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
+                                       TempRegister(),
+                                       TempRegister(),
+                                       first_id,
+                                       second_id);
+    } else if (v->IsCompare()) {
+      HCompare* compare = HCompare::cast(v);
+      Token::Value op = compare->token();
+      HValue* left = compare->left();
+      HValue* right = compare->right();
+      if (left->representation().IsInteger32()) {
+        ASSERT(right->representation().IsInteger32());
+        return new LCmpIDAndBranch(op,
+                                   UseRegisterAtStart(left),
+                                   UseOrConstantAtStart(right),
+                                   first_id,
+                                   second_id,
+                                   false);
+      } else if (left->representation().IsDouble()) {
+        ASSERT(right->representation().IsDouble());
+        return new LCmpIDAndBranch(op,
+                                   UseRegisterAtStart(left),
+                                   UseRegisterAtStart(right),
+                                   first_id,
+                                   second_id,
+                                   true);
+      } else {
+        ASSERT(left->representation().IsTagged());
+        ASSERT(right->representation().IsTagged());
+        bool reversed = op == Token::GT || op == Token::LTE;
+        LOperand* left_operand = UseFixed(left, reversed ? r0 : r1);
+        LOperand* right_operand = UseFixed(right, reversed ? r1 : r0);
+        LInstruction* result = new LCmpTAndBranch(left_operand,
+                                                  right_operand,
+                                                  first_id,
+                                                  second_id);
+        return MarkAsCall(result, instr);
+      }
+    } else if (v->IsIsSmi()) {
+      HIsSmi* compare = HIsSmi::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LIsSmiAndBranch(Use(compare->value()),
+                                 first_id,
+                                 second_id);
+    } else if (v->IsHasInstanceType()) {
+      HHasInstanceType* compare = HHasInstanceType::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
+                                           TempRegister(),
+                                           first_id,
+                                           second_id);
+    } else if (v->IsHasCachedArrayIndex()) {
+      HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LHasCachedArrayIndexAndBranch(
+          UseRegisterAtStart(compare->value()), first_id, second_id);
+    } else if (v->IsIsNull()) {
+      HIsNull* compare = HIsNull::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      // We only need a temp register for non-strict compare.
+      LOperand* temp = compare->is_strict() ? NULL : TempRegister();
+      return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
+                                  compare->is_strict(),
+                                  temp,
+                                  first_id,
+                                  second_id);
+    } else if (v->IsCompareJSObjectEq()) {
+      HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
+      return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
+                                         UseRegisterAtStart(compare->right()),
+                                         first_id,
+                                         second_id);
+    } else if (v->IsInstanceOf()) {
+      HInstanceOf* instance_of = HInstanceOf::cast(v);
+      LInstruction* result =
+          new LInstanceOfAndBranch(Use(instance_of->left()),
+                                   Use(instance_of->right()),
+                                   first_id,
+                                   second_id);
+      return MarkAsCall(result, instr);
+    } else if (v->IsTypeofIs()) {
+      HTypeofIs* typeof_is = HTypeofIs::cast(v);
+      return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()),
+                                    first_id,
+                                    second_id);
+    } else {
+      if (v->IsConstant()) {
+        if (HConstant::cast(v)->handle()->IsTrue()) {
+          return new LGoto(first_id);
+        } else if (HConstant::cast(v)->handle()->IsFalse()) {
+          return new LGoto(second_id);
+        }
+      }
+      Abort("Undefined compare before branch");
+      return NULL;
+    }
+  }
+  return new LBranch(UseRegisterAtStart(v), first_id, second_id);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMapAndBranch(
+    HCompareMapAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  HBasicBlock* first = instr->FirstSuccessor();
+  HBasicBlock* second = instr->SecondSuccessor();
+  return new LCmpMapAndBranch(value,
+                              instr->map(),
+                              first->block_id(),
+                              second->block_id());
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+  return DefineAsRegister(new LArgumentsLength(Use(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  return DefineAsRegister(new LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LInstruction* result =
+      new LInstanceOf(Use(instr->left()), Use(instr->right()));
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), r1);
+  LOperand* receiver = UseFixed(instr->receiver(), r0);
+  LOperand* length = UseRegisterAtStart(instr->length());
+  LOperand* elements = UseRegisterAtStart(instr->elements());
+  LInstruction* result = new LApplyArguments(function,
+                                             receiver,
+                                             length,
+                                             elements);
+  return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
+  ++argument_count_;
+  LOperand* argument = Use(instr->argument());
+  return new LPushArgument(argument);
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
+  return DefineAsRegister(new LGlobalObject);
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
+  return DefineAsRegister(new LGlobalReceiver);
+}
+
+
+LInstruction* LChunkBuilder::DoCallConstantFunction(
+    HCallConstantFunction* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallConstantFunction, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  MathFunctionId op = instr->op();
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LInstruction* result = new LUnaryMathOperation(input);
+  switch (op) {
+    case kMathAbs:
+      return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+    case kMathFloor:
+      return AssignEnvironment(DefineAsRegister(result));
+    case kMathSqrt:
+      return DefineSameAsFirst(result);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
+  ASSERT(instr->key()->representation().IsTagged());
+  argument_count_ -= instr->argument_count();
+  UseFixed(instr->key(), r2);
+  return MarkAsCall(DefineFixed(new LCallKeyed, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallNamed, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallGlobal, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallKnownGlobal, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+  LOperand* constructor = UseFixed(instr->constructor(), r1);
+  argument_count_ -= instr->argument_count();
+  LInstruction* result = new LCallNew(constructor);
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallFunction, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallRuntime, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
+  return DoBit(Token::BIT_AND, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
+  ASSERT(instr->value()->representation().IsInteger32());
+  ASSERT(instr->representation().IsInteger32());
+  return DefineSameAsFirst(new LBitNotI(UseRegisterAtStart(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
+  return DoBit(Token::BIT_OR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
+  return DoBit(Token::BIT_XOR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else if (instr->representation().IsInteger32()) {
+    // The temporary operand is necessary to ensure that right is not
+    // allocated into r1.
+    FixedTemp(r1);
+    LOperand* value = UseFixed(instr->left(), r0);
+    LOperand* divisor = UseRegister(instr->right());
+    return AssignEnvironment(DefineFixed(new LDivI(value, divisor), r0));
+  } else {
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    // The temporary operand is necessary to ensure that right is not
+    // allocated into r1.
+    FixedTemp(r1);
+    LOperand* value = UseFixed(instr->left(), r0);
+    LOperand* divisor = UseRegister(instr->right());
+    LInstruction* result = DefineFixed(new LModI(value, divisor), r1);
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+        instr->CheckFlag(HValue::kCanBeDivByZero)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsTagged()) {
+    return DoArithmeticT(Token::MOD, instr);
+  } else {
+    ASSERT(instr->representation().IsDouble());
+    // We call a C function for double modulo. It can't trigger a GC.
+    // We need to use a fixed result register for the call.
+    // TODO(fschneider): Allow any register as input registers.
+    LOperand* left = UseFixedDouble(instr->left(), d1);
+    LOperand* right = UseFixedDouble(instr->right(), d2);
+    LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+    return MarkAsCall(DefineFixedDouble(result, d1), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstant(instr->MostConstantOperand());
+    LOperand* temp = NULL;
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      temp = TempRegister();
+    }
+    LMulI* mul = new LMulI(left, right, temp);
+    return AssignEnvironment(DefineSameAsFirst(mul));
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MUL, instr);
+  } else {
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    LSubI* sub = new LSubI(left, right);
+    LInstruction* result = DefineSameAsFirst(sub);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    LAddI* add = new LAddI(left, right);
+    LInstruction* result = DefineSameAsFirst(add);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::ADD, instr);
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
+  Token::Value op = instr->token();
+  if (instr->left()->representation().IsInteger32()) {
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    return DefineAsRegister(new LCmpID(op, left, right, false));
+  } else if (instr->left()->representation().IsDouble()) {
+    ASSERT(instr->right()->representation().IsDouble());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return DefineAsRegister(new LCmpID(op, left, right, true));
+  } else {
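+    // The generic comparison stub takes its operands in fixed registers.
+    // GT and LTE are handled by swapping the operands and testing the
+    // mirrored condition.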
+    bool reversed = (op == Token::GT || op == Token::LTE);
+    LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
+    LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
+    LInstruction* result = new LCmpT(left, right);
+    return MarkAsCall(DefineFixed(result, r0), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareJSObjectEq(
+    HCompareJSObjectEq* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LInstruction* result = new LCmpJSObjectEq(left, right);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new LIsNull(value,
+                                      instr->is_strict()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseAtStart(instr->value());
+
+  return DefineAsRegister(new LIsSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new LHasInstanceType(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
+    HHasCachedArrayIndex* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegister(instr->value());
+
+  return DefineAsRegister(new LHasCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseTempRegister(instr->value());
+
+  return DefineSameAsFirst(new LClassOfTest(value, TempRegister()));
+}
+
+
+LInstruction* LChunkBuilder::DoArrayLength(HArrayLength* instr) {
+  LOperand* array = NULL;
+  LOperand* temporary = NULL;
+
+  if (instr->value()->IsLoadElements()) {
+    array = UseRegisterAtStart(instr->value());
+  } else {
+    array = UseRegister(instr->value());
+    temporary = TempRegister();
+  }
+
+  LInstruction* result = new LArrayLength(array, temporary);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
+  LOperand* object = UseRegister(instr->value());
+  LInstruction* result = new LValueOf(object, TempRegister());
+  return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
+                                            Use(instr->length())));
+}
+
+
+LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+  LOperand* value = UseFixed(instr->value(), r0);
+  return MarkAsCall(new LThrow(value), instr);
+}
+
+
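+// Representation changes cover every pair of tagged, double and int32
+// representations. Paths that may deoptimize carry an environment; paths
+// that may allocate a heap number also carry a pointer map.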
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(instr->value());
+      LInstruction* res = new LNumberUntagD(value);
+      return AssignEnvironment(DefineAsRegister(res));
+    } else {
+      ASSERT(to.IsInteger32());
+      LOperand* value = UseRegister(instr->value());
+      bool needs_check = !instr->value()->type().IsSmi();
+      LInstruction* res = NULL;
+      if (needs_check) {
+        res = DefineSameAsFirst(new LTaggedToI(value, FixedTemp(d1)));
+      } else {
+        res = DefineSameAsFirst(new LSmiUntag(value, needs_check));
+      }
+      if (needs_check) {
+        res = AssignEnvironment(res);
+      }
+      return res;
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(instr->value());
+      LOperand* temp = TempRegister();
+
+      // Make sure that temp and result_temp are different registers.
+      LUnallocated* result_temp = TempRegister();
+      LInstruction* result = new LNumberTagD(value, temp);
+      Define(result, result_temp);
+      return AssignPointerMap(result);
+    } else {
+      ASSERT(to.IsInteger32());
+      LOperand* value = UseRegister(instr->value());
+      LInstruction* res = new LDoubleToI(value);
+      return AssignEnvironment(DefineAsRegister(res));
+    }
+  } else if (from.IsInteger32()) {
+    if (to.IsTagged()) {
+      HValue* val = instr->value();
+      LOperand* value = UseRegister(val);
+      if (val->HasRange() && val->range()->IsInSmiRange()) {
+        return DefineSameAsFirst(new LSmiTag(value));
+      } else {
+        LInstruction* result = new LNumberTagI(value);
+        return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+      }
+    } else {
+      ASSERT(to.IsDouble());
+      LOperand* value = Use(instr->value());
+      return DefineAsRegister(new LInteger32ToDouble(value));
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckSmi(value, eq));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  LInstruction* result = new LCheckInstanceType(value, temp);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
+  LOperand* temp = TempRegister();
+  LInstruction* result =
+      new LCheckPrototypeMaps(temp,
+                              instr->holder(),
+                              instr->receiver_map());
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckSmi(value, ne));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckFunction(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new LCheckMap(value);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  return new LReturn(UseFixed(instr->value(), r0));
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsInteger32()) {
+    int32_t value = instr->Integer32Value();
+    return DefineAsRegister(new LConstantI(value));
+  } else if (r.IsDouble()) {
+    double value = instr->DoubleValue();
+    return DefineAsRegister(new LConstantD(value));
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new LConstantT(instr->handle()));
+  } else {
+    Abort("unsupported constant representation");
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
+  LInstruction* result = new LLoadGlobal();
+  return instr->check_hole_value()
+      ? AssignEnvironment(DefineAsRegister(result))
+      : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
+  return new LStoreGlobal(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  return DefineAsRegister(
+      new LLoadNamedField(UseRegisterAtStart(instr->object())));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* object = UseFixed(instr->object(), r0);
+  LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), r0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return DefineSameAsFirst(new LLoadElements(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
+    HLoadKeyedFastElement* instr) {
+  Representation r = instr->representation();
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* key = UseRegisterAtStart(instr->key());
+  LOperand* load_result = NULL;
+  // Double needs an extra temp, because the result is loaded from a heap
+  // number into a double register.
+  if (r.IsDouble()) load_result = TempRegister();
+  LInstruction* result = new LLoadKeyedFastElement(obj,
+                                                   key,
+                                                   load_result);
+  if (r.IsDouble()) {
+    result = DefineAsRegister(result);
+  } else {
+    result = DefineSameAsFirst(result);
+  }
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* object = UseFixed(instr->object(), r1);
+  LOperand* key = UseFixed(instr->key(), r0);
+
+  LInstruction* result =
+      DefineFixed(new LLoadKeyedGeneric(object, key), r0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
+    HStoreKeyedFastElement* instr) {
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  ASSERT(instr->value()->representation().IsTagged());
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+
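+  // The write barrier uses the object, value and key after the store, so
+  // when it is needed each of them gets its own register that the
+  // instruction is allowed to clobber.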
+  LOperand* obj = UseTempRegister(instr->object());
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegisterAtStart(instr->value());
+  LOperand* key = needs_write_barrier
+      ? UseTempRegister(instr->key())
+      : UseRegisterOrConstantAtStart(instr->key());
+
+  return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* obj = UseFixed(instr->object(), r2);
+  LOperand* key = UseFixed(instr->key(), r1);
+  LOperand* val = UseFixed(instr->value(), r0);
+
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsTagged());
+  ASSERT(instr->value()->representation().IsTagged());
+
+  return MarkAsCall(new LStoreKeyedGeneric(obj, key, val), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  bool needs_write_barrier = !instr->value()->type().IsSmi();
+
+  LOperand* obj = needs_write_barrier
+      ? UseTempRegister(instr->object())
+      : UseRegisterAtStart(instr->object());
+
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegister(instr->value());
+
+  // We only need a scratch register if we have a write barrier or we
+  // have a store into the properties array (not an in-object property).
+  LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
+      ? TempRegister() : NULL;
+
+  return new LStoreNamedField(obj,
+                              instr->name(),
+                              val,
+                              instr->is_in_object(),
+                              instr->offset(),
+                              temp,
+                              needs_write_barrier,
+                              instr->transition());
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* obj = UseFixed(instr->object(), r1);
+  LOperand* val = UseFixed(instr->value(), r0);
+
+  LInstruction* result = new LStoreNamedGeneric(obj, instr->name(), val);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LArrayLiteral, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LObjectLiteral, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LRegExpLiteral, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LFunctionLiteral, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
+  LInstruction* result = new LDeleteProperty(Use(instr->object()),
+                                             UseOrConstant(instr->key()));
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  int spill_index = chunk()->GetParameterStackSlot(instr->index());
+  return DefineAsSpilled(new LParameter, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  int spill_index = chunk()->GetNextSpillIndex(false);  // Not double-width.
+  return DefineAsSpilled(new LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallStub, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object; any case that would
+  // actually need one bails out instead.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  LOperand* arguments = UseRegister(instr->arguments());
+  LOperand* length = UseTempRegister(instr->length());
+  LOperand* index = Use(instr->index());
+  LInstruction* result = new LAccessArgumentsAt(arguments, length, index);
+  return DefineAsRegister(AssignEnvironment(result));
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LInstruction* result = new LTypeof(Use(instr->value()));
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
+  return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
+}
+
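+
+// A simulate marks a point where the abstract state of the function is
+// fully known. Rebuild the block's last environment to match it, so that a
+// later deoptimization can resume execution from exactly this point.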
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  HEnvironment* env = current_block_->last_environment();
+  ASSERT(env != NULL);
+
+  env->set_ast_id(instr->ast_id());
+
+  env->Drop(instr->pop_count());
+  for (int i = 0; i < instr->values()->length(); ++i) {
+    HValue* value = instr->values()->at(i);
+    if (instr->HasAssignedIndexAt(i)) {
+      env->Bind(instr->GetAssignedIndexAt(i), value);
+    } else {
+      env->Push(value);
+    }
+  }
+
+  if (FLAG_trace_environment) {
+    PrintF("Reconstructed environment ast_id=%d, instr_id=%d\n",
+           instr->ast_id(),
+           instr->id());
+    env->PrintToStd();
+  }
+  ASSERT(env->values()->length() == instr->environment_height());
+
+  // If there is an instruction pending deoptimization environment create a
+  // lazy bailout instruction to capture the environment.
+  if (pending_deoptimization_ast_id_ == instr->ast_id()) {
+    LInstruction* result = new LLazyBailout;
+    result = AssignEnvironment(result);
+    instructions_pending_deoptimization_environment_->
+        set_deoptimization_environment(result->environment());
+    ClearInstructionPendingDeoptimizationEnvironment();
+    return result;
+  }
+
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  return MarkAsCall(new LStackCheck, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->function(),
+                                               false,
+                                               undefined);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedClosure(instr->closure());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment()->outer();
+  current_block_->UpdateEnvironment(outer);
+  return NULL;
+}
+
+
+void LPointerMap::RecordPointer(LOperand* op) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  pointer_operands_.Add(op);
+}
+
+
+void LPointerMap::PrintTo(StringStream* stream) const {
+  stream->Add("{");
+  for (int i = 0; i < pointer_operands_.length(); ++i) {
+    if (i != 0) stream->Add(";");
+    pointer_operands_[i]->PrintTo(stream);
+  }
+  stream->Add("} @%d", position());
+}
+
+} }  // namespace v8::internal
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
new file mode 100644 (file)
index 0000000..0d5ba0f
--- /dev/null
@@ -0,0 +1,2068 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_LITHIUM_ARM_H_
+#define V8_ARM_LITHIUM_ARM_H_
+
+#include "hydrogen.h"
+#include "lithium-allocator.h"
+#include "safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+class LEnvironment;
+class Translation;
+
+
+// Type hierarchy:
+//
+// LInstruction
+//   LAccessArgumentsAt
+//   LArgumentsElements
+//   LBinaryOperation
+//     LAddI
+//     LApplyArguments
+//     LArithmeticD
+//     LArithmeticT
+//     LBitI
+//     LBoundsCheck
+//     LCmpID
+//     LCmpIDAndBranch
+//     LCmpJSObjectEq
+//     LCmpJSObjectEqAndBranch
+//     LCmpT
+//     LDeleteProperty
+//     LDivI
+//     LInstanceOf
+//     LInstanceOfAndBranch
+//     LLoadKeyedFastElement
+//     LLoadKeyedGeneric
+//     LModI
+//     LMulI
+//     LShiftI
+//     LSubI
+//   LCallConstantFunction
+//   LCallFunction
+//   LCallGlobal
+//   LCallKeyed
+//   LCallKnownGlobal
+//   LCallNamed
+//   LCallRuntime
+//   LCallStub
+//   LConstant
+//     LConstantD
+//     LConstantI
+//     LConstantT
+//   LDeoptimize
+//   LFunctionLiteral
+//   LGap
+//     LLabel
+//   LGlobalObject
+//   LGlobalReceiver
+//   LGoto
+//   LLazyBailout
+//   LLoadGlobal
+//   LMaterializedLiteral
+//     LArrayLiteral
+//     LObjectLiteral
+//     LRegExpLiteral
+//   LOsrEntry
+//   LParameter
+//   LStackCheck
+//   LStoreKeyed
+//     LStoreKeyedFastElement
+//     LStoreKeyedGeneric
+//   LStoreNamed
+//     LStoreNamedField
+//     LStoreNamedGeneric
+//   LUnaryOperation
+//     LArgumentsLength
+//     LArrayLength
+//     LBitNotI
+//     LBranch
+//     LCallNew
+//     LCheckFunction
+//     LCheckInstanceType
+//     LCheckMap
+//     LCheckPrototypeMaps
+//     LCheckSmi
+//     LClassOfTest
+//     LClassOfTestAndBranch
+//     LCmpMapAndBranch
+//     LDoubleToI
+//     LHasCachedArrayIndex
+//     LHasCachedArrayIndexAndBranch
+//     LHasInstanceType
+//     LHasInstanceTypeAndBranch
+//     LInteger32ToDouble
+//     LIsNull
+//     LIsNullAndBranch
+//     LIsSmi
+//     LIsSmiAndBranch
+//     LLoadElements
+//     LLoadNamedField
+//     LLoadNamedGeneric
+//     LNumberTagD
+//     LNumberTagI
+//     LPushArgument
+//     LReturn
+//     LSmiTag
+//     LStoreGlobal
+//     LTaggedToI
+//     LThrow
+//     LTypeof
+//     LTypeofIs
+//     LTypeofIsAndBranch
+//     LUnaryMathOperation
+//     LValueOf
+//   LUnknownOSRValue
+
+#define LITHIUM_ALL_INSTRUCTION_LIST(V)         \
+  V(BinaryOperation)                            \
+  V(Constant)                                   \
+  V(Call)                                       \
+  V(MaterializedLiteral)                        \
+  V(StoreKeyed)                                 \
+  V(StoreNamed)                                 \
+  V(UnaryOperation)                             \
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
+
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
+  V(AccessArgumentsAt)                          \
+  V(AddI)                                       \
+  V(ApplyArguments)                             \
+  V(ArgumentsElements)                          \
+  V(ArgumentsLength)                            \
+  V(ArithmeticD)                                \
+  V(ArithmeticT)                                \
+  V(ArrayLength)                                \
+  V(ArrayLiteral)                               \
+  V(BitI)                                       \
+  V(BitNotI)                                    \
+  V(BoundsCheck)                                \
+  V(Branch)                                     \
+  V(CallConstantFunction)                       \
+  V(CallFunction)                               \
+  V(CallGlobal)                                 \
+  V(CallKeyed)                                  \
+  V(CallKnownGlobal)                            \
+  V(CallNamed)                                  \
+  V(CallNew)                                    \
+  V(CallRuntime)                                \
+  V(CallStub)                                   \
+  V(CheckFunction)                              \
+  V(CheckInstanceType)                          \
+  V(CheckMap)                                   \
+  V(CheckPrototypeMaps)                         \
+  V(CheckSmi)                                   \
+  V(CmpID)                                      \
+  V(CmpIDAndBranch)                             \
+  V(CmpJSObjectEq)                              \
+  V(CmpJSObjectEqAndBranch)                     \
+  V(CmpMapAndBranch)                            \
+  V(CmpT)                                       \
+  V(CmpTAndBranch)                              \
+  V(ConstantD)                                  \
+  V(ConstantI)                                  \
+  V(ConstantT)                                  \
+  V(DeleteProperty)                             \
+  V(Deoptimize)                                 \
+  V(DivI)                                       \
+  V(DoubleToI)                                  \
+  V(FunctionLiteral)                            \
+  V(Gap)                                        \
+  V(GlobalObject)                               \
+  V(GlobalReceiver)                             \
+  V(Goto)                                       \
+  V(InstanceOf)                                 \
+  V(InstanceOfAndBranch)                        \
+  V(Integer32ToDouble)                          \
+  V(IsNull)                                     \
+  V(IsNullAndBranch)                            \
+  V(IsSmi)                                      \
+  V(IsSmiAndBranch)                             \
+  V(HasInstanceType)                            \
+  V(HasInstanceTypeAndBranch)                   \
+  V(HasCachedArrayIndex)                        \
+  V(HasCachedArrayIndexAndBranch)               \
+  V(ClassOfTest)                                \
+  V(ClassOfTestAndBranch)                       \
+  V(Label)                                      \
+  V(LazyBailout)                                \
+  V(LoadElements)                               \
+  V(LoadGlobal)                                 \
+  V(LoadKeyedFastElement)                       \
+  V(LoadKeyedGeneric)                           \
+  V(LoadNamedField)                             \
+  V(LoadNamedGeneric)                           \
+  V(ModI)                                       \
+  V(MulI)                                       \
+  V(NumberTagD)                                 \
+  V(NumberTagI)                                 \
+  V(NumberUntagD)                               \
+  V(ObjectLiteral)                              \
+  V(OsrEntry)                                   \
+  V(Parameter)                                  \
+  V(PushArgument)                               \
+  V(RegExpLiteral)                              \
+  V(Return)                                     \
+  V(ShiftI)                                     \
+  V(SmiTag)                                     \
+  V(SmiUntag)                                   \
+  V(StackCheck)                                 \
+  V(StoreGlobal)                                \
+  V(StoreKeyedFastElement)                      \
+  V(StoreKeyedGeneric)                          \
+  V(StoreNamedField)                            \
+  V(StoreNamedGeneric)                          \
+  V(SubI)                                       \
+  V(TaggedToI)                                  \
+  V(Throw)                                      \
+  V(Typeof)                                     \
+  V(TypeofIs)                                   \
+  V(TypeofIsAndBranch)                          \
+  V(UnaryMathOperation)                         \
+  V(UnknownOSRValue)                            \
+  V(ValueOf)
+
+
+#define DECLARE_INSTRUCTION(type)                \
+  virtual bool Is##type() const { return true; } \
+  static L##type* cast(LInstruction* instr) {    \
+    ASSERT(instr->Is##type());                   \
+    return reinterpret_cast<L##type*>(instr);    \
+  }
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)        \
+  virtual void CompileToNative(LCodeGen* generator);        \
+  virtual const char* Mnemonic() const { return mnemonic; } \
+  DECLARE_INSTRUCTION(type)
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type)     \
+  H##type* hydrogen() const {               \
+    return H##type::cast(hydrogen_value()); \
+  }
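+
+
+// For illustration, DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") expands to:
+//
+//   virtual void CompileToNative(LCodeGen* generator);
+//   virtual const char* Mnemonic() const { return "add-i"; }
+//   virtual bool IsAddI() const { return true; }
+//   static LAddI* cast(LInstruction* instr) {
+//     ASSERT(instr->IsAddI());
+//     return reinterpret_cast<LAddI*>(instr);
+//   }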
+
+
+class LInstruction: public ZoneObject {
+ public:
+  LInstruction()
+      : hydrogen_value_(NULL) { }
+  virtual ~LInstruction() { }
+
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream) const { }
+
+  // Declare virtual type testers.
+#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
+  LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+  virtual bool IsControl() const { return false; }
+
+  void set_environment(LEnvironment* env) { environment_.set(env); }
+  LEnvironment* environment() const { return environment_.get(); }
+  bool HasEnvironment() const { return environment_.is_set(); }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_result(LOperand* operand) { result_.set(operand); }
+  LOperand* result() const { return result_.get(); }
+  bool HasResult() const { return result_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  void set_deoptimization_environment(LEnvironment* env) {
+    deoptimization_environment_.set(env);
+  }
+  LEnvironment* deoptimization_environment() const {
+    return deoptimization_environment_.get();
+  }
+  bool HasDeoptimizationEnvironment() const {
+    return deoptimization_environment_.is_set();
+  }
+
+ private:
+  SetOncePointer<LEnvironment> environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  SetOncePointer<LOperand> result_;
+  HValue* hydrogen_value_;
+  SetOncePointer<LEnvironment> deoptimization_environment_;
+};
+
+
+class LGapNode;
+
+
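+// Turns the parallel moves recorded for a gap into an equivalent sequential
+// list of moves. Moves may form cycles; cycles are detected and broken by
+// going through the marker operand as scratch.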
+class LGapResolver BASE_EMBEDDED {
+ public:
+  LGapResolver(const ZoneList<LMoveOperands>* moves, LOperand* marker_operand);
+  const ZoneList<LMoveOperands>* ResolveInReverseOrder();
+
+ private:
+  LGapNode* LookupNode(LOperand* operand);
+  bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
+  bool CanReach(LGapNode* a, LGapNode* b);
+  void RegisterMove(LMoveOperands move);
+  void AddResultMove(LOperand* from, LOperand* to);
+  void AddResultMove(LGapNode* from, LGapNode* to);
+  void ResolveCycle(LGapNode* start);
+
+  ZoneList<LGapNode*> nodes_;
+  ZoneList<LGapNode*> identified_cycles_;
+  ZoneList<LMoveOperands> result_;
+  LOperand* marker_operand_;
+  int next_visited_id_;
+  int bailout_after_ast_id_;
+};
+
+
+class LParallelMove : public ZoneObject {
+ public:
+  LParallelMove() : move_operands_(4) { }
+
+  void AddMove(LOperand* from, LOperand* to) {
+    move_operands_.Add(LMoveOperands(from, to));
+  }
+
+  bool IsRedundant() const;
+
+  const ZoneList<LMoveOperands>* move_operands() const {
+    return &move_operands_;
+  }
+
+  void PrintDataTo(StringStream* stream) const;
+
+ private:
+  ZoneList<LMoveOperands> move_operands_;
+};
+
+
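+// A gap is a pseudo instruction between two real instructions. The register
+// allocator stores the parallel moves it needs (spills, fills and plain
+// register-to-register moves) in one of the gap's four inner positions.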
+class LGap: public LInstruction {
+ public:
+  explicit LGap(HBasicBlock* block)
+      : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
+    if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+    return parallel_moves_[pos];
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
+
+
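+// A goto with include_stack_check set also polls the stack limit; this is
+// presumably used on loop back edges so long-running loops can be
+// interrupted.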
+class LGoto: public LInstruction {
+ public:
+  LGoto(int block_id, bool include_stack_check = false)
+      : block_id_(block_id), include_stack_check_(include_stack_check) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int block_id() const { return block_id_; }
+  bool include_stack_check() const { return include_stack_check_; }
+
+ private:
+  int block_id_;
+  bool include_stack_check_;
+};
+
+
+class LLazyBailout: public LInstruction {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+
+class LDeoptimize: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+};
+
+
+class LLabel: public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block)
+      : LGap(block), replacement_(NULL) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
+};
+
+
+class LParameter: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LUnknownOSRValue: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+class LUnaryOperation: public LInstruction {
+ public:
+  explicit LUnaryOperation(LOperand* input) : input_(input) { }
+
+  DECLARE_INSTRUCTION(UnaryOperation)
+
+  LOperand* input() const { return input_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+ private:
+  LOperand* input_;
+};
+
+
+class LBinaryOperation: public LInstruction {
+ public:
+  LBinaryOperation(LOperand* left, LOperand* right)
+      : left_(left), right_(right) { }
+
+  DECLARE_INSTRUCTION(BinaryOperation)
+
+  LOperand* left() const { return left_; }
+  LOperand* right() const { return right_; }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+ private:
+  LOperand* left_;
+  LOperand* right_;
+};
+
+
+class LApplyArguments: public LBinaryOperation {
+ public:
+  LApplyArguments(LOperand* function,
+                  LOperand* receiver,
+                  LOperand* length,
+                  LOperand* elements)
+      : LBinaryOperation(function, receiver),
+        length_(length),
+        elements_(elements) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+  LOperand* function() const { return left(); }
+  LOperand* receiver() const { return right(); }
+  LOperand* length() const { return length_; }
+  LOperand* elements() const { return elements_; }
+
+ private:
+  LOperand* length_;
+  LOperand* elements_;
+};
+
+
+class LAccessArgumentsAt: public LInstruction {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index)
+      : arguments_(arguments), length_(length), index_(index) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  LOperand* arguments() const { return arguments_; }
+  LOperand* length() const { return length_; }
+  LOperand* index() const { return index_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+ private:
+  LOperand* arguments_;
+  LOperand* length_;
+  LOperand* index_;
+};
+
+
+class LArgumentsLength: public LUnaryOperation {
+ public:
+  explicit LArgumentsLength(LOperand* elements) : LUnaryOperation(elements) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements: public LInstruction {
+ public:
+  LArgumentsElements() { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+};
+
+
+class LModI: public LBinaryOperation {
+ public:
+  LModI(LOperand* left, LOperand* right) : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivI: public LBinaryOperation {
+ public:
+  LDivI(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+};
+
+
+class LMulI: public LBinaryOperation {
+ public:
+  LMulI(LOperand* left, LOperand* right, LOperand* temp)
+      : LBinaryOperation(left, right), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LCmpID: public LBinaryOperation {
+ public:
+  LCmpID(Token::Value op, LOperand* left, LOperand* right, bool is_double)
+      : LBinaryOperation(left, right), op_(op), is_double_(is_double) { }
+
+  Token::Value op() const { return op_; }
+  bool is_double() const { return is_double_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
+
+ private:
+  Token::Value op_;
+  bool is_double_;
+};
+
+
+class LCmpIDAndBranch: public LCmpID {
+ public:
+  LCmpIDAndBranch(Token::Value op,
+                  LOperand* left,
+                  LOperand* right,
+                  int true_block_id,
+                  int false_block_id,
+                  bool is_double)
+      : LCmpID(op, left, right, is_double),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LUnaryMathOperation: public LUnaryOperation {
+ public:
+  explicit LUnaryMathOperation(LOperand* value)
+      : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  MathFunctionId op() const { return hydrogen()->op(); }
+};
+
+
+class LCmpJSObjectEq: public LBinaryOperation {
+ public:
+  LCmpJSObjectEq(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+};
+
+
+class LCmpJSObjectEqAndBranch: public LCmpJSObjectEq {
+ public:
+  LCmpJSObjectEqAndBranch(LOperand* left,
+                          LOperand* right,
+                          int true_block_id,
+                          int false_block_id)
+      : LCmpJSObjectEq(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
+                               "cmp-jsobject-eq-and-branch")
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LIsNull: public LUnaryOperation {
+ public:
+  LIsNull(LOperand* value, bool is_strict)
+      : LUnaryOperation(value), is_strict_(is_strict) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
+
+  bool is_strict() const { return is_strict_; }
+
+ private:
+  bool is_strict_;
+};
+
+
+class LIsNullAndBranch: public LIsNull {
+ public:
+  LIsNullAndBranch(LOperand* value,
+                   bool is_strict,
+                   LOperand* temp,
+                   int true_block_id,
+                   int false_block_id)
+      : LIsNull(value, is_strict),
+        temp_(temp),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LIsSmi: public LUnaryOperation {
+ public:
+  explicit LIsSmi(LOperand* value) : LUnaryOperation(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmi)
+};
+
+
+class LIsSmiAndBranch: public LIsSmi {
+ public:
+  LIsSmiAndBranch(LOperand* value,
+                  int true_block_id,
+                  int false_block_id)
+      : LIsSmi(value),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LHasInstanceType: public LUnaryOperation {
+ public:
+  explicit LHasInstanceType(LOperand* value)
+      : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+
+  InstanceType TestType();  // The type to test against when generating code.
+  Condition BranchCondition();  // The branch condition for 'true'.
+};
+
+
+class LHasInstanceTypeAndBranch: public LHasInstanceType {
+ public:
+  LHasInstanceTypeAndBranch(LOperand* value,
+                            LOperand* temporary,
+                            int true_block_id,
+                            int false_block_id)
+      : LHasInstanceType(value),
+        temp_(temporary),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+  LOperand* temp() { return temp_; }
+
+ private:
+  LOperand* temp_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LHasCachedArrayIndex: public LUnaryOperation {
+ public:
+  explicit LHasCachedArrayIndex(LOperand* value) : LUnaryOperation(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch: public LHasCachedArrayIndex {
+ public:
+  LHasCachedArrayIndexAndBranch(LOperand* value,
+                                int true_block_id,
+                                int false_block_id)
+      : LHasCachedArrayIndex(value),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LClassOfTest: public LUnaryOperation {
+ public:
+  LClassOfTest(LOperand* value, LOperand* temp)
+      : LUnaryOperation(value), temporary_(temp) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  LOperand* temporary() { return temporary_; }
+
+ private:
+  LOperand* temporary_;
+};
+
+
+class LClassOfTestAndBranch: public LClassOfTest {
+ public:
+  LClassOfTestAndBranch(LOperand* value,
+                        LOperand* temporary,
+                        LOperand* temporary2,
+                        int true_block_id,
+                        int false_block_id)
+      : LClassOfTest(value, temporary),
+        temporary2_(temporary2),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+                               "class-of-test-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+  LOperand* temporary2() { return temporary2_; }
+
+ private:
+  LOperand* temporary2_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LCmpT: public LBinaryOperation {
+ public:
+  LCmpT(LOperand* left, LOperand* right) : LBinaryOperation(left, right) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(Compare)
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LCmpTAndBranch: public LCmpT {
+ public:
+  LCmpTAndBranch(LOperand* left,
+                 LOperand* right,
+                 int true_block_id,
+                 int false_block_id)
+      : LCmpT(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LInstanceOf: public LBinaryOperation {
+ public:
+  LInstanceOf(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfAndBranch: public LInstanceOf {
+ public:
+  LInstanceOfAndBranch(LOperand* left,
+                       LOperand* right,
+                       int true_block_id,
+                       int false_block_id)
+      : LInstanceOf(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LBoundsCheck: public LBinaryOperation {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length)
+      : LBinaryOperation(index, length) { }
+
+  LOperand* index() const { return left(); }
+  LOperand* length() const { return right(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+};
+
+
+class LBitI: public LBinaryOperation {
+ public:
+  LBitI(Token::Value op, LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right), op_(op) { }
+
+  Token::Value op() const { return op_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+
+ private:
+  Token::Value op_;
+};
+
+
+class LShiftI: public LBinaryOperation {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : LBinaryOperation(left, right), op_(op), can_deopt_(can_deopt) { }
+
+  Token::Value op() const { return op_; }
+
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LSubI: public LBinaryOperation {
+ public:
+  LSubI(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstant: public LInstruction {
+  DECLARE_INSTRUCTION(Constant)
+};
+
+
+class LConstantI: public LConstant {
+ public:
+  explicit LConstantI(int32_t value) : value_(value) { }
+  int32_t value() const { return value_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+
+ private:
+  int32_t value_;
+};
+
+
+class LConstantD: public LConstant {
+ public:
+  explicit LConstantD(double value) : value_(value) { }
+  double value() const { return value_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+
+ private:
+  double value_;
+};
+
+
+class LConstantT: public LConstant {
+ public:
+  explicit LConstantT(Handle<Object> value) : value_(value) { }
+  Handle<Object> value() const { return value_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+
+ private:
+  Handle<Object> value_;
+};
+
+
+class LBranch: public LUnaryOperation {
+ public:
+  LBranch(LOperand* input, int true_block_id, int false_block_id)
+      : LUnaryOperation(input),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Value)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LCmpMapAndBranch: public LUnaryOperation {
+ public:
+  LCmpMapAndBranch(LOperand* value,
+                   Handle<Map> map,
+                   int true_block_id,
+                   int false_block_id)
+      : LUnaryOperation(value),
+        map_(map),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+
+  virtual bool IsControl() const { return true; }
+
+  Handle<Map> map() const { return map_; }
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  Handle<Map> map_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LArrayLength: public LUnaryOperation {
+ public:
+  LArrayLength(LOperand* input, LOperand* temporary)
+      : LUnaryOperation(input), temporary_(temporary) { }
+
+  LOperand* temporary() const { return temporary_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLength, "array-length")
+  DECLARE_HYDROGEN_ACCESSOR(ArrayLength)
+
+ private:
+  LOperand* temporary_;
+};
+
+
+class LValueOf: public LUnaryOperation {
+ public:
+  LValueOf(LOperand* input, LOperand* temporary)
+      : LUnaryOperation(input), temporary_(temporary) { }
+
+  LOperand* temporary() const { return temporary_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
+  DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+
+ private:
+  LOperand* temporary_;
+};
+
+
+class LThrow: public LUnaryOperation {
+ public:
+  explicit LThrow(LOperand* value) : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+};
+
+
+class LBitNotI: public LUnaryOperation {
+ public:
+  explicit LBitNotI(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+};
+
+
+class LAddI: public LBinaryOperation {
+ public:
+  LAddI(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LArithmeticD: public LBinaryOperation {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right), op_(op) { }
+
+  Token::Value op() const { return op_; }
+
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT: public LBinaryOperation {
+ public:
+  LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right), op_(op) { }
+
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const;
+
+  Token::Value op() const { return op_; }
+
+ private:
+  Token::Value op_;
+};
+
+
+class LReturn: public LUnaryOperation {
+ public:
+  explicit LReturn(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField: public LUnaryOperation {
+ public:
+  explicit LLoadNamedField(LOperand* object) : LUnaryOperation(object) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric: public LUnaryOperation {
+ public:
+  explicit LLoadNamedGeneric(LOperand* object) : LUnaryOperation(object) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  LOperand* object() const { return input(); }
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadElements: public LUnaryOperation {
+ public:
+  explicit LLoadElements(LOperand* obj) : LUnaryOperation(obj) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+};
+
+
+class LLoadKeyedFastElement: public LBinaryOperation {
+ public:
+  LLoadKeyedFastElement(LOperand* elements,
+                        LOperand* key,
+                        LOperand* load_result)
+      : LBinaryOperation(elements, key),
+        load_result_(load_result) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
+
+  LOperand* elements() const { return left(); }
+  LOperand* key() const { return right(); }
+  LOperand* load_result() const { return load_result_; }
+
+ private:
+  LOperand* load_result_;
+};
+
+
+class LLoadKeyedGeneric: public LBinaryOperation {
+ public:
+  LLoadKeyedGeneric(LOperand* obj, LOperand* key)
+      : LBinaryOperation(obj, key) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+
+  LOperand* object() const { return left(); }
+  LOperand* key() const { return right(); }
+};
+
+
+class LLoadGlobal: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+};
+
+
+class LStoreGlobal: public LUnaryOperation {
+ public:
+  explicit LStoreGlobal(LOperand* value) : LUnaryOperation(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
+};
+
+
+class LPushArgument: public LUnaryOperation {
+ public:
+  explicit LPushArgument(LOperand* argument) : LUnaryOperation(argument) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LGlobalObject: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+};
+
+
+class LGlobalReceiver: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+};
+
+
+class LCallConstantFunction: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<JSFunction> function() const { return hydrogen()->function(); }
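+  // argument_count() includes the receiver, so arity excludes it.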
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKeyed: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNamed: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
+  DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<String> name() const { return hydrogen()->name(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
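+  // argument_count() includes both the function and the receiver, so
+  // arity excludes them.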
+  int arity() const { return hydrogen()->argument_count() - 2; }
+};
+
+
+class LCallGlobal: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
+  DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<String> name() const { return hydrogen()->name(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKnownGlobal: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
+  DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<JSFunction> target() const { return hydrogen()->target(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew: public LUnaryOperation {
+ public:
+  explicit LCallNew(LOperand* constructor) : LUnaryOperation(constructor) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+  DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+};
+
+
+class LInteger32ToDouble: public LUnaryOperation {
+ public:
+  explicit LInteger32ToDouble(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LNumberTagI: public LUnaryOperation {
+ public:
+  explicit LNumberTagI(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagD: public LUnaryOperation {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp)
+      : LUnaryOperation(value), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+// Sometimes truncating conversion from a double value to an int32.
+class LDoubleToI: public LUnaryOperation {
+ public:
+  explicit LDoubleToI(LOperand* value) : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI: public LUnaryOperation {
+ public:
+  LTaggedToI(LOperand* value, LOperand* temp)
+      : LUnaryOperation(value), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LSmiTag: public LUnaryOperation {
+ public:
+  explicit LSmiTag(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+};
+
+
+class LNumberUntagD: public LUnaryOperation {
+ public:
+  explicit LNumberUntagD(LOperand* value) : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+};
+
+
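+// Untags a smi. If needs_check() is true, the input is not statically known
+// to be a smi and the generated code must check the tag before untagging.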
+class LSmiUntag: public LUnaryOperation {
+ public:
+  LSmiUntag(LOperand* use, bool needs_check)
+      : LUnaryOperation(use), needs_check_(needs_check) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+  bool needs_check() const { return needs_check_; }
+
+ private:
+  bool needs_check_;
+};
+
+
+class LStoreNamed: public LInstruction {
+ public:
+  LStoreNamed(LOperand* obj, Handle<Object> name, LOperand* val)
+      : object_(obj), name_(name), value_(val) { }
+
+  DECLARE_INSTRUCTION(StoreNamed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  LOperand* object() const { return object_; }
+  Handle<Object> name() const { return name_; }
+  LOperand* value() const { return value_; }
+
+ private:
+  LOperand* object_;
+  Handle<Object> name_;
+  LOperand* value_;
+};
+
+
+class LStoreNamedField: public LStoreNamed {
+ public:
+  LStoreNamedField(LOperand* obj,
+                   Handle<Object> name,
+                   LOperand* val,
+                   bool in_object,
+                   int offset,
+                   LOperand* temp,
+                   bool needs_write_barrier,
+                   Handle<Map> transition)
+      : LStoreNamed(obj, name, val),
+        is_in_object_(in_object),
+        offset_(offset),
+        temp_(temp),
+        needs_write_barrier_(needs_write_barrier),
+        transition_(transition) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+
+  bool is_in_object() { return is_in_object_; }
+  int offset() { return offset_; }
+  LOperand* temp() { return temp_; }
+  bool needs_write_barrier() { return needs_write_barrier_; }
+  Handle<Map> transition() const { return transition_; }
+  void set_transition(Handle<Map> map) { transition_ = map; }
+
+ private:
+  bool is_in_object_;
+  int offset_;
+  LOperand* temp_;
+  bool needs_write_barrier_;
+  Handle<Map> transition_;
+};
+
+
+class LStoreNamedGeneric: public LStoreNamed {
+ public:
+  LStoreNamedGeneric(LOperand* obj,
+                     Handle<Object> name,
+                     LOperand* val)
+      : LStoreNamed(obj, name, val) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+};
+
+
+class LStoreKeyed: public LInstruction {
+ public:
+  LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val)
+      : object_(obj), key_(key), value_(val) { }
+
+  DECLARE_INSTRUCTION(StoreKeyed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  LOperand* object() const { return object_; }
+  LOperand* key() const { return key_; }
+  LOperand* value() const { return value_; }
+
+ private:
+  LOperand* object_;
+  LOperand* key_;
+  LOperand* value_;
+};
+
+
+class LStoreKeyedFastElement: public LStoreKeyed {
+ public:
+  LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val)
+      : LStoreKeyed(obj, key, val) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+                               "store-keyed-fast-element")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+};
+
+
+class LStoreKeyedGeneric: public LStoreKeyed {
+ public:
+  LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val)
+      : LStoreKeyed(obj, key, val) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+};
+
+
+class LCheckFunction: public LUnaryOperation {
+ public:
+  explicit LCheckFunction(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
+  DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+};
+
+
+class LCheckInstanceType: public LUnaryOperation {
+ public:
+  LCheckInstanceType(LOperand* use, LOperand* temp)
+      : LUnaryOperation(use), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LCheckMap: public LUnaryOperation {
+ public:
+  explicit LCheckMap(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMap)
+};
+
+
+class LCheckPrototypeMaps: public LInstruction {
+ public:
+  LCheckPrototypeMaps(LOperand* temp,
+                      Handle<JSObject> holder,
+                      Handle<Map> receiver_map)
+      : temp_(temp),
+        holder_(holder),
+        receiver_map_(receiver_map) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+
+  LOperand* temp() const { return temp_; }
+  Handle<JSObject> holder() const { return holder_; }
+  Handle<Map> receiver_map() const { return receiver_map_; }
+
+ private:
+  LOperand* temp_;
+  Handle<JSObject> holder_;
+  Handle<Map> receiver_map_;
+};
+
+
+class LCheckSmi: public LUnaryOperation {
+ public:
+  LCheckSmi(LOperand* use, Condition condition)
+      : LUnaryOperation(use), condition_(condition) { }
+
+  Condition condition() const { return condition_; }
+
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const {
+    return (condition_ == eq) ? "check-non-smi" : "check-smi";
+  }
+
+ private:
+  Condition condition_;
+};
+
+
+class LMaterializedLiteral: public LInstruction {
+ public:
+  DECLARE_INSTRUCTION(MaterializedLiteral)
+};
+
+
+class LArrayLiteral: public LMaterializedLiteral {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+};
+
+
+class LObjectLiteral: public LMaterializedLiteral {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+};
+
+
+class LRegExpLiteral: public LMaterializedLiteral {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+  DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LFunctionLiteral: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+
+  Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
+};
+
+
+class LTypeof: public LUnaryOperation {
+ public:
+  explicit LTypeof(LOperand* input) : LUnaryOperation(input) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIs: public LUnaryOperation {
+ public:
+  explicit LTypeofIs(LOperand* input) : LUnaryOperation(input) { }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+};
+
+
+class LTypeofIsAndBranch: public LTypeofIs {
+ public:
+  LTypeofIsAndBranch(LOperand* value,
+                     int true_block_id,
+                     int false_block_id)
+      : LTypeofIs(value),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LDeleteProperty: public LBinaryOperation {
+ public:
+  LDeleteProperty(LOperand* obj, LOperand* key) : LBinaryOperation(obj, key) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
+
+  LOperand* object() const { return left(); }
+  LOperand* key() const { return right(); }
+};
+
+
+class LOsrEntry: public LInstruction {
+ public:
+  LOsrEntry();
+
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+
+  LOperand** SpilledRegisterArray() { return register_spills_; }
+  LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
+
+  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
+  void MarkSpilledDoubleRegister(int allocation_index,
+                                 LOperand* spill_operand);
+
+ private:
+  // Arrays of spill slot operands for registers with an assigned spill
+  // slot, i.e., that must also be restored to the spill slot on OSR entry.
+  // NULL if the register has no assigned spill slot.  Indexed by allocation
+  // index.
+  LOperand* register_spills_[Register::kNumAllocatableRegisters];
+  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+};
+
+
+class LStackCheck: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+};
+
+
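+// Records which operands hold tagged pointers at a safepoint, so that the
+// GC can find and update them during a collection.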
+class LPointerMap: public ZoneObject {
+ public:
+  explicit LPointerMap(int position)
+      : pointer_operands_(8), position_(position), lithium_position_(-1) { }
+
+  const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
+  int position() const { return position_; }
+  int lithium_position() const { return lithium_position_; }
+
+  void set_lithium_position(int pos) {
+    ASSERT(lithium_position_ == -1);
+    lithium_position_ = pos;
+  }
+
+  void RecordPointer(LOperand* op);
+  void PrintTo(StringStream* stream) const;
+
+ private:
+  ZoneList<LOperand*> pointer_operands_;
+  int position_;
+  int lithium_position_;
+};
+
+
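+// Captures the state the deoptimizer needs to rebuild an unoptimized frame:
+// the closure, the AST id to resume at, and the operands holding all live
+// parameter, local, and expression stack values. Environments of enclosing
+// inlined functions are reachable through outer().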
+class LEnvironment: public ZoneObject {
+ public:
+  LEnvironment(Handle<JSFunction> closure,
+               int ast_id,
+               int parameter_count,
+               int argument_count,
+               int value_count,
+               LEnvironment* outer)
+      : closure_(closure),
+        arguments_stack_height_(argument_count),
+        deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
+        translation_index_(-1),
+        ast_id_(ast_id),
+        parameter_count_(parameter_count),
+        values_(value_count),
+        representations_(value_count),
+        spilled_registers_(NULL),
+        spilled_double_registers_(NULL),
+        outer_(outer) {
+  }
+
+  Handle<JSFunction> closure() const { return closure_; }
+  int arguments_stack_height() const { return arguments_stack_height_; }
+  int deoptimization_index() const { return deoptimization_index_; }
+  int translation_index() const { return translation_index_; }
+  int ast_id() const { return ast_id_; }
+  int parameter_count() const { return parameter_count_; }
+  const ZoneList<LOperand*>* values() const { return &values_; }
+  LEnvironment* outer() const { return outer_; }
+
+  void AddValue(LOperand* operand, Representation representation) {
+    values_.Add(operand);
+    representations_.Add(representation);
+  }
+
+  bool HasTaggedValueAt(int index) const {
+    return representations_[index].IsTagged();
+  }
+
+  void Register(int deoptimization_index, int translation_index) {
+    ASSERT(!HasBeenRegistered());
+    deoptimization_index_ = deoptimization_index;
+    translation_index_ = translation_index;
+  }
+  bool HasBeenRegistered() const {
+    return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
+  }
+
+  void SetSpilledRegisters(LOperand** registers,
+                           LOperand** double_registers) {
+    spilled_registers_ = registers;
+    spilled_double_registers_ = double_registers;
+  }
+
+  // Emit frame translation commands for this environment.
+  void WriteTranslation(LCodeGen* cgen, Translation* translation) const;
+
+  void PrintTo(StringStream* stream) const;
+
+ private:
+  Handle<JSFunction> closure_;
+  int arguments_stack_height_;
+  int deoptimization_index_;
+  int translation_index_;
+  int ast_id_;
+  int parameter_count_;
+  ZoneList<LOperand*> values_;
+  ZoneList<Representation> representations_;
+
+  // Arrays of spill slot operands, indexed by allocation index, for
+  // registers that are also in spill slots at an OSR entry.  NULL for
+  // environments that do not correspond to an OSR entry.
+  LOperand** spilled_registers_;
+  LOperand** spilled_double_registers_;
+
+  LEnvironment* outer_;
+};
+
+class LChunkBuilder;
+
+class LChunk: public ZoneObject {
+ public:
+  explicit LChunk(HGraph* graph);
+
+  int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+  LConstantOperand* DefineConstantOperand(HConstant* constant);
+  Handle<Object> LookupLiteral(LConstantOperand* operand) const;
+  Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+
+  int GetNextSpillIndex(bool is_double);
+  LOperand* GetNextSpillSlot(bool is_double);
+
+  int ParameterAt(int index);
+  int GetParameterStackSlot(int index) const;
+  int spill_slot_count() const { return spill_slot_count_; }
+  HGraph* graph() const { return graph_; }
+  const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
+  void AddGapMove(int index, LOperand* from, LOperand* to);
+  LGap* GetGapAt(int index) const;
+  bool IsGapAt(int index) const;
+  int NearestGapPos(int index) const;
+  int NearestNextGapPos(int index) const;
+  void MarkEmptyBlocks();
+  const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
+  LLabel* GetLabel(int block_id) const {
+    HBasicBlock* block = graph_->blocks()->at(block_id);
+    int first_instruction = block->first_instruction_index();
+    return LLabel::cast(instructions_[first_instruction]);
+  }
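+  // Follow the chain of replacement labels (installed when empty blocks are
+  // eliminated) to find the block that will actually be emitted.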
+  int LookupDestination(int block_id) const {
+    LLabel* cur = GetLabel(block_id);
+    while (cur->replacement() != NULL) {
+      cur = cur->replacement();
+    }
+    return cur->block_id();
+  }
+  Label* GetAssemblyLabel(int block_id) const {
+    LLabel* label = GetLabel(block_id);
+    ASSERT(!label->HasReplacement());
+    return label->label();
+  }
+
+  const ZoneList<Handle<JSFunction> >* inlined_closures() const {
+    return &inlined_closures_;
+  }
+
+  void AddInlinedClosure(Handle<JSFunction> closure) {
+    inlined_closures_.Add(closure);
+  }
+
+  void Verify() const;
+
+ private:
+  int spill_slot_count_;
+  HGraph* const graph_;
+  ZoneList<LInstruction*> instructions_;
+  ZoneList<LPointerMap*> pointer_maps_;
+  ZoneList<Handle<JSFunction> > inlined_closures_;
+};
+
+
+class LChunkBuilder BASE_EMBEDDED {
+ public:
+  LChunkBuilder(HGraph* graph, LAllocator* allocator)
+      : chunk_(NULL),
+        graph_(graph),
+        status_(UNUSED),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        next_block_(NULL),
+        argument_count_(0),
+        allocator_(allocator),
+        position_(RelocInfo::kNoPosition),
+        instructions_pending_deoptimization_environment_(NULL),
+        pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
+
+  // Build the sequence for the graph.
+  LChunk* Build();
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  enum Status {
+    UNUSED,
+    BUILDING,
+    DONE,
+    ABORTED
+  };
+
+  LChunk* chunk() const { return chunk_; }
+  HGraph* graph() const { return graph_; }
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_building() const { return status_ == BUILDING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  void Abort(const char* format, ...);
+
+  // Methods for getting operands for Use / Define / Temp.
+  LRegister* ToOperand(Register reg);
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  LOperand* Use(HValue* value, LUnallocated* operand);
+  LOperand* UseFixed(HValue* value, Register fixed_register);
+  LOperand* UseFixedDouble(HValue* value, DoubleRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the
+  // end of the instruction, so the register allocator will not reuse its
+  // register for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only
+  // at instruction start; the register allocator is free to assign the same
+  // register to some other operand used inside the instruction (i.e. a
+  // temporary or the output).
+  LOperand* UseRegister(HValue* value);
+  LOperand* UseRegisterAtStart(HValue* value);
+
+  // A value in a register that may be trashed.
+  LOperand* UseTempRegister(HValue* value);
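+  // The plain Use / UseAtStart variants allow a register or a stack slot;
+  // the OrConstant variants additionally allow a constant operand.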
+  LOperand* Use(HValue* value);
+  LOperand* UseAtStart(HValue* value);
+  LOperand* UseOrConstant(HValue* value);
+  LOperand* UseOrConstantAtStart(HValue* value);
+  LOperand* UseRegisterOrConstant(HValue* value);
+  LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  LInstruction* Define(LInstruction* instr, LUnallocated* result);
+  LInstruction* Define(LInstruction* instr);
+  LInstruction* DefineAsRegister(LInstruction* instr);
+  LInstruction* DefineAsSpilled(LInstruction* instr, int index);
+  LInstruction* DefineSameAsAny(LInstruction* instr);
+  LInstruction* DefineSameAsFirst(LInstruction* instr);
+  LInstruction* DefineFixed(LInstruction* instr, Register reg);
+  LInstruction* DefineFixedDouble(LInstruction* instr, DoubleRegister reg);
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  LInstruction* AssignPointerMap(LInstruction* instr);
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly, so we do not attach an environment to such
+  // instructions.
+  LInstruction* MarkAsCall(
+      LInstruction* instr,
+      HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+
+  LInstruction* SetInstructionPendingDeoptimizationEnvironment(
+      LInstruction* instr, int ast_id);
+  void ClearInstructionPendingDeoptimizationEnvironment();
+
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+
+  // Temporary operand that may be a memory location.
+  LOperand* Temp();
+  // Temporary operand that must be in a register.
+  LUnallocated* TempRegister();
+  LOperand* FixedTemp(Register reg);
+  LOperand* FixedTemp(DoubleRegister reg);
+
+  void VisitInstruction(HInstruction* current);
+
+  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+
+  LChunk* chunk_;
+  HGraph* const graph_;
+  Status status_;
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  HBasicBlock* next_block_;
+  int argument_count_;
+  LAllocator* allocator_;
+  int position_;
+  LInstruction* instructions_pending_deoptimization_environment_;
+  int pending_deoptimization_ast_id_;
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_INSTRUCTION
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_LITHIUM_ARM_H_
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
new file mode 100644 (file)
index 0000000..db8037a
--- /dev/null
@@ -0,0 +1,2146 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "arm/lithium-codegen-arm.h"
+#include "code-stubs.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
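+// Called back by the macro assembler after it emits a call, to record a
+// safepoint with the given pointer map and deoptimization index.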
+class SafepointGenerator : public PostCallGenerator {
+ public:
+  SafepointGenerator(LCodeGen* codegen,
+                     LPointerMap* pointers,
+                     int deoptimization_index)
+      : codegen_(codegen),
+        pointers_(pointers),
+        deoptimization_index_(deoptimization_index) { }
+  virtual ~SafepointGenerator() { }
+
+  virtual void Generate() {
+    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  int deoptimization_index_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  HPhase phase("Code generation", chunk());
+  ASSERT(is_unused());
+  status_ = GENERATING;
+  CpuFeatures::Scope scope1(VFP3);
+  CpuFeatures::Scope scope2(ARMv7);
+  return GeneratePrologue() &&
+      GenerateBody() &&
+      GenerateDeferredCode() &&
+      GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  ASSERT(is_done());
+  code->set_stack_slots(StackSlotCount());
+  code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+  PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::Abort(const char* format, ...) {
+  if (FLAG_trace_bailout) {
+    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
+    PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name);
+    va_list arguments;
+    va_start(arguments, format);
+    OS::VPrint(format, arguments);
+    va_end(arguments);
+    PrintF("\n");
+  }
+  status_ = ABORTED;
+}
+
+
+void LCodeGen::Comment(const char* format, ...) {
+  if (!FLAG_code_comments) return;
+  char buffer[4 * KB];
+  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
+  va_list arguments;
+  va_start(arguments, format);
+  builder.AddFormattedList(format, arguments);
+  va_end(arguments);
+
+  // Copy the string before recording it in the assembler to avoid
+  // issues when the stack-allocated buffer goes out of scope.
+  size_t length = builder.position();
+  Vector<char> copy = Vector<char>::New(length + 1);
+  memcpy(copy.start(), builder.Finalize(), copy.length());
+  masm()->RecordComment(copy.start());
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+  ASSERT(is_generating());
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+    __ stop("stop_at");
+  }
+#endif
+
+  // r1: Callee's JS function.
+  // cp: Callee's context.
+  // fp: Caller's frame pointer.
+  // lr: Caller's pc.
+
+  __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+  __ add(fp, sp, Operand(2 * kPointerSize));  // Adjust FP to point to saved FP.
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = StackSlotCount();
+  if (slots > 0) {
+    if (FLAG_debug_code) {
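+      // Fill the spill slots with a recognizable zap value to make stale
+      // slot reads easier to spot.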
+      __ mov(r0, Operand(slots));
+      __ mov(r2, Operand(kSlotsZapValue));
+      Label loop;
+      __ bind(&loop);
+      __ push(r2);
+      __ sub(r0, r0, Operand(1), SetCC);  // Set flags for the branch below.
+      __ b(ne, &loop);
+    } else {
+      __ sub(sp, sp, Operand(slots * kPointerSize));
+    }
+  }
+
+  // Trace the call.
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateBody() {
+  ASSERT(is_generating());
+  bool emit_instructions = true;
+  for (current_instruction_ = 0;
+       !is_aborted() && current_instruction_ < instructions_->length();
+       current_instruction_++) {
+    LInstruction* instr = instructions_->at(current_instruction_);
+    if (instr->IsLabel()) {
+      LLabel* label = LLabel::cast(instr);
+      emit_instructions = !label->HasReplacement();
+    }
+
+    if (emit_instructions) {
+      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+      instr->CompileToNative(this);
+    }
+  }
+  return !is_aborted();
+}
+
+
+LInstruction* LCodeGen::GetNextInstruction() {
+  if (current_instruction_ < instructions_->length() - 1) {
+    return instructions_->at(current_instruction_ + 1);
+  } else {
+    return NULL;
+  }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  ASSERT(is_generating());
+  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+    LDeferredCode* code = deferred_[i];
+    __ bind(code->entry());
+    code->Generate();
+    __ jmp(code->exit());
+  }
+
+  // Deferred code is the last part of the instruction sequence. Mark
+  // the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  ASSERT(is_done());
+  safepoints_.Emit(masm(), StackSlotCount());
+  return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+  return Register::FromAllocationIndex(index);
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
+  return DoubleRegister::FromAllocationIndex(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  ASSERT(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+
+Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
+  if (op->IsRegister()) {
+    return ToRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    __ mov(scratch, ToOperand(op));
+    return scratch;
+  } else if (op->IsStackSlot() || op->IsArgument()) {
+    __ ldr(scratch, ToMemOperand(op));
+    return scratch;
+  }
+  UNREACHABLE();
+  return scratch;
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  ASSERT(op->IsDoubleRegister());
+  return ToDoubleRegister(op->index());
+}
+
+
+DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+                                                SwVfpRegister flt_scratch,
+                                                DoubleRegister dbl_scratch) {
+  if (op->IsDoubleRegister()) {
+    return ToDoubleRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    Handle<Object> literal = chunk_->LookupLiteral(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      ASSERT(literal->IsNumber());
+      __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
+      __ vmov(flt_scratch, ip);
+      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+      return dbl_scratch;
+    } else if (r.IsDouble()) {
+      Abort("unsupported double immediate");
+    } else if (r.IsTagged()) {
+      Abort("unsupported tagged immediate");
+    }
+  } else if (op->IsStackSlot() || op->IsArgument()) {
+    // TODO(regis): Why is vldr not taking a MemOperand?
+    // __ vldr(dbl_scratch, ToMemOperand(op));
+    MemOperand mem_op = ToMemOperand(op);
+    __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
+    return dbl_scratch;
+  }
+  UNREACHABLE();
+  return dbl_scratch;
+}
+
+
+int LCodeGen::ToInteger32(LConstantOperand* op) const {
+  Handle<Object> value = chunk_->LookupLiteral(op);
+  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
+  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
+      value->Number());
+  return static_cast<int32_t>(value->Number());
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+  if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    Handle<Object> literal = chunk_->LookupLiteral(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      ASSERT(literal->IsNumber());
+      return Operand(static_cast<int32_t>(literal->Number()));
+    } else if (r.IsDouble()) {
+      Abort("ToOperand Unsupported double immediate.");
+    }
+    ASSERT(r.IsTagged());
+    return Operand(literal);
+  } else if (op->IsRegister()) {
+    return Operand(ToRegister(op));
+  } else if (op->IsDoubleRegister()) {
+    Abort("ToOperand IsDoubleRegister unimplemented");
+    return Operand(0);
+  }
+  // Stack slots not implemented, use ToMemOperand instead.
+  UNREACHABLE();
+  return Operand(0);
+}
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+  // TODO(regis): Revisit.
+  ASSERT(!op->IsRegister());
+  ASSERT(!op->IsDoubleRegister());
+  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+  int index = op->index();
+  if (index >= 0) {
+    // Local or spill slot. Skip the frame pointer, function, and
+    // context in the fixed part of the frame.
+    return MemOperand(fp, -(index + 3) * kPointerSize);
+  } else {
+    // Incoming parameter. Skip the return address.
+    return MemOperand(fp, -(index - 1) * kPointerSize);
+  }
+}
+
+
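+// Appends a command to the translation describing where |op| lives, so that
+// the deoptimizer can find the value when materializing the frame.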
+void LCodeGen::AddToTranslation(Translation* translation,
+                                LOperand* op,
+                                bool is_tagged) {
+  if (op == NULL) {
+    // TODO(twuerthinger): Introduce marker operands to indicate that this
+    // value is not present and must be reconstructed by the deoptimizer.
+    // Currently this is only used for the arguments object.
+    translation->StoreArgumentsObject();
+  } else if (op->IsStackSlot()) {
+    if (is_tagged) {
+      translation->StoreStackSlot(op->index());
+    } else {
+      translation->StoreInt32StackSlot(op->index());
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    translation->StoreDoubleStackSlot(op->index());
+  } else if (op->IsArgument()) {
+    ASSERT(is_tagged);
+    int src_index = StackSlotCount() + op->index();
+    translation->StoreStackSlot(src_index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    DoubleRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(literal);
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  if (instr != NULL) {
+    LPointerMap* pointers = instr->pointer_map();
+    RecordPosition(pointers->position());
+    __ Call(code, mode);
+    RegisterLazyDeoptimization(instr);
+  } else {
+    LPointerMap no_pointers(0);
+    RecordPosition(no_pointers.position());
+    __ Call(code, mode);
+    RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
+  }
+}
+
+
+void LCodeGen::CallRuntime(Runtime::Function* function,
+                           int num_arguments,
+                           LInstruction* instr) {
+  ASSERT(instr != NULL);
+  LPointerMap* pointers = instr->pointer_map();
+  ASSERT(pointers != NULL);
+  RecordPosition(pointers->position());
+
+  __ CallRuntime(function, num_arguments);
+  // Runtime calls to Throw are not supposed to ever return at the
+  // call site, so don't register lazy deoptimization for these. We do,
+  // however, have to record a safepoint, since throwing exceptions can
+  // cause a garbage collection.
+  if (!instr->IsThrow()) {
+    RegisterLazyDeoptimization(instr);
+  } else {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kNoDeoptimizationIndex);
+  }
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+  // Create the environment to bail out to. If the call has side effects,
+  // execution has to continue after the call; otherwise execution could
+  // resume from a previous bailout point and repeat the call.
+  LEnvironment* deoptimization_environment;
+  if (instr->HasDeoptimizationEnvironment()) {
+    deoptimization_environment = instr->deoptimization_environment();
+  } else {
+    deoptimization_environment = instr->environment();
+  }
+
+  RegisterEnvironmentForDeoptimization(deoptimization_environment);
+  RecordSafepoint(instr->pointer_map(),
+                  deoptimization_environment->deoptimization_index());
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
+  if (!environment->HasBeenRegistered()) {
+    // Physical stack frame layout:
+    // -x ............. -4  0 ..................................... y
+    // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+    // Layout of the environment:
+    // 0 ..................................................... size-1
+    // [parameters] [locals] [expression stack including arguments]
+
+    // Layout of the translation:
+    // 0 ........................................................ size - 1 + 4
+    // [expression stack including arguments] [locals] [4 words] [parameters]
+    // |>------------  translation_size ------------<|
+
+    int frame_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+    }
+    Translation translation(&translations_, frame_count);
+    environment->WriteTranslation(this, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    environment->Register(deoptimization_index, translation.index());
+    deoptimizations_.Add(environment);
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
+  RegisterEnvironmentForDeoptimization(environment);
+  ASSERT(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+  ASSERT(entry != NULL);
+  if (entry == NULL) {
+    Abort("bailout was not prepared");
+    return;
+  }
+
+  ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on ARM.
+
+  if (FLAG_deopt_every_n_times == 1 &&
+      info_->shared_info()->opt_count() == id) {
+    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+    return;
+  }
+
+  if (cc == no_condition) {
+    if (FLAG_trap_on_deopt) __ stop("trap_on_deopt");
+    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+  } else {
+    if (FLAG_trap_on_deopt) {
+      Label done;
+      __ b(&done, NegateCondition(cc));
+      __ stop("trap_on_deopt");
+      __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+      __ bind(&done);
+    } else {
+      __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc);
+    }
+  }
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+  int length = deoptimizations_.length();
+  if (length == 0) return;
+  ASSERT(FLAG_deopt);
+  Handle<DeoptimizationInputData> data =
+      Factory::NewDeoptimizationInputData(length, TENURED);
+
+  data->SetTranslationByteArray(*translations_.CreateByteArray());
+  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+
+  Handle<FixedArray> literals =
+      Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
+  for (int i = 0; i < deoptimization_literals_.length(); i++) {
+    literals->set(i, *deoptimization_literals_[i]);
+  }
+  data->SetLiteralArray(*literals);
+
+  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+  // Populate the deoptimization entries.
+  for (int i = 0; i < length; i++) {
+    LEnvironment* env = deoptimizations_[i];
+    data->SetAstId(i, Smi::FromInt(env->ast_id()));
+    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+    data->SetArgumentsStackHeight(i,
+                                  Smi::FromInt(env->arguments_stack_height()));
+  }
+  code->set_deoptimization_data(*data);
+}
+
+
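+// Returns the index of |literal| in the deoptimization literals array,
+// adding it first if it is not already present.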
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+  int result = deoptimization_literals_.length();
+  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+  }
+  deoptimization_literals_.Add(literal);
+  return result;
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+  ASSERT(deoptimization_literals_.length() == 0);
+
+  const ZoneList<Handle<JSFunction> >* inlined_closures =
+      chunk()->inlined_closures();
+
+  for (int i = 0, length = inlined_closures->length();
+       i < length;
+       i++) {
+    DefineDeoptimizationLiteral(inlined_closures->at(i));
+  }
+
+  inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               int deoptimization_index) {
+  const ZoneList<LOperand*>* operands = pointers->operands();
+  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+                                                    deoptimization_index);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index());
+    }
+  }
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            int deoptimization_index) {
+  const ZoneList<LOperand*>* operands = pointers->operands();
+  Safepoint safepoint =
+      safepoints_.DefineSafepointWithRegisters(
+          masm(), arguments, deoptimization_index);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index());
+    } else if (pointer->IsRegister()) {
+      safepoint.DefinePointerRegister(ToRegister(pointer));
+    }
+  }
+  // Register cp always contains a pointer to the context.
+  safepoint.DefinePointerRegister(cp);
+}
+
+
+void LCodeGen::RecordPosition(int position) {
+  if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  if (label->is_loop_header()) {
+    Comment(";;; B%d - LOOP entry", label->block_id());
+  } else {
+    Comment(";;; B%d", label->block_id());
+  }
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  LCodeGen::DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+  // d0 must always be a scratch register.
+  DoubleRegister dbl_scratch = d0;
+  LUnallocated marker_operand(LUnallocated::NONE);
+
+  Register core_scratch = r9;
+  bool destroys_core_scratch = false;
+
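+  // The gap resolver orders the moves and breaks cycles by routing one value
+  // through a marker operand, which stands for core_scratch (or dbl_scratch
+  // for doubles) in the cases handled below.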
+  LGapResolver resolver(move->move_operands(), &marker_operand);
+  const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder();
+  for (int i = moves->length() - 1; i >= 0; --i) {
+    LMoveOperands move = moves->at(i);
+    LOperand* from = move.from();
+    LOperand* to = move.to();
+    ASSERT(!from->IsDoubleRegister() ||
+           !ToDoubleRegister(from).is(dbl_scratch));
+    ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch));
+    ASSERT(!from->IsRegister() || !ToRegister(from).is(core_scratch));
+    ASSERT(!to->IsRegister() || !ToRegister(to).is(core_scratch));
+    if (from == &marker_operand) {
+      if (to->IsRegister()) {
+        __ mov(ToRegister(to), core_scratch);
+        ASSERT(destroys_core_scratch);
+      } else if (to->IsStackSlot()) {
+        __ str(core_scratch, ToMemOperand(to));
+        ASSERT(destroys_core_scratch);
+      } else if (to->IsDoubleRegister()) {
+        __ vmov(ToDoubleRegister(to), dbl_scratch);
+      } else {
+        ASSERT(to->IsDoubleStackSlot());
+        // TODO(regis): Why is vstr not taking a MemOperand?
+        // __ vstr(dbl_scratch, ToMemOperand(to));
+        MemOperand to_operand = ToMemOperand(to);
+        __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
+      }
+    } else if (to == &marker_operand) {
+      if (from->IsRegister() || from->IsConstantOperand()) {
+        __ mov(core_scratch, ToOperand(from));
+        destroys_core_scratch = true;
+      } else if (from->IsStackSlot()) {
+        __ ldr(core_scratch, ToMemOperand(from));
+        destroys_core_scratch = true;
+      } else if (from->IsDoubleRegister()) {
+        __ vmov(dbl_scratch, ToDoubleRegister(from));
+      } else {
+        ASSERT(from->IsDoubleStackSlot());
+        // TODO(regis): Why is vldr not taking a MemOperand?
+        // __ vldr(dbl_scratch, ToMemOperand(from));
+        MemOperand from_operand = ToMemOperand(from);
+        __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
+      }
+    } else if (from->IsConstantOperand()) {
+      if (to->IsRegister()) {
+        __ mov(ToRegister(to), ToOperand(from));
+      } else {
+        ASSERT(to->IsStackSlot());
+        __ mov(ip, ToOperand(from));
+        __ str(ip, ToMemOperand(to));
+      }
+    } else if (from->IsRegister()) {
+      if (to->IsRegister()) {
+        __ mov(ToRegister(to), ToOperand(from));
+      } else {
+        ASSERT(to->IsStackSlot());
+        __ str(ToRegister(from), ToMemOperand(to));
+      }
+    } else if (to->IsRegister()) {
+      ASSERT(from->IsStackSlot());
+      __ ldr(ToRegister(to), ToMemOperand(from));
+    } else if (from->IsStackSlot()) {
+      ASSERT(to->IsStackSlot());
+      __ ldr(ip, ToMemOperand(from));
+      __ str(ip, ToMemOperand(to));
+    } else if (from->IsDoubleRegister()) {
+      if (to->IsDoubleRegister()) {
+        __ vmov(ToDoubleRegister(to), ToDoubleRegister(from));
+      } else {
+        ASSERT(to->IsDoubleStackSlot());
+        // TODO(regis): Why is vstr not taking a MemOperand?
+        // __ vstr(dbl_scratch, ToMemOperand(to));
+        MemOperand to_operand = ToMemOperand(to);
+        __ vstr(ToDoubleRegister(from), to_operand.rn(), to_operand.offset());
+      }
+    } else if (to->IsDoubleRegister()) {
+      ASSERT(from->IsDoubleStackSlot());
+      // TODO(regis): Why is vldr not taking a MemOperand?
+      // __ vldr(ToDoubleRegister(to), ToMemOperand(from));
+      MemOperand from_operand = ToMemOperand(from);
+      __ vldr(ToDoubleRegister(to), from_operand.rn(), from_operand.offset());
+    } else {
+      ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
+      // TODO(regis): Why is vldr not taking a MemOperand?
+      // __ vldr(dbl_scratch, ToMemOperand(from));
+      MemOperand from_operand = ToMemOperand(from);
+      __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
+      // TODO(regis): Why is vstr not taking a MemOperand?
+      // __ vstr(dbl_scratch, ToMemOperand(to));
+      MemOperand to_operand = ToMemOperand(to);
+      __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
+    }
+  }
+
+  if (destroys_core_scratch) {
+    __ ldr(core_scratch, MemOperand(fp, -kPointerSize));
+  }
+
+  LInstruction* next = GetNextInstruction();
+  if (next != NULL && next->IsLazyBailout()) {
+    int pc = masm()->pc_offset();
+    safepoints_.SetPcAfterGap(pc);
+  }
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+
+  LInstruction* next = GetNextInstruction();
+  if (next != NULL && next->IsLazyBailout()) {
+    int pc = masm()->pc_offset();
+    safepoints_.SetPcAfterGap(pc);
+  }
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  Abort("DoCallStub unimplemented.");
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  Abort("DoModI unimplemented.");
+}
+
+
+void LCodeGen::DoDivI(LDivI* instr) {
+  Abort("DoDivI unimplemented.");
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register left = ToRegister(instr->left());
+  Register scratch = r9;
+  Register right = EmitLoadRegister(instr->right(), scratch);
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) &&
+      !instr->right()->IsConstantOperand()) {
+    __ orr(ToRegister(instr->temp()), left, right);
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    // left = low word and scratch = high word of left * right.
+    __ smull(left, scratch, left, right);
+    // The multiplication overflowed if the high word is not the sign
+    // extension of the low word.
+    __ mov(ip, Operand(left, ASR, 31));
+    __ cmp(ip, Operand(scratch));
+    DeoptimizeIf(ne, instr->environment());
+  } else {
+    __ mul(left, left, right);
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Bail out if the result is supposed to be negative zero.
+    Label done;
+    __ tst(left, Operand(left));
+    __ b(ne, &done);
+    if (instr->right()->IsConstantOperand()) {
+      if (ToInteger32(LConstantOperand::cast(instr->right())) < 0) {
+        DeoptimizeIf(no_condition, instr->environment());
+      }
+    } else {
+      // Test the non-zero operand for negative sign.
+      __ cmp(ToRegister(instr->temp()), Operand(0));
+      DeoptimizeIf(mi, instr->environment());
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  ASSERT(left->Equals(instr->result()));
+  ASSERT(left->IsRegister());
+  Register result = ToRegister(left);
+  Register right_reg = EmitLoadRegister(right, ip);
+  switch (instr->op()) {
+    case Token::BIT_AND:
+      __ and_(result, ToRegister(left), Operand(right_reg));
+      break;
+    case Token::BIT_OR:
+      __ orr(result, ToRegister(left), Operand(right_reg));
+      break;
+    case Token::BIT_XOR:
+      __ eor(result, ToRegister(left), Operand(right_reg));
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  ASSERT(left->Equals(instr->result()));
+  ASSERT(left->IsRegister());
+  Register result = ToRegister(left);
+  if (right->IsRegister()) {
+    // Mask the right operand.
+    __ and_(r9, ToRegister(right), Operand(0x1F));
+    switch (instr->op()) {
+      case Token::SAR:
+        __ mov(result, Operand(result, ASR, r9));
+        break;
+      case Token::SHR:
+        if (instr->can_deopt()) {
+          __ mov(result, Operand(result, LSR, r9), SetCC);
+          DeoptimizeIf(mi, instr->environment());
+        } else {
+          __ mov(result, Operand(result, LSR, r9));
+        }
+        break;
+      case Token::SHL:
+        __ mov(result, Operand(result, LSL, r9));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    int value = ToInteger32(LConstantOperand::cast(right));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+    switch (instr->op()) {
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ mov(result, Operand(result, ASR, shift_count));
+        }
+        break;
+      case Token::SHR:
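+        // A zero shift count leaves the value unchanged, but SHR produces an
+        // unsigned result; if the sign bit is set the value does not fit in
+        // an int32, so deoptimize.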
+        if (shift_count == 0 && instr->can_deopt()) {
+          __ tst(result, Operand(0x80000000));
+          DeoptimizeIf(ne, instr->environment());
+        } else {
+          __ mov(result, Operand(result, LSR, shift_count));
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+          __ mov(result, Operand(result, LSL, shift_count));
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  Register left = ToRegister(instr->left());
+  Register right = EmitLoadRegister(instr->right(), ip);
+  ASSERT(instr->left()->Equals(instr->result()));
+  __ sub(left, left, Operand(right), SetCC);
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(vs, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  ASSERT(instr->result()->IsRegister());
+  __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  Abort("DoConstantD unimplemented.");
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  ASSERT(instr->result()->IsRegister());
+  __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoArrayLength(LArrayLength* instr) {
+  Register result = ToRegister(instr->result());
+
+  if (instr->hydrogen()->value()->IsLoadElements()) {
+    // We load the length directly from the elements array.
+    Register elements = ToRegister(instr->input());
+    __ ldr(result, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  } else {
+    // Check that the receiver really is an array.
+    Register array = ToRegister(instr->input());
+    Register temporary = ToRegister(instr->temporary());
+    __ CompareObjectType(array, temporary, temporary, JS_ARRAY_TYPE);
+    DeoptimizeIf(ne, instr->environment());
+
+    // Load length directly from the array.
+    __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
+  }
+  Abort("DoArrayLength untested.");
+}
+
+
+void LCodeGen::DoValueOf(LValueOf* instr) {
+  Abort("DoValueOf unimplemented.");
+}
+
+
+void LCodeGen::DoBitNotI(LBitNotI* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->Equals(instr->result()));
+  __ mvn(ToRegister(input), Operand(ToRegister(input)));
+  Abort("DoBitNotI untested.");
+}
+
+
+void LCodeGen::DoThrow(LThrow* instr) {
+  Register input_reg = EmitLoadRegister(instr->input(), ip);
+  __ push(input_reg);
+  CallRuntime(Runtime::kThrow, 1, instr);
+
+  if (FLAG_debug_code) {
+    __ stop("Unreachable code.");
+  }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  ASSERT(left->Equals(instr->result()));
+
+  Register right_reg = EmitLoadRegister(right, ip);
+  __ add(ToRegister(left), ToRegister(left), Operand(right_reg), SetCC);
+
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(vs, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  DoubleRegister left = ToDoubleRegister(instr->left());
+  DoubleRegister right = ToDoubleRegister(instr->right());
+  switch (instr->op()) {
+    case Token::ADD:
+      __ vadd(left, left, right);
+      break;
+    case Token::SUB:
+      __ vsub(left, left, right);
+      break;
+    case Token::MUL:
+      __ vmul(left, left, right);
+      break;
+    case Token::DIV:
+      __ vdiv(left, left, right);
+      break;
+    case Token::MOD: {
+      Abort("DoArithmeticD unimplemented for MOD.");
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  ASSERT(ToRegister(instr->left()).is(r1));
+  ASSERT(ToRegister(instr->right()).is(r0));
+  ASSERT(ToRegister(instr->result()).is(r0));
+
+  // TODO(regis): Implement TypeRecordingBinaryOpStub and replace current
+  // GenericBinaryOpStub:
+  // TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
+  GenericBinaryOpStub stub(instr->op(), NO_OVERWRITE, r1, r0);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
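+// Returns the index of the next block that will actually emit code, skipping
+// blocks whose labels have been replaced, or -1 if there is none.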
+int LCodeGen::GetNextEmittedBlock(int block) {
+  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
+    LLabel* label = chunk_->GetLabel(i);
+    if (!label->HasReplacement()) return i;
+  }
+  return -1;
+}
+
+
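+// Emits the branch for a control instruction. When one of the targets is the
+// next emitted block it becomes a fallthrough, so at most one branch
+// instruction is needed; otherwise both branches are emitted explicitly.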
+void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
+  int next_block = GetNextEmittedBlock(current_block_);
+  right_block = chunk_->LookupDestination(right_block);
+  left_block = chunk_->LookupDestination(left_block);
+
+  if (right_block == left_block) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
+  } else if (right_block == next_block) {
+    __ b(cc, chunk_->GetAssemblyLabel(left_block));
+  } else {
+    __ b(cc, chunk_->GetAssemblyLabel(left_block));
+    __ b(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Representation r = instr->hydrogen()->representation();
+  if (r.IsInteger32()) {
+    Register reg = ToRegister(instr->input());
+    __ cmp(reg, Operand(0));
+    EmitBranch(true_block, false_block, nz);
+  } else if (r.IsDouble()) {
+    DoubleRegister reg = ToDoubleRegister(instr->input());
+    __ vcmp(reg, 0.0);
+    EmitBranch(true_block, false_block, ne);
+  } else {
+    ASSERT(r.IsTagged());
+    Register reg = ToRegister(instr->input());
+    if (instr->hydrogen()->type().IsBoolean()) {
+      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+      __ cmp(reg, ip);
+      EmitBranch(true_block, false_block, eq);
+    } else {
+      Label* true_label = chunk_->GetAssemblyLabel(true_block);
+      Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
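+      // Inline the common ToBoolean cases: undefined, false, and zero are
+      // false; true and other smis are true; heap numbers are compared
+      // against 0.0. Remaining values fall back to the ToBooleanStub.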
+      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+      __ cmp(reg, ip);
+      __ b(eq, false_label);
+      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+      __ cmp(reg, ip);
+      __ b(eq, true_label);
+      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+      __ cmp(reg, ip);
+      __ b(eq, false_label);
+      __ cmp(reg, Operand(0));
+      __ b(eq, false_label);
+      __ tst(reg, Operand(kSmiTagMask));
+      __ b(eq, true_label);
+
+      // Test for double values. Zero is false.
+      Label call_stub;
+      DoubleRegister dbl_scratch = d0;
+      Register core_scratch = r9;
+      ASSERT(!reg.is(core_scratch));
+      __ ldr(core_scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+      __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+      __ cmp(core_scratch, Operand(ip));
+      __ b(ne, &call_stub);
+      __ sub(ip, reg, Operand(kHeapObjectTag));
+      __ vldr(dbl_scratch, ip, HeapNumber::kValueOffset);
+      __ vcmp(dbl_scratch, 0.0);
+      __ b(eq, false_label);
+      __ b(true_label);
+
+      // The conversion stub doesn't cause garbage collections so it's
+      // safe to not record a safepoint after the call.
+      __ bind(&call_stub);
+      ToBooleanStub stub(reg);
+      RegList saved_regs = kJSCallerSaved | kCalleeSaved;
+      __ stm(db_w, sp, saved_regs);
+      __ CallStub(&stub);
+      __ cmp(reg, Operand(0));
+      __ ldm(ia_w, sp, saved_regs);
+      EmitBranch(true_block, false_block, nz);
+    }
+  }
+}
+
+
+void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+  // TODO(srdjan): Perform stack overflow check if this goto needs it
+  // before jumping.
+  block = chunk_->LookupDestination(block);
+  int next_block = GetNextEmittedBlock(current_block_);
+  if (block != next_block) {
+    __ jmp(chunk_->GetAssemblyLabel(block));
+  }
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
+  UNIMPLEMENTED();
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+  // TODO(srdjan): Implement deferred stack check.
+  EmitGoto(instr->block_id(), NULL);
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = no_condition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = eq;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? lo : lt;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? hi : gt;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? ls : le;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ? hs : ge;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
+  __ cmp(ToRegister(left), ToOperand(right));
+  Abort("EmitCmpI untested.");
+}
+
+
+void LCodeGen::DoCmpID(LCmpID* instr) {
+  Abort("DoCmpID unimplemented.");
+}
+
+
+void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+  Abort("DoCmpIDAndBranch unimplemented.");
+}
+
+
+void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+  Register result = ToRegister(instr->result());
+
+  __ cmp(left, Operand(right));
+  __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
+  __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
+  Abort("DoCmpJSObjectEq untested.");
+}
+
+
+void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
+  Abort("DoCmpJSObjectEqAndBranch unimplemented.");
+}
+
+
+void LCodeGen::DoIsNull(LIsNull* instr) {
+  Abort("DoIsNull unimplemented.");
+}
+
+
+void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+  Register reg = ToRegister(instr->input());
+
+  // TODO(fsc): If the expression is known to be a smi, then it's
+  // definitely not null. Jump to the false block.
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(reg, ip);
+  if (instr->is_strict()) {
+    EmitBranch(true_block, false_block, eq);
+  } else {
+    Label* true_label = chunk_->GetAssemblyLabel(true_block);
+    Label* false_label = chunk_->GetAssemblyLabel(false_block);
+    __ b(eq, true_label);
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ cmp(reg, ip);
+    __ b(eq, true_label);
+    __ tst(reg, Operand(kSmiTagMask));
+    __ b(eq, false_label);
+    // Check for undetectable objects by looking in the bit field in
+    // the map. The object has already been smi checked.
+    Register scratch = ToRegister(instr->temp());
+    __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
+    EmitBranch(true_block, false_block, ne);
+  }
+}
+
+
+void LCodeGen::DoIsSmi(LIsSmi* instr) {
+  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+  Register result = ToRegister(instr->result());
+  Register input_reg = EmitLoadRegister(instr->input(), ip);
+  __ tst(input_reg, Operand(kSmiTagMask));
+  __ LoadRoot(result, Heap::kTrueValueRootIndex);
+  Label done;
+  __ b(eq, &done);
+  __ LoadRoot(result, Heap::kFalseValueRootIndex);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Register input_reg = EmitLoadRegister(instr->input(), ip);
+  __ tst(input_reg, Operand(kSmiTagMask));
+  EmitBranch(true_block, false_block, eq);
+}
+
+
+InstanceType LHasInstanceType::TestType() {
+  InstanceType from = hydrogen()->from();
+  InstanceType to = hydrogen()->to();
+  if (from == FIRST_TYPE) return to;
+  ASSERT(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
+Condition LHasInstanceType::BranchCondition() {
+  InstanceType from = hydrogen()->from();
+  InstanceType to = hydrogen()->to();
+  if (from == to) return eq;
+  if (to == LAST_TYPE) return hs;
+  if (from == FIRST_TYPE) return ls;
+  UNREACHABLE();
+  return eq;
+}
+
+
+void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
+  Abort("DoHasInstanceType unimplemented.");
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register input = ToRegister(instr->input());
+  Register temp = ToRegister(instr->temp());
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  __ tst(input, Operand(kSmiTagMask));
+  __ b(eq, false_label);
+
+  __ CompareObjectType(input, temp, temp, instr->TestType());
+  EmitBranch(true_block, false_block, instr->BranchCondition());
+}
+
+
+void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
+  Abort("DoHasCachedArrayIndex unimplemented.");
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Abort("DoHasCachedArrayIndexAndBranch unimplemented.");
+}
+
+
+// Branches to a label or falls through with the answer in the z flag.  Trashes
+// the temp registers, but not the input.  Only input and temp2 may alias.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+                               Label* is_false,
+                               Handle<String> class_name,
+                               Register input,
+                               Register temp,
+                               Register temp2) {
+  Abort("EmitClassOfTest unimplemented.");
+}
+
+
+void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
+  Abort("DoClassOfTest unimplemented.");
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Abort("DoClassOfTestAndBranch unimplemented.");
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Abort("DoCmpMapAndBranch unimplemented.");
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  Abort("DoInstanceOf unimplemented.");
+}
+
+
+void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
+  Abort("DoInstanceOfAndBranch unimplemented.");
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  Token::Value op = instr->op();
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  Condition condition = ComputeCompareCondition(op);
+  if (op == Token::GT || op == Token::LTE) {
+    condition = ReverseCondition(condition);
+  }
+  __ cmp(r0, Operand(0));
+  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex,
+      condition);
+  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex,
+      NegateCondition(condition));
+}
+
+
+void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
+  Abort("DoCmpTAndBranch unimplemented.");
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace) {
+    // Push the return value on the stack as the parameter.
+    // Runtime::TraceExit returns its parameter in r0.
+    __ push(r0);
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
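+  // One extra slot in the delta accounts for the receiver pushed below the
+  // parameters.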
+  int32_t sp_delta = (ParameterCount() + 1) * kPointerSize;
+  __ mov(sp, fp);
+  __ ldm(ia_w, sp, fp.bit() | lr.bit());
+  __ add(sp, sp, Operand(sp_delta));
+  __ Jump(lr);
+}
+
+
+void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
+  __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+  if (instr->hydrogen()->check_hole_value()) {
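+    // The hole indicates that the cell no longer holds a valid value, e.g.
+    // the property has been deleted; deoptimize in that case.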
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(result, ip);
+    DeoptimizeIf(eq, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
+  Register value = ToRegister(instr->input());
+  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
+  __ str(value, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  Abort("DoLoadNamedField unimplemented.");
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(r0));
+  ASSERT(ToRegister(instr->result()).is(r0));
+
+  // Name is always in r2.
+  __ mov(r2, Operand(instr->name()));
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadElements(LLoadElements* instr) {
+  Abort("DoLoadElements unimplemented.");
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Abort("DoAccessArgumentsAt unimplemented.");
+}
+
+
+void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+  Abort("DoLoadKeyedFastElement unimplemented.");
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(r1));
+  ASSERT(ToRegister(instr->key()).is(r0));
+
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Abort("DoArgumentsElements unimplemented.");
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Abort("DoArgumentsLength unimplemented.");
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Abort("DoApplyArguments unimplemented.");
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  LOperand* argument = instr->input();
+  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+    Abort("DoPushArgument not implemented for double type.");
+  } else {
+    Register argument_reg = EmitLoadRegister(argument, ip);
+    __ push(argument_reg);
+  }
+}
+
+
+void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+  Register result = ToRegister(instr->result());
+  __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
+}
+
+
+void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+  Register result = ToRegister(instr->result());
+  __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int arity,
+                                 LInstruction* instr) {
+  // Change context if needed.
+  bool change_context =
+      (graph()->info()->closure()->context() != function->context()) ||
+      scope()->contains_with() ||
+      (scope()->num_heap_slots() > 0);
+  if (change_context) {
+    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+  }
+
+  // Set r0 to the arguments count if adaptation is not needed. Assumes
+  // that r0 is available to write to at this point.
+  if (!function->NeedsArgumentsAdaption()) {
+    __ mov(r0, Operand(arity));
+  }
+
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+
+  // Invoke function.
+  __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+  __ Call(ip);
+
+  // Set up deoptimization.
+  RegisterLazyDeoptimization(instr);
+
+  // Restore context.
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
+  Abort("DoCallConstantFunction unimplemented.");
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+  Abort("DoDeferredMathAbsTaggedHeapNumber unimplemented.");
+}
+
+
+void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+  Abort("LUnaryMathOperation unimplemented.");
+}
+
+
+void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+  Abort("DoMathFloor unimplemented.");
+}
+
+
+void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+  Abort("DoMathSqrt unimplemented.");
+}
+
+
+void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
+  ASSERT(instr->op() == kMathFloor ||
+         instr->op() == kMathAbs);
+
+  switch (instr->op()) {
+    case kMathAbs:
+      DoMathAbs(instr);
+      break;
+    case kMathFloor:
+      DoMathFloor(instr);
+      break;
+    case kMathSqrt:
+      DoMathSqrt(instr);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+  Abort("DoCallKeyed unimplemented.");
+}
+
+
+void LCodeGen::DoCallNamed(LCallNamed* instr) {
+  ASSERT(ToRegister(instr->result()).is(r0));
+
+  int arity = instr->arity();
+  Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+  __ mov(r2, Operand(instr->name()));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  // Restore context register.
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  Abort("DoCallFunction unimplemented.");
+}
+
+
+void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+  Abort("DoCallGlobal unimplemented.");
+}
+
+
+void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
+  ASSERT(ToRegister(instr->result()).is(r0));
+  __ mov(r1, Operand(instr->target()));
+  CallKnownFunction(instr->target(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+  ASSERT(ToRegister(instr->input()).is(r1));
+  ASSERT(ToRegister(instr->result()).is(r0));
+
+  Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
+  __ mov(r0, Operand(instr->arity()));
+  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  Abort("DoStoreNamedField unimplemented.");
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(r1));
+  ASSERT(ToRegister(instr->value()).is(r0));
+
+  // Name is always in r2.
+  __ mov(r2, Operand(instr->name()));
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+  Abort("DoBoundsCheck unimplemented.");
+}
+
+
+void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
+  Abort("DoStoreKeyedFastElement unimplemented.");
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(r2));
+  ASSERT(ToRegister(instr->key()).is(r1));
+  ASSERT(ToRegister(instr->value()).is(r0));
+
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  Abort("DoInteger32ToDouble unimplemented.");
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+  class DeferredNumberTagI: public LDeferredCode {
+   public:
+    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+   private:
+    LNumberTagI* instr_;
+  };
+
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  Register reg = ToRegister(input);
+
+  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
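+  // Tagging shifts the value up by one bit; if that overflows, the value
+  // does not fit in a smi and the deferred code allocates a heap number.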
+  __ SmiTag(reg, SetCC);
+  __ b(vs, deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
+  Label slow;
+  Register reg = ToRegister(instr->input());
+  DoubleRegister dbl_scratch = d0;
+  SwVfpRegister flt_scratch = s0;
+
+  // Preserve the value of all registers.
+  __ PushSafepointRegisters();
+
+  // There was overflow, so bits 30 and 31 of the original integer
+  // disagree. Try to allocate a heap number in new space and store
+  // the value in there. If that fails, call the runtime system.
+  Label done;
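+  // Undo the wrapped tag: the arithmetic shift leaves the original value
+  // with its sign bit flipped, and the xor below flips it back.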
+  __ SmiUntag(reg);
+  __ eor(reg, reg, Operand(0x80000000));
+  __ vmov(flt_scratch, reg);
+  __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+  if (FLAG_inline_new) {
+    __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
+    if (!reg.is(r5)) __ mov(reg, r5);
+    __ b(&done);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+
+  // TODO(3095996): Put a valid pointer value in the stack slot where the result
+  // register is stored, as this register is in the pointer map, but contains an
+  // integer value.
+  __ mov(ip, Operand(0));
+  int reg_stack_index = __ SafepointRegisterStackIndex(reg.code());
+  __ str(ip, MemOperand(sp, reg_stack_index * kPointerSize));
+
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  if (!reg.is(r0)) __ mov(reg, r0);
+
+  // Done. Store the double in dbl_scratch into the value field of the
+  // allocated heap number.
+  __ bind(&done);
+  __ sub(ip, reg, Operand(kHeapObjectTag));
+  __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
+  __ str(reg, MemOperand(sp, reg_stack_index * kPointerSize));
+  __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD: public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+   private:
+    LNumberTagD* instr_;
+  };
+
+  DoubleRegister input_reg = ToDoubleRegister(instr->input());
+  Register reg = ToRegister(instr->result());
+  Register tmp = ToRegister(instr->temp());
+  Register scratch = r9;
+
+  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+  if (FLAG_inline_new) {
+    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(reg, tmp, ip, scratch, deferred->entry());
+  } else {
+    __ jmp(deferred->entry());
+  }
+  __ bind(deferred->exit());
+  __ sub(ip, reg, Operand(kHeapObjectTag));
+  __ vstr(input_reg, ip, HeapNumber::kValueOffset);
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ mov(reg, Operand(0));
+
+  __ PushSafepointRegisters();
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  int reg_stack_index = __ SafepointRegisterStackIndex(reg.code());
+  __ str(r0, MemOperand(sp, reg_stack_index * kPointerSize));
+  __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
+  __ SmiTag(ToRegister(input));
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  Abort("DoSmiUntag unimplemented.");
+}
+
+
+void LCodeGen::EmitNumberUntagD(Register input_reg,
+                                DoubleRegister result_reg,
+                                LEnvironment* env) {
+  Register core_scratch = r9;
+  ASSERT(!input_reg.is(core_scratch));
+  SwVfpRegister flt_scratch = s0;
+  ASSERT(!result_reg.is(d0));
+
+  Label load_smi, heap_number, done;
+
+  // Smi check.
+  __ tst(input_reg, Operand(kSmiTagMask));
+  __ b(eq, &load_smi);
+
+  // Heap number map check.
+  __ ldr(core_scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+  __ cmp(core_scratch, Operand(ip));
+  __ b(eq, &heap_number);
+
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(input_reg, Operand(ip));
+  DeoptimizeIf(ne, env);
+
+  // Convert undefined to NaN.
+  __ LoadRoot(ip, Heap::kNanValueRootIndex);
+  __ sub(ip, ip, Operand(kHeapObjectTag));
+  __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+  __ jmp(&done);
+
+  // Heap number to double register conversion.
+  __ bind(&heap_number);
+  __ sub(ip, input_reg, Operand(kHeapObjectTag));
+  __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+  __ jmp(&done);
+
+  // Smi to double register conversion
+  __ bind(&load_smi);
+  __ SmiUntag(input_reg);  // Untag smi before converting to float.
+  __ vmov(flt_scratch, input_reg);
+  __ vcvt_f64_s32(result_reg, flt_scratch);
+  __ SmiTag(input_reg);  // Retag smi.
+  __ bind(&done);
+}
+
+
+class DeferredTaggedToI: public LDeferredCode {
+ public:
+  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+      : LDeferredCode(codegen), instr_(instr) { }
+  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ private:
+  LTaggedToI* instr_;
+};
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+  Label done;
+  Register input_reg = ToRegister(instr->input());
+  Register core_scratch = r9;
+  ASSERT(!input_reg.is(core_scratch));
+  DoubleRegister dbl_scratch = d0;
+  SwVfpRegister flt_scratch = s0;
+  DoubleRegister dbl_tmp = ToDoubleRegister(instr->temp());
+
+  // Heap number map check.
+  __ ldr(core_scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+  __ cmp(core_scratch, Operand(ip));
+
+  if (instr->truncating()) {
+    Label heap_number;
+    __ b(eq, &heap_number);
+    // Check for undefined. Undefined is converted to zero for truncating
+    // conversions.
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ cmp(input_reg, Operand(ip));
+    DeoptimizeIf(ne, instr->environment());
+    __ mov(input_reg, Operand(0));
+    __ b(&done);
+
+    __ bind(&heap_number);
+    __ sub(ip, input_reg, Operand(kHeapObjectTag));
+    __ vldr(dbl_tmp, ip, HeapNumber::kValueOffset);
+    __ vcmp(dbl_tmp, 0.0);  // Sets overflow bit if NaN.
+    __ vcvt_s32_f64(flt_scratch, dbl_tmp);
+    __ vmov(input_reg, flt_scratch);  // 32-bit result of conversion.
+    __ vmrs(pc);  // Move vector status bits to normal status bits.
+    // Overflow bit is set if dbl_tmp is NaN.
+    __ cmn(input_reg, Operand(1), vc);  // 0x7fffffff + 1 -> overflow.
+    __ cmp(input_reg, Operand(1), vc);  // 0x80000000 - 1 -> overflow.
+    DeoptimizeIf(vs, instr->environment());  // Saturation may have occurred.
+
+  } else {
+    // Deoptimize if we don't have a heap number.
+    DeoptimizeIf(ne, instr->environment());
+
+    __ sub(ip, input_reg, Operand(kHeapObjectTag));
+    __ vldr(dbl_tmp, ip, HeapNumber::kValueOffset);
+    __ vcvt_s32_f64(flt_scratch, dbl_tmp);
+    __ vmov(input_reg, flt_scratch);  // 32-bit result of conversion.
+    // Non-truncating conversion means that we cannot lose bits, so we convert
+    // back to check; note that using non-overlapping s and d regs would be
+    // slightly faster.
+    __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+    __ vcmp(dbl_scratch, dbl_tmp);
+    __ vmrs(pc);  // Move vector status bits to normal status bits.
+    DeoptimizeIf(ne, instr->environment());  // Not equal or unordered.
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ tst(input_reg, Operand(input_reg));
+      __ b(ne, &done);
+      __ vmov(lr, ip, dbl_tmp);
+      __ tst(ip, Operand(1 << 31));  // Test sign bit.
+      DeoptimizeIf(ne, instr->environment());
+    }
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  ASSERT(input->Equals(instr->result()));
+
+  Register input_reg = ToRegister(input);
+
+  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+
+  // Smi check.
+  __ tst(input_reg, Operand(kSmiTagMask));
+  __ b(ne, deferred->entry());
+
+  // Smi to int32 conversion
+  __ SmiUntag(input_reg);  // Untag smi.
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  LOperand* result = instr->result();
+  ASSERT(result->IsDoubleRegister());
+
+  Register input_reg = ToRegister(input);
+  DoubleRegister result_reg = ToDoubleRegister(result);
+
+  EmitNumberUntagD(input_reg, result_reg, instr->environment());
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  Abort("DoDoubleToI unimplemented.");
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  __ tst(ToRegister(input), Operand(kSmiTagMask));
+  DeoptimizeIf(instr->condition(), instr->environment());
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Abort("DoCheckInstanceType unimplemented.");
+}
+
+
+void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+  ASSERT(instr->input()->IsRegister());
+  Register reg = ToRegister(instr->input());
+  __ cmp(reg, Operand(instr->hydrogen()->target()));
+  DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMap(LCheckMap* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  Register reg = ToRegister(input);
+  __ ldr(r9, FieldMemOperand(reg, HeapObject::kMapOffset));
+  __ cmp(r9, Operand(instr->hydrogen()->map()));
+  DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::LoadPrototype(Register result,
+                             Handle<JSObject> prototype) {
+  Abort("LoadPrototype unimplemented.");
+}
+
+
+void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+  Abort("DoCheckPrototypeMaps unimplemented.");
+}
+
+
+void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+  Abort("DoArrayLiteral unimplemented.");
+}
+
+
+void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+  Abort("DoObjectLiteral unimplemented.");
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+  Abort("DoRegExpLiteral unimplemented.");
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+  Abort("DoFunctionLiteral unimplemented.");
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  Abort("DoTypeof unimplemented.");
+}
+
+
+void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
+  Abort("DoTypeofIs unimplemented.");
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Register input = ToRegister(instr->input());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition final_branch_condition = EmitTypeofIs(true_label,
+                                                  false_label,
+                                                  input,
+                                                  instr->type_literal());
+
+  EmitBranch(true_block, false_block, final_branch_condition);
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label,
+                                 Label* false_label,
+                                 Register input,
+                                 Handle<String> type_name) {
+  Condition final_branch_condition = no_condition;
+  Register core_scratch = r9;
+  ASSERT(!input.is(core_scratch));
+  if (type_name->Equals(Heap::number_symbol())) {
+    __ tst(input, Operand(kSmiTagMask));
+    __ b(eq, true_label);
+    __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+    __ cmp(input, Operand(ip));
+    final_branch_condition = eq;
+
+  } else if (type_name->Equals(Heap::string_symbol())) {
+    __ tst(input, Operand(kSmiTagMask));
+    __ b(eq, false_label);
+    __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ tst(ip, Operand(1 << Map::kIsUndetectable));
+    __ b(ne, false_label);
+    __ CompareInstanceType(input, core_scratch, FIRST_NONSTRING_TYPE);
+    final_branch_condition = lo;
+
+  } else if (type_name->Equals(Heap::boolean_symbol())) {
+    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+    __ cmp(input, ip);
+    __ b(eq, true_label);
+    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+    __ cmp(input, ip);
+    final_branch_condition = eq;
+
+  } else if (type_name->Equals(Heap::undefined_symbol())) {
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ cmp(input, ip);
+    __ b(eq, true_label);
+    __ tst(input, Operand(kSmiTagMask));
+    __ b(eq, false_label);
+    // Check for undetectable objects => true.
+    __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ tst(ip, Operand(1 << Map::kIsUndetectable));
+    final_branch_condition = ne;
+
+  } else if (type_name->Equals(Heap::function_symbol())) {
+    __ tst(input, Operand(kSmiTagMask));
+    __ b(eq, false_label);
+    __ CompareObjectType(input, input, core_scratch, JS_FUNCTION_TYPE);
+    __ b(eq, true_label);
+    // Regular expressions => 'function' (they are callable).
+    __ CompareInstanceType(input, core_scratch, JS_REGEXP_TYPE);
+    final_branch_condition = eq;
+
+  } else if (type_name->Equals(Heap::object_symbol())) {
+    __ tst(input, Operand(kSmiTagMask));
+    __ b(eq, false_label);
+    __ LoadRoot(ip, Heap::kNullValueRootIndex);
+    __ cmp(input, ip);
+    __ b(eq, true_label);
+    // Regular expressions => 'function', not 'object'.
+    __ CompareObjectType(input, input, core_scratch, JS_REGEXP_TYPE);
+    __ b(eq, false_label);
+    // Check for undetectable objects => false.
+    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ tst(ip, Operand(1 << Map::kIsUndetectable));
+    __ b(ne, false_label);
+    // Check for JS objects => true.
+    __ CompareInstanceType(input, core_scratch, FIRST_JS_OBJECT_TYPE);
+    __ b(lo, false_label);
+    __ CompareInstanceType(input, core_scratch, LAST_JS_OBJECT_TYPE);
+    final_branch_condition = ls;
+
+  } else {
+    final_branch_condition = ne;
+    __ b(false_label);
+    // A dead branch instruction will be generated after this point.
+  }
+
+  return final_branch_condition;
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  // No code for lazy bailout instruction. Used to capture environment after a
+  // call for populating the safepoint data with deoptimization data.
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  DeoptimizeIf(no_condition, instr->environment());
+}
+
+
+void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
+  Abort("DoDeleteProperty unimplemented.");
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  // Perform stack overflow check.
+  Label ok;
+  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+  __ cmp(sp, Operand(ip));
+  __ b(hs, &ok);
+  StackCheckStub stub;
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ bind(&ok);
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  Abort("DoOsrEntry unimplemented.");
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
new file mode 100644
index 0000000..846acac
--- /dev/null
+++ b/src/arm/lithium-codegen-arm.h
@@ -0,0 +1,265 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_
+#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
+
+#include "arm/lithium-arm.h"
+
+#include "deoptimizer.h"
+#include "safepoint-table.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+
+
+class LCodeGen BASE_EMBEDDED {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : chunk_(chunk),
+        masm_(assembler),
+        info_(info),
+        current_block_(-1),
+        current_instruction_(-1),
+        instructions_(chunk->instructions()),
+        deoptimizations_(4),
+        deoptimization_literals_(8),
+        inlined_function_count_(0),
+        scope_(chunk->graph()->info()->scope()),
+        status_(UNUSED),
+        deferred_(8),
+        osr_pc_offset_(-1) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode();
+
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code);
+
+  // Deferred code support.
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+  void DoDeferredNumberTagI(LNumberTagI* instr);
+  void DoDeferredTaggedToI(LTaggedToI* instr);
+  void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+  void DoDeferredStackCheck(LGoto* instr);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  enum Status {
+    UNUSED,
+    GENERATING,
+    DONE,
+    ABORTED
+  };
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_generating() const { return status_ == GENERATING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  LChunk* chunk() const { return chunk_; }
+  Scope* scope() const { return scope_; }
+  HGraph* graph() const { return chunk_->graph(); }
+  MacroAssembler* masm() const { return masm_; }
+
+  int GetNextEmittedBlock(int block);
+  LInstruction* GetNextInstruction();
+
+  void EmitClassOfTest(Label* if_true,
+                       Label* if_false,
+                       Handle<String> class_name,
+                       Register input,
+                       Register temporary,
+                       Register temporary2);
+
+  int StackSlotCount() const { return chunk()->spill_slot_count(); }
+  int ParameterCount() const { return scope()->num_parameters(); }
+
+  void Abort(const char* format, ...);
+  void Comment(const char* format, ...);
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+
+  // Code generation passes.  Returns true if code generation should
+  // continue.
+  bool GeneratePrologue();
+  bool GenerateBody();
+  bool GenerateDeferredCode();
+  bool GenerateSafepointTable();
+
+  void CallCode(Handle<Code> code,
+                RelocInfo::Mode mode,
+                LInstruction* instr);
+  void CallRuntime(Runtime::Function* function,
+                   int num_arguments,
+                   LInstruction* instr);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   LInstruction* instr) {
+    Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, num_arguments, instr);
+  }
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in r1.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int arity,
+                         LInstruction* instr);
+
+  void LoadPrototype(Register result, Handle<JSObject> prototype);
+
+  void RegisterLazyDeoptimization(LInstruction* instr);
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
+  void DeoptimizeIf(Condition cc, LEnvironment* environment);
+
+  void AddToTranslation(Translation* translation,
+                        LOperand* op,
+                        bool is_tagged);
+  void PopulateDeoptimizationData(Handle<Code> code);
+  int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+  Register ToRegister(int index) const;
+  DoubleRegister ToDoubleRegister(int index) const;
+
+  // LOperand must be a register.
+  Register ToRegister(LOperand* op) const;
+
+  // LOperand is loaded into scratch, unless already a register.
+  Register EmitLoadRegister(LOperand* op, Register scratch);
+
+  // LOperand must be a double register.
+  DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+  // LOperand is loaded into dbl_scratch, unless already a double register.
+  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
+                                        SwVfpRegister flt_scratch,
+                                        DoubleRegister dbl_scratch);
+
+  int ToInteger32(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op);
+  MemOperand ToMemOperand(LOperand* op) const;
+
+  // Specific math operations - used from DoUnaryMathOperation.
+  void DoMathAbs(LUnaryMathOperation* instr);
+  void DoMathFloor(LUnaryMathOperation* instr);
+  void DoMathSqrt(LUnaryMathOperation* instr);
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
+  void RecordSafepointWithRegisters(LPointerMap* pointers,
+                                    int arguments,
+                                    int deoptimization_index);
+  void RecordPosition(int position);
+
+  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+  void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+  void EmitBranch(int left_block, int right_block, Condition cc);
+  void EmitCmpI(LOperand* left, LOperand* right);
+  void EmitNumberUntagD(Register input,
+                        DoubleRegister result,
+                        LEnvironment* env);
+
+  // Emits optimized code for typeof x == "y".  Modifies input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitTypeofIs(Label* true_label, Label* false_label,
+                         Register input, Handle<String> type_name);
+
+  LChunk* const chunk_;
+  MacroAssembler* const masm_;
+  CompilationInfo* const info_;
+
+  int current_block_;
+  int current_instruction_;
+  const ZoneList<LInstruction*>* instructions_;
+  ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<Handle<Object> > deoptimization_literals_;
+  int inlined_function_count_;
+  Scope* const scope_;
+  Status status_;
+  TranslationBuffer translations_;
+  ZoneList<LDeferredCode*> deferred_;
+  int osr_pc_offset_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
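+// A block of out-of-line code emitted after the main body. Inline code
+// branches to entry() for its slow path, and the deferred code jumps back
+// to exit() when done.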
+class LDeferredCode: public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen), external_exit_(NULL) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() { }
+  virtual void Generate() = 0;
+
+  void SetExit(Label *exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_LITHIUM_CODEGEN_ARM_H_
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index afd7e2c..6ad8918 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -171,13 +171,6 @@ void MacroAssembler::Ret(Condition cond) {
 }
 
 
-void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
-  LoadRoot(ip, Heap::kStackLimitRootIndex);
-  cmp(sp, Operand(ip));
-  b(lo, on_stack_overflow);
-}
-
-
 void MacroAssembler::Drop(int count, Condition cond) {
   if (count > 0) {
     add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
@@ -447,6 +440,34 @@ void MacroAssembler::RecordWrite(Register object,
 }
 
 
+// Push and pop all registers that can hold pointers.
+void MacroAssembler::PushSafepointRegisters() {
+  // Safepoints expect a block of contiguous register values starting with r0:
+  ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
+  // Safepoints expect a block of kNumSafepointRegisters values on the
+  // stack, so adjust the stack for unsaved registers.
+  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+  ASSERT(num_unsaved >= 0);
+  sub(sp, sp, Operand(num_unsaved * kPointerSize));
+  stm(db_w, sp, kSafepointSavedRegisters);
+}
+
+
+void MacroAssembler::PopSafepointRegisters() {
+  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+  ldm(ia_w, sp, kSafepointSavedRegisters);
+  add(sp, sp, Operand(num_unsaved * kPointerSize));
+}
+
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+  // The registers are pushed starting with the highest encoding,
+  // which means that lowest encodings are closest to the stack pointer.
+  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
+  return reg_code;
+}
+
+
 void MacroAssembler::Ldrd(Register dst1, Register dst2,
                           const MemOperand& src, Condition cond) {
   ASSERT(src.rm().is(no_reg));
@@ -515,12 +536,8 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
 }
 
 
-void MacroAssembler::EnterExitFrame() {
-  // Compute the argv pointer and keep it in a callee-saved register.
+void MacroAssembler::EnterExitFrame(bool save_doubles) {
   // r0 is argc.
-  add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
-  sub(r6, r6, Operand(kPointerSize));
-
   // Compute callee's stack pointer before making changes and save it as
   // ip register so that it is restored as sp register on exit, thereby
   // popping the args.
@@ -528,6 +545,9 @@ void MacroAssembler::EnterExitFrame() {
   // ip = sp + kPointerSize * #args;
   add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
 
+  // Compute the argv pointer and keep it in a callee-saved register.
+  sub(r6, ip, Operand(kPointerSize));
+
   // Prepare the stack to be aligned when calling into C. After this point there
   // are 5 pushes before the call into C, so the stack needs to be aligned after
   // 5 pushes.
@@ -558,6 +578,28 @@ void MacroAssembler::EnterExitFrame() {
   // Setup argc and the builtin function in callee-saved registers.
   mov(r4, Operand(r0));
   mov(r5, Operand(r1));
+
+  // Optionally save all double registers.
+  if (save_doubles) {
+    // TODO(regis): Use vstrm instruction.
+    // The stack alignment code above made sp unaligned, so add space for one
+    // more double register and use aligned addresses.
+    ASSERT(kDoubleSize == frame_alignment);
+    // Mark the frame as containing doubles by pushing a non-valid return
+    // address, i.e. 0.
+    ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
+    mov(ip, Operand(0));  // Marker and alignment word.
+    push(ip);
+    int space = DwVfpRegister::kNumRegisters * kDoubleSize + kPointerSize;
+    sub(sp, sp, Operand(space));
+    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+      DwVfpRegister reg = DwVfpRegister::from_code(i);
+      vstr(reg, sp, i * kDoubleSize + kPointerSize);
+    }
+    // Note that d0 will be accessible at fp - 2*kPointerSize -
+    // DwVfpRegister::kNumRegisters * kDoubleSize, since the code slot and the
+    // alignment word were pushed after the fp.
+  }
 }
 
 
@@ -592,7 +634,18 @@ int MacroAssembler::ActivationFrameAlignment() {
 }
 
 
-void MacroAssembler::LeaveExitFrame() {
+void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+  // Optionally restore all double registers.
+  if (save_doubles) {
+    // TODO(regis): Use vldrm instruction.
+    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+      DwVfpRegister reg = DwVfpRegister::from_code(i);
+      // Register d15 is just below the marker.
+      const int offset = ExitFrameConstants::kMarkerOffset;
+      vldr(reg, fp, (i - DwVfpRegister::kNumRegisters) * kDoubleSize + offset);
+    }
+  }
+
   // Clear top frame.
   mov(r3, Operand(0, RelocInfo::NONE));
   mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
@@ -756,7 +809,15 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
   // Invoke the cached code.
   Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+  if (V8::UseCrankshaft()) {
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+    InvokeCode(r3, expected, actual, flag);
+  } else {
+    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+  }
 }
 
 
@@ -1514,6 +1575,16 @@ void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
 }
 
 
+void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+  Runtime::Function* function = Runtime::FunctionForId(id);
+  mov(r0, Operand(function->nargs));
+  mov(r1, Operand(ExternalReference(function)));
+  CEntryStub stub(1);
+  stub.SaveDoubles();
+  CallStub(&stub);
+}
+
+
 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                            int num_arguments) {
   mov(r0, Operand(num_arguments));
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 8bd134c..3da8726 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -224,6 +224,12 @@ class MacroAssembler: public Assembler {
     }
   }
 
+  // Push and pop the registers that can hold pointers, as defined by the
+  // RegList constant kSafepointSavedRegisters.
+  void PushSafepointRegisters();
+  void PopSafepointRegisters();
+  static int SafepointRegisterStackIndex(int reg_code);
+
   // Load two consecutive registers with two consecutive memory locations.
   void Ldrd(Register dst1,
             Register dst2,
@@ -237,11 +243,6 @@ class MacroAssembler: public Assembler {
             Condition cond = al);
 
   // ---------------------------------------------------------------------------
-  // Stack limit support
-
-  void StackLimitCheck(Label* on_stack_limit_hit);
-
-  // ---------------------------------------------------------------------------
   // Activation frames
 
   void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
@@ -254,10 +255,10 @@ class MacroAssembler: public Assembler {
   // Expects the number of arguments in register r0 and
   // the builtin function to call in register r1. Exits with argc in
   // r4, argv in r6, and the builtin function to call in r5.
-  void EnterExitFrame();
+  void EnterExitFrame(bool save_doubles);
 
   // Leave the current exit frame. Expects the return value in r0.
-  void LeaveExitFrame();
+  void LeaveExitFrame(bool save_doubles);
 
   // Get the actual activation frame alignment for target environment.
   static int ActivationFrameAlignment();
@@ -575,6 +576,7 @@ class MacroAssembler: public Assembler {
 
   // Call a runtime routine.
   void CallRuntime(Runtime::Function* f, int num_arguments);
+  void CallRuntimeSaveDoubles(Runtime::FunctionId id);
 
   // Convenience function: Same as above, but takes the fid instead.
   void CallRuntime(Runtime::FunctionId fid, int num_arguments);
@@ -665,6 +667,14 @@ class MacroAssembler: public Assembler {
   // ---------------------------------------------------------------------------
   // Smi utilities
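+  // On ARM a smi is the 31-bit value shifted left by one with a zero tag
+  // bit, so adding a register to itself tags it (e.g. 3 tags to 6) and can
+  // set the overflow flag; an arithmetic shift right by one untags it.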
 
+  void SmiTag(Register reg, SBit s = LeaveCC) {
+    add(reg, reg, Operand(reg), s);
+  }
+
+  void SmiUntag(Register reg) {
+    mov(reg, Operand(reg, ASR, kSmiTagSize));
+  }
+
   // Jump if either of the registers contain a non-smi.
   void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
   // Jump if either of the registers contain a smi.
@@ -766,6 +776,17 @@ class CodePatcher {
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 
+// Helper class for generating code or data associated with the code
+// right after a call instruction. For example, this can be used to
+// generate safepoint data after calls for Crankshaft.
+class PostCallGenerator {
+ public:
+  PostCallGenerator() { }
+  virtual ~PostCallGenerator() { }
+  virtual void Generate() = 0;
+};
+
+
 // -----------------------------------------------------------------------------
 // Static helper functions.
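
A note on the PostCallGenerator hook added above: a code generator presumably hands an instance down to the call-emitting macro assembler functions, which invoke Generate() right after emitting the call. A hedged sketch of a subclass; SafepointGenerator and LCodeGen::RecordSafepoint are illustrative here, not part of this file:

  class SafepointGenerator: public PostCallGenerator {
   public:
    SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers)
        : codegen_(codegen), pointers_(pointers) { }
    virtual ~SafepointGenerator() { }
    virtual void Generate() {
      // Runs right after the call instruction, so the current pc is the
      // return address that the safepoint entry describes.
      codegen_->RecordSafepoint(pointers_);
    }
   private:
    LCodeGen* codegen_;
    LPointerMap* pointers_;
  };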
 
index 3ec5f44..143b839 100644 (file)
@@ -74,6 +74,7 @@ class Debugger {
   Simulator* sim_;
 
   int32_t GetRegisterValue(int regnum);
+  double GetVFPDoubleRegisterValue(int regnum);
   bool GetValue(const char* desc, int32_t* value);
   bool GetVFPSingleValue(const char* desc, float* value);
   bool GetVFPDoubleValue(const char* desc, double* value);
@@ -168,6 +169,11 @@ int32_t Debugger::GetRegisterValue(int regnum) {
 }
 
 
+double Debugger::GetVFPDoubleRegisterValue(int regnum) {
+  return sim_->get_double_from_d_register(regnum);
+}
+
+
 bool Debugger::GetValue(const char* desc, int32_t* value) {
   int regnum = Registers::Number(desc);
   if (regnum != kNoRegister) {
@@ -309,6 +315,11 @@ void Debugger::Debug() {
               value = GetRegisterValue(i);
               PrintF("%3s: 0x%08x %10d\n", Registers::Name(i), value, value);
             }
+            for (int i = 0; i < kNumVFPDoubleRegisters; i++) {
+              dvalue = GetVFPDoubleRegisterValue(i);
+              PrintF("%3s: %f\n", VFPRegisters::Name(i, true), dvalue);
+            }
           } else {
             if (GetValue(arg1, &value)) {
               PrintF("%s: 0x%08x %d \n", arg1, value, value);
@@ -837,6 +848,11 @@ void Simulator::set_pc(int32_t value) {
 }
 
 
+bool Simulator::has_bad_pc() const {
+  return ((registers_[pc] == bad_lr) || (registers_[pc] == end_sim_pc));
+}
+
+
 // Raw access to the PC register without the special adjustment when reading.
 int32_t Simulator::get_pc() const {
   return registers_[pc];
@@ -1510,7 +1526,8 @@ void Simulator::HandleRList(Instr* instr, bool load) {
 typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
                                         int32_t arg1,
                                         int32_t arg2,
-                                        int32_t arg3);
+                                        int32_t arg3,
+                                        int32_t arg4);
 typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
                                          int32_t arg1,
                                          int32_t arg2,
@@ -1533,6 +1550,8 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
       int32_t arg1 = get_register(r1);
       int32_t arg2 = get_register(r2);
       int32_t arg3 = get_register(r3);
+      int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
+      int32_t arg4 = *stack_pointer;
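+      // AAPCS: the first four integer arguments are passed in r0-r3; the
+      // fifth lives at the top of the caller's stack, hence the read
+      // through sp.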
       // This is dodgy but it works because the C entry stubs are never moved.
       // See comment in codegen-arm.cc and bug 1242173.
       int32_t saved_lr = get_register(lr);
@@ -1561,19 +1580,20 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
             reinterpret_cast<SimulatorRuntimeCall>(external);
         if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
           PrintF(
-              "Call to host function at %p with args %08x, %08x, %08x, %08x",
+              "Call to host function at %p args %08x, %08x, %08x, %08x, %0xc",
               FUNCTION_ADDR(target),
               arg0,
               arg1,
               arg2,
-              arg3);
+              arg3,
+              arg4);
           if (!stack_aligned) {
             PrintF(" with unaligned stack %08x\n", get_register(sp));
           }
           PrintF("\n");
         }
         CHECK(stack_aligned);
-        int64_t result = target(arg0, arg1, arg2, arg3);
+        int64_t result = target(arg0, arg1, arg2, arg3, arg4);
         int32_t lo_res = static_cast<int32_t>(result);
         int32_t hi_res = static_cast<int32_t>(result >> 32);
         if (::v8::internal::FLAG_trace_sim) {
@@ -1908,9 +1928,12 @@ void Simulator::DecodeType01(Instr* instr) {
           set_register(lr, old_pc + Instr::kInstrSize);
           break;
         }
-        case BKPT:
-          v8::internal::OS::DebugBreak();
+        case BKPT: {
+          Debugger dbg(this);
+          PrintF("Simulator hit BKPT.\n");
+          dbg.Debug();
           break;
+        }
         default:
           UNIMPLEMENTED();
       }
index c37b3f7..7bfe76a 100644 (file)
@@ -186,6 +186,10 @@ class Simulator {
   // ICache checking.
   static void FlushICache(void* start, size_t size);
 
+  // Returns true if the pc register contains one of the 'special_values'
+  // defined below (bad_lr, end_sim_pc).
+  bool has_bad_pc() const;
+
  private:
   enum special_values {
     // Known bad pc value to ensure that the simulator does not execute
index 0a5eac2..74ffd3b 100644 (file)
@@ -874,6 +874,34 @@ MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
   return cell;
 }
 
+
+// Calls GenerateCheckPropertyCell for each global object in the prototype chain
+// from object to (but not including) holder.
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
+    MacroAssembler* masm,
+    JSObject* object,
+    JSObject* holder,
+    String* name,
+    Register scratch,
+    Label* miss) {
+  JSObject* current = object;
+  while (current != holder) {
+    if (current->IsGlobalObject()) {
+      // Returns a cell or a failure.
+      MaybeObject* result = GenerateCheckPropertyCell(
+          masm,
+          GlobalObject::cast(current),
+          name,
+          scratch,
+          miss);
+      if (result->IsFailure()) return result;
+    }
+    ASSERT(current->IsJSObject());
+    current = JSObject::cast(current->GetPrototype());
+  }
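+  // Returning NULL signals "no failure"; the IsFailure() checks at the
+  // call sites only inspect the pointer's tag bits, so this is safe.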
+  return NULL;
+}
+
 
 #undef __
 #define __ ACCESS_MASM(masm())
@@ -911,18 +939,19 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
     // checks are allowed in stubs.
     ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
 
+    ASSERT(current->GetPrototype()->IsJSObject());
     JSObject* prototype = JSObject::cast(current->GetPrototype());
     if (!current->HasFastProperties() &&
         !current->IsJSGlobalObject() &&
         !current->IsJSGlobalProxy()) {
       if (!name->IsSymbol()) {
-        MaybeObject* lookup_result = Heap::LookupSymbol(name);
-        if (lookup_result->IsFailure()) {
-          set_failure(Failure::cast(lookup_result));
+        MaybeObject* maybe_lookup_result = Heap::LookupSymbol(name);
+        Object* lookup_result = NULL;  // Initialization to please compiler.
+        if (!maybe_lookup_result->ToObject(&lookup_result)) {
+          set_failure(Failure::cast(maybe_lookup_result));
           return reg;
-        } else {
-          name = String::cast(lookup_result->ToObjectUnchecked());
         }
+        name = String::cast(lookup_result);
       }
       ASSERT(current->property_dictionary()->FindEntry(name) ==
              StringDictionary::kNotFound);
@@ -936,7 +965,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
       __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
       reg = holder_reg;  // from now the object is in holder_reg
       __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-    } else {
+    } else if (Heap::InNewSpace(prototype)) {
       // Get the map of the current object.
       __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
       __ cmp(scratch1, Operand(Handle<Map>(current->map())));
@@ -956,14 +985,24 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
       }
 
       reg = holder_reg;  // from now the object is in holder_reg
-      if (Heap::InNewSpace(prototype)) {
-        // The prototype is in new space; we cannot store a reference
-        // to it in the code. Load it from the map.
-        __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-      } else {
-        // The prototype is in old space; load it directly.
-        __ mov(reg, Operand(Handle<JSObject>(prototype)));
+      // The prototype is in new space; we cannot store a reference
+      // to it in the code. Load it from the map.
+      __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+    } else {
+      // Check the map of the current object.
+      __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      __ cmp(scratch1, Operand(Handle<Map>(current->map())));
+      // Branch on the result of the map check.
+      __ b(ne, miss);
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (current->IsJSGlobalProxy()) {
+        __ CheckAccessGlobalProxy(reg, scratch1, miss);
       }
+      // The prototype is in old space; load it directly.
+      reg = holder_reg;  // from now the object is in holder_reg
+      __ mov(reg, Operand(Handle<JSObject>(prototype)));
     }
 
     if (save_at_depth == depth) {
@@ -982,32 +1021,22 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
   // Log the check depth.
   LOG(IntEvent("check-maps-depth", depth + 1));
 
-  // Perform security check for access to the global object and return
-  // the holder register.
-  ASSERT(current == holder);
-  ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-  if (current->IsJSGlobalProxy()) {
+  // Perform security check for access to the global object.
+  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+  if (holder->IsJSGlobalProxy()) {
     __ CheckAccessGlobalProxy(reg, scratch1, miss);
-  }
+  }
 
   // If we've skipped any global objects, it's not enough to verify
   // that their maps haven't changed.  We also need to check that the
   // property cell for the property is still empty.
-  current = object;
-  while (current != holder) {
-    if (current->IsGlobalObject()) {
-      MaybeObject* cell = GenerateCheckPropertyCell(masm(),
-                                                    GlobalObject::cast(current),
-                                                    name,
-                                                    scratch1,
-                                                    miss);
-      if (cell->IsFailure()) {
-        set_failure(Failure::cast(cell));
-        return reg;
-      }
-    }
-    current = JSObject::cast(current->GetPrototype());
-  }
+  MaybeObject* result = GenerateCheckPropertyCells(masm(),
+                                                   object,
+                                                   holder,
+                                                   name,
+                                                   scratch1,
+                                                   miss);
+  if (result->IsFailure()) set_failure(Failure::cast(result));
 
   // Return the register containing the holder.
   return reg;
@@ -1652,7 +1681,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
   __ Drop(argc + 1);
   __ Ret();
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_code_at_generator.GenerateSlow(masm(), call_helper);
 
   __ bind(&index_out_of_range);
@@ -1729,7 +1758,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
   __ Drop(argc + 1);
   __ Ret();
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm(), call_helper);
 
   __ bind(&index_out_of_range);
@@ -1804,7 +1833,7 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
   __ Drop(argc + 1);
   __ Ret();
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_from_code_generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
@@ -2330,8 +2359,16 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
   ASSERT(function->is_compiled());
   Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  __ InvokeCode(code, expected, arguments(),
-                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+  if (V8::UseCrankshaft()) {
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+    __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION);
+  } else {
+    __ InvokeCode(code, expected, arguments(),
+                  RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+  }
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -2864,13 +2901,62 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
 }
 
 
+MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- r0    : key
+  //  -- r1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check that the receiver isn't a smi.
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &miss);
+
+  // Check that the map matches.
+  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ cmp(r2, Operand(Handle<Map>(receiver->map())));
+  __ b(ne, &miss);
+
+  // Check that the key is a smi.
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(ne, &miss);
+
+  // Get the elements array.
+  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
+  __ AssertFastElements(r2);
+
+  // Check that the key is within bounds.
+  __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
+  __ cmp(r0, Operand(r3));
+  __ b(hs, &miss);
+
+  // Load the result and make sure it's not the hole.
+  __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ ldr(r4,
+         MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ cmp(r4, ip);
+  __ b(eq, &miss);
+  __ mov(r0, r4);
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
 MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
                                                        int index,
                                                        Map* transition,
                                                        String* name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
-  //  -- r1    : key
+  //  -- r1    : name
   //  -- r2    : receiver
   //  -- lr    : return address
   // -----------------------------------
@@ -2902,6 +2988,76 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
 }
 
 
+MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
+    JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : key
+  //  -- r2    : receiver
+  //  -- lr    : return address
+  //  -- r3    : scratch
+  //  -- r4    : scratch (elements)
+  // -----------------------------------
+  Label miss;
+
+  Register value_reg = r0;
+  Register key_reg = r1;
+  Register receiver_reg = r2;
+  Register scratch = r3;
+  Register elements_reg = r4;
+
+  // Check that the receiver isn't a smi.
+  __ tst(receiver_reg, Operand(kSmiTagMask));
+  __ b(eq, &miss);
+
+  // Check that the map matches.
+  __ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+  __ cmp(scratch, Operand(Handle<Map>(receiver->map())));
+  __ b(ne, &miss);
+
+  // Check that the key is a smi.
+  __ tst(key_reg, Operand(kSmiTagMask));
+  __ b(ne, &miss);
+
+  // Get the elements array and make sure it is a fast element array, not 'cow'.
+  __ ldr(elements_reg,
+         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+  __ ldr(scratch, FieldMemOperand(elements_reg, HeapObject::kMapOffset));
+  __ cmp(scratch, Operand(Handle<Map>(Factory::fixed_array_map())));
+  __ b(ne, &miss);
+
+  // Check that the key is within bounds.
+  if (receiver->IsJSArray()) {
+    __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+  } else {
+    __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+  }
+  // Compare smis.
+  __ cmp(key_reg, scratch);
+  __ b(hs, &miss);
+
+  __ add(scratch,
+         elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ str(value_reg,
+         MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ RecordWrite(scratch,
+                 Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
+                 receiver_reg, elements_reg);
+
+  // value_reg (r0) is preserved.
+  // Done.
+  __ Ret();
+
+  __ bind(&miss);
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
 MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
   // ----------- S t a t e -------------
   //  -- r0    : argc
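
Both specialized stubs above scale the smi key by kPointerSizeLog2 - kSmiTagSize when forming element addresses. A standalone sketch of that arithmetic with the 32-bit ARM constants spelled out; illustrative, not V8 code:

  // A smi already encodes value << kSmiTagSize, so only the remaining
  // shift is needed to reach a byte offset of value * kPointerSize.
  int32_t SmiKeyToByteOffset(int32_t smi_key) {
    const int kSmiTagSize = 1;       // 32-bit V8 uses 31-bit smi payloads
    const int kPointerSizeLog2 = 2;  // 4-byte pointers
    return smi_key << (kPointerSizeLog2 - kSmiTagSize);
  }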
index 101eece..d71a35a 100644 (file)
 #include "v8.h"
 
 #include "arguments.h"
+#include "deoptimizer.h"
 #include "execution.h"
 #include "ic-inl.h"
 #include "factory.h"
 #include "runtime.h"
+#include "runtime-profiler.h"
 #include "serialize.h"
 #include "stub-cache.h"
 #include "regexp-stack.h"
@@ -62,6 +64,10 @@ namespace v8 {
 namespace internal {
 
 
+const double DoubleConstant::min_int = kMinInt;
+const double DoubleConstant::one_half = 0.5;
+
+
 // -----------------------------------------------------------------------------
 // Implementation of Label
 
@@ -210,7 +216,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
 #endif
   Counters::reloc_info_count.Increment();
   ASSERT(rinfo->pc() - last_pc_ >= 0);
-  ASSERT(RelocInfo::NUMBER_OF_MODES < kMaxRelocModes);
+  ASSERT(RelocInfo::NUMBER_OF_MODES <= kMaxRelocModes);
   // Use unsigned delta-encoding for pc.
   uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
   RelocInfo::Mode rmode = rinfo->rmode();
@@ -386,7 +392,7 @@ void RelocIterator::next() {
 RelocIterator::RelocIterator(Code* code, int mode_mask) {
   rinfo_.pc_ = code->instruction_start();
   rinfo_.data_ = 0;
-  // relocation info is read backwards
+  // Relocation info is read backwards.
   pos_ = code->relocation_start() + code->relocation_size();
   end_ = code->relocation_start();
   done_ = false;
@@ -399,7 +405,7 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
 RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
   rinfo_.pc_ = desc.buffer;
   rinfo_.data_ = 0;
-  // relocation info is read backwards
+  // Relocation info is read backwards.
   pos_ = desc.buffer + desc.buffer_size;
   end_ = pos_ - desc.reloc_size;
   done_ = false;
@@ -431,6 +437,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
       return "debug break";
     case RelocInfo::CODE_TARGET:
       return "code target";
+    case RelocInfo::GLOBAL_PROPERTY_CELL:
+      return "global property cell";
     case RelocInfo::RUNTIME_ENTRY:
       return "runtime entry";
     case RelocInfo::JS_RETURN:
@@ -476,6 +484,13 @@ void RelocInfo::Print() {
     PrintF(" (%s)  (%p)", Code::Kind2String(code->kind()), target_address());
   } else if (IsPosition(rmode_)) {
     PrintF("  (%" V8_PTR_PREFIX "d)", data());
+  } else if (rmode_ == RelocInfo::RUNTIME_ENTRY) {
+    // Deoptimization bailouts are stored as runtime entries.
+    int id = Deoptimizer::GetDeoptimizationId(
+        target_address(), Deoptimizer::EAGER);
+    if (id != Deoptimizer::kNotDeoptimizationEntry) {
+      PrintF("  (deoptimization bailout %d)", id);
+    }
   }
 
   PrintF("\n");
@@ -489,6 +504,9 @@ void RelocInfo::Verify() {
     case EMBEDDED_OBJECT:
       Object::VerifyPointer(target_object());
       break;
+    case GLOBAL_PROPERTY_CELL:
+      Object::VerifyPointer(target_cell());
+      break;
     case DEBUG_BREAK:
 #ifndef ENABLE_DEBUGGER_SUPPORT
       UNREACHABLE();
@@ -595,6 +613,23 @@ ExternalReference ExternalReference::transcendental_cache_array_address() {
 }
 
 
+ExternalReference ExternalReference::new_deoptimizer_function() {
+  return ExternalReference(
+      Redirect(FUNCTION_ADDR(Deoptimizer::New)));
+}
+
+
+ExternalReference ExternalReference::compute_output_frames_function() {
+  return ExternalReference(
+      Redirect(FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
+}
+
+
+ExternalReference ExternalReference::global_contexts_list() {
+  return ExternalReference(Heap::global_contexts_list_address());
+}
+
+
 ExternalReference ExternalReference::keyed_lookup_cache_keys() {
   return ExternalReference(KeyedLookupCache::keys_address());
 }
@@ -675,6 +710,18 @@ ExternalReference ExternalReference::scheduled_exception_address() {
 }
 
 
+ExternalReference ExternalReference::address_of_min_int() {
+  return ExternalReference(reinterpret_cast<void*>(
+      const_cast<double*>(&DoubleConstant::min_int)));
+}
+
+
+ExternalReference ExternalReference::address_of_one_half() {
+  return ExternalReference(reinterpret_cast<void*>(
+      const_cast<double*>(&DoubleConstant::one_half)));
+}
+
+
 #ifndef V8_INTERPRETED_REGEXP
 
 ExternalReference ExternalReference::re_check_stack_guard_state() {
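
The DoubleConstant additions above give min_int and one_half stable addresses so generated code can load them through an ExternalReference instead of synthesizing the bit patterns inline. A hedged sketch of the intended ARM usage; register choices are illustrative:

  // Load 0.5 into a VFP double register via the new external reference.
  __ mov(scratch, Operand(ExternalReference::address_of_one_half()));
  __ vldr(d7, scratch, 0);  // d7 = 0.5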
diff --git a/src/assembler.cc.rej b/src/assembler.cc.rej
new file mode 100644 (file)
index 0000000..fbc576b
--- /dev/null
@@ -0,0 +1,30 @@
+--- src/assembler.cc   (revision 757)
++++ src/assembler.cc   (working copy)
+@@ -392,12 +392,16 @@
+ RelocIterator::RelocIterator(Code* code, int mode_mask) {
+   rinfo_.pc_ = code->instruction_start();
+   rinfo_.data_ = 0;
+-  // relocation info is read backwards
++  // Relocation info is read backwards.
+   pos_ = code->relocation_start() + code->relocation_size();
+   end_ = code->relocation_start();
+   done_ = false;
+   mode_mask_ = mode_mask;
+-  if (mode_mask_ == 0) pos_ = end_;
++  // Skip all relocation information if the mask is zero or if the
++  // code has been deoptimized and thereby destructively patched.
++  if (mode_mask_ == 0 || code->kind() == Code::DEOPTIMIZED_FUNCTION) {
++    pos_ = end_;
++  }
+   next();
+ }
+@@ -405,7 +409,7 @@
+ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
+   rinfo_.pc_ = desc.buffer;
+   rinfo_.data_ = 0;
+-  // relocation info is read backwards
++  // Relocation info is read backwards.
+   pos_ = desc.buffer + desc.buffer_size;
+   end_ = pos_ - desc.reloc_size;
+   done_ = false;
index da4ab21..82c9fc2 100644 (file)
 #include "runtime.h"
 #include "top.h"
 #include "token.h"
-#include "objects.h"
 
 namespace v8 {
 namespace internal {
 
 
 // -----------------------------------------------------------------------------
+// Common double constants.
+
+class DoubleConstant: public AllStatic {
+ public:
+  static const double min_int;
+  static const double one_half;
+};
+
+
+// -----------------------------------------------------------------------------
 // Labels represent pc locations; they are typically jump or call targets.
 // After declaration, a label can be freely used to denote known or (yet)
 // unknown pc location. Assembler::bind() is used to bind a label to the
@@ -174,6 +183,8 @@ class RelocInfo BASE_EMBEDDED {
     CODE_TARGET,  // Code target which is not any of the above.
     EMBEDDED_OBJECT,
 
+    GLOBAL_PROPERTY_CELL,
+
     // Everything after runtime_entry (inclusive) is not GC'ed.
     RUNTIME_ENTRY,
     JS_RETURN,  // Marks start of the ExitJSFrame code.
@@ -254,6 +265,10 @@ class RelocInfo BASE_EMBEDDED {
   INLINE(Handle<Object> target_object_handle(Assembler* origin));
   INLINE(Object** target_object_address());
   INLINE(void set_target_object(Object* target));
+  INLINE(JSGlobalPropertyCell* target_cell());
+  INLINE(Handle<JSGlobalPropertyCell> target_cell_handle());
+  INLINE(void set_target_cell(JSGlobalPropertyCell* cell));
 
   // Read the address of the word containing the target_address in an
   // instruction stream.  What this means exactly is architecture-independent.
@@ -484,6 +499,11 @@ class ExternalReference BASE_EMBEDDED {
   static ExternalReference transcendental_cache_array_address();
   static ExternalReference delete_handle_scope_extensions();
 
+  // Deoptimization support.
+  static ExternalReference new_deoptimizer_function();
+  static ExternalReference compute_output_frames_function();
+  static ExternalReference global_contexts_list();
+
   // Static data in the keyed lookup cache.
   static ExternalReference keyed_lookup_cache_keys();
   static ExternalReference keyed_lookup_cache_field_offsets();
@@ -526,6 +546,10 @@ class ExternalReference BASE_EMBEDDED {
 
   static ExternalReference scheduled_exception_address();
 
+  // Static variables containing common double constants.
+  static ExternalReference address_of_min_int();
+  static ExternalReference address_of_one_half();
+
   Address address() const {return reinterpret_cast<Address>(address_);}
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
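
GLOBAL_PROPERTY_CELL is deliberately listed before RUNTIME_ENTRY: everything from RUNTIME_ENTRY on is not GC'ed, and embedded cells must be visited. A sketch of how a visitor might walk the cells of a code object using the new accessors; loop shape only, illustrative:

  int mask = RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL);
  for (RelocIterator it(code, mask); !it.done(); it.next()) {
    JSGlobalPropertyCell* cell = it.rinfo()->target_cell();
    // Visit the embedded cell pointer, e.g. update it if the GC moved it.
  }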
index f0a25c1..e88156d 100644 (file)
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#ifndef V8_AST_INL_H_
+#define V8_AST_INL_H_
+
 #include "v8.h"
 
 #include "ast.h"
+#include "jump-target-inl.h"
 
 namespace v8 {
 namespace internal {
 
-BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type)
-    : labels_(labels), type_(type) {
-  ASSERT(labels == NULL || labels->length() > 0);
-}
-
 
 SwitchStatement::SwitchStatement(ZoneStringList* labels)
     : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
@@ -44,17 +43,42 @@ SwitchStatement::SwitchStatement(ZoneStringList* labels)
 }
 
 
+Block::Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
+    : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY),
+      statements_(capacity),
+      is_initializer_block_(is_initializer_block) {
+}
+
+
+BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type)
+    : labels_(labels),
+      type_(type),
+      entry_id_(GetNextId()),
+      exit_id_(GetNextId()) {
+  ASSERT(labels == NULL || labels->length() > 0);
+}
+
+
 IterationStatement::IterationStatement(ZoneStringList* labels)
     : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
       body_(NULL),
-      continue_target_(JumpTarget::BIDIRECTIONAL) {
+      continue_target_(JumpTarget::BIDIRECTIONAL),
+      osr_entry_id_(GetNextId()) {
 }
 
 
-Block::Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
-    : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY),
-      statements_(capacity),
-      is_initializer_block_(is_initializer_block) {
+DoWhileStatement::DoWhileStatement(ZoneStringList* labels)
+    : IterationStatement(labels),
+      cond_(NULL),
+      condition_position_(-1),
+      next_id_(GetNextId()) {
+}
+
+
+WhileStatement::WhileStatement(ZoneStringList* labels)
+    : IterationStatement(labels),
+      cond_(NULL),
+      may_have_function_literal_(true) {
 }
 
 
@@ -64,7 +88,8 @@ ForStatement::ForStatement(ZoneStringList* labels)
       cond_(NULL),
       next_(NULL),
       may_have_function_literal_(true),
-      loop_variable_(NULL) {
+      loop_variable_(NULL),
+      next_id_(GetNextId()) {
 }
 
 
@@ -73,8 +98,6 @@ ForInStatement::ForInStatement(ZoneStringList* labels)
 }
 
 
-DoWhileStatement::DoWhileStatement(ZoneStringList* labels)
-    : IterationStatement(labels), cond_(NULL), condition_position_(-1) {
-}
-
 } }  // namespace v8::internal
+
+#endif  // V8_AST_INL_H_
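
The constructors above allocate their bailout ids eagerly via GetNextId(), so every potential deoptimization point has a stable number before any code is generated. An illustrative, non-normative reading of the numbering scheme:

  // Ids grow in construction order; ReserveIdRange hands out a contiguous
  // block when a node needs one id per sub-element.
  //   BreakableStatement b(...);   // entry_id_ == N, exit_id_ == N + 1
  //   ArrayLiteral a(...);         // first_element_id_ .. + length - 1
  //   AstNode::ResetIds();         // presumably called once per compile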
index bb445c4..c1ea0a8 100644 (file)
 #include "v8.h"
 
 #include "ast.h"
+#include "jump-target-inl.h"
 #include "parser.h"
 #include "scopes.h"
 #include "string-stream.h"
-#include "ast-inl.h"
-#include "jump-target-inl.h"
 
 namespace v8 {
 namespace internal {
 
-
+unsigned AstNode::current_id_ = 0;
+unsigned AstNode::count_ = 0;
 VariableProxySentinel VariableProxySentinel::this_proxy_(true);
 VariableProxySentinel VariableProxySentinel::identifier_proxy_(false);
 ValidLeftHandSideSentinel ValidLeftHandSideSentinel::instance_;
@@ -48,6 +48,8 @@ Call Call::sentinel_(NULL, NULL, 0);
 // ----------------------------------------------------------------------------
 // All the Accept member functions for each syntax tree node type.
 
+void Slot::Accept(AstVisitor* v) { v->VisitSlot(this); }
+
 #define DECL_ACCEPT(type)                                       \
   void type::Accept(AstVisitor* v) { v->Visit##type(this); }
 AST_NODE_LIST(DECL_ACCEPT)
@@ -115,6 +117,29 @@ void VariableProxy::BindTo(Variable* var) {
 }
 
 
+Assignment::Assignment(Token::Value op,
+                       Expression* target,
+                       Expression* value,
+                       int pos)
+    : op_(op),
+      target_(target),
+      value_(value),
+      pos_(pos),
+      compound_bailout_id_(kNoNumber),
+      block_start_(false),
+      block_end_(false),
+      is_monomorphic_(false),
+      receiver_types_(NULL) {
+  ASSERT(Token::IsAssignmentOp(op));
+  binary_operation_ = is_compound()
+      ? new BinaryOperation(binary_op(), target, value, pos + 1)
+      : NULL;
+  if (is_compound()) {
+    compound_bailout_id_ = GetNextId();
+  }
+}
+
+
 Token::Value Assignment::binary_op() const {
   switch (op_) {
     case Token::ASSIGN_BIT_OR: return Token::BIT_OR;
@@ -139,6 +164,12 @@ bool FunctionLiteral::AllowsLazyCompilation() {
 }
 
 
+bool FunctionLiteral::AllowOptimize() {
+  // We can't deal with heap-allocated locals.
+  return scope()->num_heap_slots() == 0;
+}
+
+
 ObjectLiteral::Property::Property(Literal* key, Expression* value) {
   emit_store_ = true;
   key_ = key;
@@ -373,6 +404,265 @@ BinaryOperation::BinaryOperation(Assignment* assignment) {
 
 
 // ----------------------------------------------------------------------------
+// Inlining support
+
+bool Block::IsInlineable() const {
+  const int count = statements_.length();
+  for (int i = 0; i < count; ++i) {
+    if (!statements_[i]->IsInlineable()) return false;
+  }
+  return true;
+}
+
+
+bool ExpressionStatement::IsInlineable() const {
+  return expression()->IsInlineable();
+}
+
+
+bool IfStatement::IsInlineable() const {
+  return condition()->IsInlineable() && then_statement()->IsInlineable() &&
+      else_statement()->IsInlineable();
+}
+
+
+bool ReturnStatement::IsInlineable() const {
+  return expression()->IsInlineable();
+}
+
+
+bool Conditional::IsInlineable() const {
+  return condition()->IsInlineable() && then_expression()->IsInlineable() &&
+      else_expression()->IsInlineable();
+}
+
+
+bool VariableProxy::IsInlineable() const {
+  return var()->is_global() || var()->IsStackAllocated();
+}
+
+
+bool Assignment::IsInlineable() const {
+  return target()->IsInlineable() && value()->IsInlineable();
+}
+
+
+bool Property::IsInlineable() const {
+  return obj()->IsInlineable() && key()->IsInlineable();
+}
+
+
+bool Call::IsInlineable() const {
+  if (!expression()->IsInlineable()) return false;
+  const int count = arguments()->length();
+  for (int i = 0; i < count; ++i) {
+    if (!arguments()->at(i)->IsInlineable()) return false;
+  }
+  return true;
+}
+
+
+bool CallNew::IsInlineable() const {
+  if (!expression()->IsInlineable()) return false;
+  const int count = arguments()->length();
+  for (int i = 0; i < count; ++i) {
+    if (!arguments()->at(i)->IsInlineable()) return false;
+  }
+  return true;
+}
+
+
+bool CallRuntime::IsInlineable() const {
+  const int count = arguments()->length();
+  for (int i = 0; i < count; ++i) {
+    if (!arguments()->at(i)->IsInlineable()) return false;
+  }
+  return true;
+}
+
+
+bool UnaryOperation::IsInlineable() const {
+  return expression()->IsInlineable();
+}
+
+
+bool BinaryOperation::IsInlineable() const {
+  return left()->IsInlineable() && right()->IsInlineable();
+}
+
+
+bool CompareOperation::IsInlineable() const {
+  return left()->IsInlineable() && right()->IsInlineable();
+}
+
+
+bool CompareToNull::IsInlineable() const {
+  return expression()->IsInlineable();
+}
+
+
+bool CountOperation::IsInlineable() const {
+  return expression()->IsInlineable();
+}
+
+
+// ----------------------------------------------------------------------------
+// Recording of type feedback
+
+void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+  // Record type feedback from the oracle in the AST.
+  is_monomorphic_ = oracle->LoadIsMonomorphic(this);
+  if (key()->IsPropertyName()) {
+    if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_ArrayLength)) {
+      is_array_length_ = true;
+    } else {
+      Literal* lit_key = key()->AsLiteral();
+      ASSERT(lit_key != NULL && lit_key->handle()->IsString());
+      Handle<String> name = Handle<String>::cast(lit_key->handle());
+      ZoneMapList* types = oracle->LoadReceiverTypes(this, name);
+      receiver_types_ = types;
+    }
+  } else if (is_monomorphic_) {
+    monomorphic_receiver_type_ = oracle->LoadMonomorphicReceiverType(this);
+  }
+}
+
+
+void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+  Property* prop = target()->AsProperty();
+  ASSERT(prop != NULL);
+  is_monomorphic_ = oracle->StoreIsMonomorphic(this);
+  if (prop->key()->IsPropertyName()) {
+    Literal* lit_key = prop->key()->AsLiteral();
+    ASSERT(lit_key != NULL && lit_key->handle()->IsString());
+    Handle<String> name = Handle<String>::cast(lit_key->handle());
+    ZoneMapList* types = oracle->StoreReceiverTypes(this, name);
+    receiver_types_ = types;
+  } else if (is_monomorphic_) {
+    // Record receiver type for monomorphic keyed stores.
+    monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
+  }
+}
+
+
+void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+  TypeInfo info = oracle->SwitchType(this);
+  if (info.IsSmi()) {
+    compare_type_ = SMI_ONLY;
+  } else if (info.IsNonPrimitive()) {
+    compare_type_ = OBJECT_ONLY;
+  } else {
+    ASSERT(compare_type_ == NONE);
+  }
+}
+
+
+static bool CallWithoutIC(Handle<JSFunction> target, int arity) {
+  if (target->NeedsArgumentsAdaption()) {
+    // If the number of formal parameters of the target function
+    // does not match the number of arguments we're passing, we
+    // don't want to deal with it.
+    return target->shared()->formal_parameter_count() == arity;
+  } else {
+    // If the target doesn't need arguments adaption, we can call
+    // it directly, but we avoid doing so if it has a custom call
+    // generator, because that is likely to generate better code.
+    return !target->shared()->HasCustomCallGenerator();
+  }
+}
+
+
+bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
+  holder_ = Handle<JSObject>::null();
+  while (true) {
+    LookupResult lookup;
+    type->LookupInDescriptors(NULL, *name, &lookup);
+    // If the function wasn't found directly in the map, we start
+    // looking upwards through the prototype chain.
+    if (!lookup.IsFound() && type->prototype()->IsJSObject()) {
+      holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
+      type = Handle<Map>(holder()->map());
+    } else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
+      target_ = Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
+      return CallWithoutIC(target_, arguments()->length());
+    } else {
+      return false;
+    }
+  }
+}
+
+
+bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
+                               Handle<String> name) {
+  target_ = Handle<JSFunction>::null();
+  cell_ = Handle<JSGlobalPropertyCell>::null();
+  LookupResult lookup;
+  global->Lookup(*name, &lookup);
+  if (lookup.IsProperty() && lookup.type() == NORMAL) {
+    cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(&lookup));
+    if (cell_->value()->IsJSFunction()) {
+      Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
+      // If the function is in new space we assume it's more likely to
+      // change and thus prefer the general IC code.
+      if (!Heap::InNewSpace(*candidate)
+          && CallWithoutIC(candidate, arguments()->length())) {
+        target_ = candidate;
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+
+void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+  Property* property = expression()->AsProperty();
+  ASSERT(property != NULL);
+  // Specialize for the receiver types seen at runtime.
+  Literal* key = property->key()->AsLiteral();
+  ASSERT(key != NULL && key->handle()->IsString());
+  Handle<String> name = Handle<String>::cast(key->handle());
+  receiver_types_ = oracle->CallReceiverTypes(this, name);
+#ifdef DEBUG
+  if (FLAG_enable_slow_asserts) {
+    if (receiver_types_ != NULL) {
+      int length = receiver_types_->length();
+      for (int i = 0; i < length; i++) {
+        Handle<Map> map = receiver_types_->at(i);
+        ASSERT(!map.is_null() && *map != NULL);
+      }
+    }
+  }
+#endif
+  if (receiver_types_ != NULL && receiver_types_->length() > 0) {
+    Handle<Map> type = receiver_types_->at(0);
+    is_monomorphic_ = oracle->CallIsMonomorphic(this);
+    if (is_monomorphic_) is_monomorphic_ = ComputeTarget(type, name);
+  }
+}
+
+
+void BinaryOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+  TypeInfo left = oracle->BinaryType(this, TypeFeedbackOracle::LEFT);
+  TypeInfo right = oracle->BinaryType(this, TypeFeedbackOracle::RIGHT);
+  is_smi_only_ = left.IsSmi() && right.IsSmi();
+}
+
+
+void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+  TypeInfo left = oracle->CompareType(this, TypeFeedbackOracle::LEFT);
+  TypeInfo right = oracle->CompareType(this, TypeFeedbackOracle::RIGHT);
+  if (left.IsSmi() && right.IsSmi()) {
+    compare_type_ = SMI_ONLY;
+  } else if (left.IsNonPrimitive() && right.IsNonPrimitive()) {
+    compare_type_ = OBJECT_ONLY;
+  } else {
+    ASSERT(compare_type_ == NONE);
+  }
+}
+
+
+// ----------------------------------------------------------------------------
 // Implementation of AstVisitor
 
 bool AstVisitor::CheckStackOverflow() {
@@ -742,15 +1032,12 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
 }
 
 
-WhileStatement::WhileStatement(ZoneStringList* labels)
-    : IterationStatement(labels),
-      cond_(NULL),
-      may_have_function_literal_(true) {
-}
-
-
-CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements)
-    : label_(label), statements_(statements) {
-}
+CaseClause::CaseClause(Expression* label,
+                       ZoneList<Statement*>* statements,
+                       int pos)
+    : label_(label),
+      statements_(statements),
+      position_(pos),
+      compare_type_(NONE) {}
 
 } }  // namespace v8::internal
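
The RecordTypeFeedback family above distills IC state into per-node flags that the optimizing compiler can branch on. A hedged sketch of how a consumer might use the comparison feedback; the emit_* helpers are placeholders:

  void EmitCompare(CompareOperation* expr) {
    if (expr->IsSmiCompare()) {
      emit_smi_compare(expr);       // untagged compare, deopt on non-smi
    } else if (expr->IsObjectCompare()) {
      emit_object_compare(expr);    // map-checked fast path
    } else {
      emit_generic_compare(expr);   // fall back to the generic Compare stub
    }
  }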
index 0846dbc..cdf456f 100644 (file)
--- a/src/ast.h
+++ b/src/ast.h
@@ -75,7 +75,6 @@ namespace internal {
   V(FunctionLiteral)                            \
   V(SharedFunctionInfoLiteral)                  \
   V(Conditional)                                \
-  V(Slot)                                       \
   V(VariableProxy)                              \
   V(Literal)                                    \
   V(RegExpLiteral)                              \
@@ -102,10 +101,11 @@ namespace internal {
   EXPRESSION_NODE_LIST(V)
 
 // Forward declarations
-class TargetCollector;
-class MaterializedLiteral;
-class DefinitionInfo;
 class BitVector;
+class DefinitionInfo;
+class MaterializedLiteral;
+class TargetCollector;
+class TypeFeedbackOracle;
 
 #define DEF_FORWARD_DECLARATION(type) class type;
 AST_NODE_LIST(DEF_FORWARD_DECLARATION)
@@ -133,6 +133,10 @@ class AstNode: public ZoneObject {
   };
 #undef DECLARE_TYPE_ENUM
 
+  static const int kNoNumber = -1;
+
+  AstNode() : id_(GetNextId()) { count_++; }
+
   virtual ~AstNode() { }
 
   virtual void Accept(AstVisitor* v) = 0;
@@ -150,6 +154,27 @@ class AstNode: public ZoneObject {
   virtual BreakableStatement* AsBreakableStatement() { return NULL; }
   virtual IterationStatement* AsIterationStatement() { return NULL; }
   virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
+  virtual Slot* AsSlot() { return NULL; }
+
+  // True if the node is simple enough for us to inline calls containing it.
+  virtual bool IsInlineable() const { return false; }
+
+  static int Count() { return count_; }
+  static void ResetIds() { current_id_ = 0; }
+  unsigned id() const { return id_; }
+
+ protected:
+  static unsigned GetNextId() { return current_id_++; }
+  static unsigned ReserveIdRange(int n) {
+    unsigned tmp = current_id_;
+    current_id_ += n;
+    return tmp;
+  }
+
+ private:
+  static unsigned current_id_;
+  static unsigned count_;
+  unsigned id_;
 };
 
 
@@ -174,6 +199,18 @@ class Statement: public AstNode {
 
 class Expression: public AstNode {
  public:
+  enum Context {
+    // Not assigned a context yet, or else will not be visited during
+    // code generation.
+    kUninitialized,
+    // Evaluated for its side effects.
+    kEffect,
+    // Evaluated for its value (and side effects).
+    kValue,
+    // Evaluated for control flow (and side effects).
+    kTest
+  };
+
   Expression() : bitfields_(0) {}
 
   virtual Expression* AsExpression()  { return this; }
@@ -181,6 +218,10 @@ class Expression: public AstNode {
   virtual bool IsTrivial() { return false; }
   virtual bool IsValidLeftHandSide() { return false; }
 
+  // Helpers for ToBoolean conversion.
+  virtual bool ToBooleanIsTrue() { return false; }
+  virtual bool ToBooleanIsFalse() { return false; }
+
   // Symbols that cannot be parsed as array indices are considered property
   // names.  We do not treat symbols that can be array indexes as property
   // names because [] for string objects is handled only by keyed ICs.
@@ -198,6 +239,24 @@ class Expression: public AstNode {
   // True iff the expression is a literal represented as a smi.
   virtual bool IsSmiLiteral() { return false; }
 
+  // Type feedback information for assignments and properties.
+  virtual bool IsMonomorphic() {
+    UNREACHABLE();
+    return false;
+  }
+  virtual bool IsArrayLength() {
+    UNREACHABLE();
+    return false;
+  }
+  virtual ZoneMapList* GetReceiverTypes() {
+    UNREACHABLE();
+    return NULL;
+  }
+  virtual Handle<Map> GetMonomorphicReceiverType() {
+    UNREACHABLE();
+    return Handle<Map>();
+  }
+
   // Static type information for this expression.
   StaticType* type() { return &type_; }
 
@@ -301,6 +360,10 @@ class BreakableStatement: public Statement {
   // Testers.
   bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
 
+  // Bailout support.
+  int EntryId() const { return entry_id_; }
+  int ExitId() const { return exit_id_; }
+
  protected:
   inline BreakableStatement(ZoneStringList* labels, Type type);
 
@@ -308,6 +371,8 @@ class BreakableStatement: public Statement {
   ZoneStringList* labels_;
   Type type_;
   BreakTarget break_target_;
+  int entry_id_;
+  int exit_id_;
 };
 
 
@@ -327,6 +392,8 @@ class Block: public BreakableStatement {
     return statements_[0]->StatementAsCountOperation();
   }
 
+  virtual bool IsInlineable() const;
+
   void AddStatement(Statement* statement) { statements_.Add(statement); }
 
   ZoneList<Statement*>* statements() { return &statements_; }
@@ -370,6 +437,10 @@ class IterationStatement: public BreakableStatement {
   Statement* body() const { return body_; }
   void set_body(Statement* stmt) { body_ = stmt; }
 
+  // Bailout support.
+  int OsrEntryId() const { return osr_entry_id_; }
+  virtual int ContinueId() const = 0;
+
   // Code generation
   BreakTarget* continue_target()  { return &continue_target_; }
 
@@ -383,6 +454,7 @@ class IterationStatement: public BreakableStatement {
  private:
   Statement* body_;
   BreakTarget continue_target_;
+  int osr_entry_id_;
 };
 
 
@@ -404,15 +476,19 @@ class DoWhileStatement: public IterationStatement {
   int condition_position() { return condition_position_; }
   void set_condition_position(int pos) { condition_position_ = pos; }
 
+  // Bailout support.
+  virtual int ContinueId() const { return next_id_; }
+
  private:
   Expression* cond_;
   int condition_position_;
+  int next_id_;
 };
 
 
 class WhileStatement: public IterationStatement {
  public:
-  explicit WhileStatement(ZoneStringList* labels);
+  explicit inline WhileStatement(ZoneStringList* labels);
 
   DECLARE_NODE_TYPE(WhileStatement)
 
@@ -429,6 +505,9 @@ class WhileStatement: public IterationStatement {
     may_have_function_literal_ = value;
   }
 
+  // Bailout support.
+  virtual int ContinueId() const { return EntryId(); }
+
  private:
   Expression* cond_;
   // True if there is a function literal subexpression in the condition.
@@ -466,6 +545,9 @@ class ForStatement: public IterationStatement {
     may_have_function_literal_ = value;
   }
 
+  // Bailout support.
+  virtual int ContinueId() const { return next_id_; }
+
   bool is_fast_smi_loop() { return loop_variable_ != NULL; }
   Variable* loop_variable() { return loop_variable_; }
   void set_loop_variable(Variable* var) { loop_variable_ = var; }
@@ -477,6 +559,7 @@ class ForStatement: public IterationStatement {
   // True if there is a function literal subexpression in the condition.
   bool may_have_function_literal_;
   Variable* loop_variable_;
+  int next_id_;
 };
 
 
@@ -495,6 +578,9 @@ class ForInStatement: public IterationStatement {
   Expression* each() const { return each_; }
   Expression* enumerable() const { return enumerable_; }
 
+  // Bailout support.
+  virtual int ContinueId() const { return EntryId(); }
+
  private:
   Expression* each_;
   Expression* enumerable_;
@@ -508,11 +594,13 @@ class ExpressionStatement: public Statement {
 
   DECLARE_NODE_TYPE(ExpressionStatement)
 
+  virtual bool IsInlineable() const;
+
   virtual Assignment* StatementAsSimpleAssignment();
   virtual CountOperation* StatementAsCountOperation();
 
   void set_expression(Expression* e) { expression_ = e; }
-  Expression* expression() { return expression_; }
+  Expression* expression() const { return expression_; }
 
  private:
   Expression* expression_;
@@ -554,7 +642,8 @@ class ReturnStatement: public Statement {
 
   DECLARE_NODE_TYPE(ReturnStatement)
 
-  Expression* expression() { return expression_; }
+  Expression* expression() const { return expression_; }
+  virtual bool IsInlineable() const;
 
  private:
   Expression* expression_;
@@ -588,7 +677,7 @@ class WithExitStatement: public Statement {
 
 class CaseClause: public ZoneObject {
  public:
-  CaseClause(Expression* label, ZoneList<Statement*>* statements);
+  CaseClause(Expression* label, ZoneList<Statement*>* statements, int pos);
 
   bool is_default() const { return label_ == NULL; }
   Expression* label() const {
@@ -598,10 +687,21 @@ class CaseClause: public ZoneObject {
   JumpTarget* body_target() { return &body_target_; }
   ZoneList<Statement*>* statements() const { return statements_; }
 
+  int position() { return position_; }
+  void set_position(int pos) { position_ = pos; }
+
+  // Type feedback information.
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
+  bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
+
  private:
   Expression* label_;
   JumpTarget body_target_;
   ZoneList<Statement*>* statements_;
+  int position_;
+  enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
+  CompareTypeFeedback compare_type_;
 };
 
 
@@ -641,6 +741,8 @@ class IfStatement: public Statement {
 
   DECLARE_NODE_TYPE(IfStatement)
 
+  virtual bool IsInlineable() const;
+
   bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
   bool HasElseStatement() const { return !else_statement()->IsEmpty(); }
 
@@ -744,6 +846,8 @@ class DebuggerStatement: public Statement {
 class EmptyStatement: public Statement {
  public:
   DECLARE_NODE_TYPE(EmptyStatement)
+
+  virtual bool IsInlineable() const { return true; }
 };
 
 
@@ -754,6 +858,7 @@ class Literal: public Expression {
   DECLARE_NODE_TYPE(Literal)
 
   virtual bool IsTrivial() { return true; }
+  virtual bool IsInlineable() const { return true; }
   virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
 
   // Check if this literal is identical to the other literal.
@@ -769,6 +874,14 @@ class Literal: public Expression {
     return false;
   }
 
+  Handle<String> AsPropertyName() {
+    ASSERT(IsPropertyName());
+    return Handle<String>::cast(handle_);
+  }
+
+  virtual bool ToBooleanIsTrue() { return handle_->ToBoolean()->IsTrue(); }
+  virtual bool ToBooleanIsFalse() { return handle_->ToBoolean()->IsFalse(); }
+
   // Identity testers.
   bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
   bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
@@ -906,16 +1019,21 @@ class ArrayLiteral: public MaterializedLiteral {
                int depth)
       : MaterializedLiteral(literal_index, is_simple, depth),
         constant_elements_(constant_elements),
-        values_(values) {}
+        values_(values),
+        first_element_id_(ReserveIdRange(values->length())) {}
 
   DECLARE_NODE_TYPE(ArrayLiteral)
 
   Handle<FixedArray> constant_elements() const { return constant_elements_; }
   ZoneList<Expression*>* values() const { return values_; }
 
+  // Return an AST id for an element that is used in simulate instructions.
+  int GetIdForElement(int i) { return first_element_id_ + i; }
+
  private:
   Handle<FixedArray> constant_elements_;
   ZoneList<Expression*>* values_;
+  int first_element_id_;
 };
 
 
@@ -967,6 +1085,8 @@ class VariableProxy: public Expression {
     return is_this_ || is_trivial_;
   }
 
+  virtual bool IsInlineable() const;
+
   bool IsVariable(Handle<String> n) {
     return !is_this() && name().is_identical_to(n);
   }
@@ -1044,7 +1164,9 @@ class Slot: public Expression {
     ASSERT(var != NULL);
   }
 
-  DECLARE_NODE_TYPE(Slot)
+  virtual void Accept(AstVisitor* v);
+
+  virtual Slot* AsSlot() { return this; }
 
   bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; }
 
@@ -1069,17 +1191,41 @@ class Property: public Expression {
   // of the resolved Reference.
   enum Type { NORMAL, SYNTHETIC };
   Property(Expression* obj, Expression* key, int pos, Type type = NORMAL)
-      : obj_(obj), key_(key), pos_(pos), type_(type) { }
+      : obj_(obj),
+        key_(key),
+        pos_(pos),
+        type_(type),
+        is_monomorphic_(false),
+        receiver_types_(NULL),
+        is_array_length_(false),
+        is_arguments_access_(false) { }
 
   DECLARE_NODE_TYPE(Property)
 
   virtual bool IsValidLeftHandSide() { return true; }
+  virtual bool IsInlineable() const;
 
   Expression* obj() const { return obj_; }
   Expression* key() const { return key_; }
   int position() const { return pos_; }
   bool is_synthetic() const { return type_ == SYNTHETIC; }
 
+  // Marks that this is actually an argument rewritten to a keyed property
+  // accessing the argument through the arguments shadow object.
+  void set_is_arguments_access(bool is_arguments_access) {
+    is_arguments_access_ = is_arguments_access;
+  }
+  bool is_arguments_access() const { return is_arguments_access_; }
+
+  // Type feedback information.
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  virtual bool IsMonomorphic() { return is_monomorphic_; }
+  virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
+  virtual bool IsArrayLength() { return is_array_length_; }
+  virtual Handle<Map> GetMonomorphicReceiverType() {
+    return monomorphic_receiver_type_;
+  }
+
   // Returns a property singleton property access on 'this'.  Used
   // during preparsing.
   static Property* this_property() { return &this_property_; }
@@ -1090,6 +1236,12 @@ class Property: public Expression {
   int pos_;
   Type type_;
 
+  bool is_monomorphic_;
+  ZoneMapList* receiver_types_;
+  bool is_array_length_;
+  bool is_arguments_access_;
+  Handle<Map> monomorphic_receiver_type_;
+
   // Dummy property used during preparsing.
   static Property this_property_;
 };
@@ -1098,21 +1250,55 @@ class Property: public Expression {
 class Call: public Expression {
  public:
   Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
-      : expression_(expression), arguments_(arguments), pos_(pos) { }
+      : expression_(expression),
+        arguments_(arguments),
+        pos_(pos),
+        is_monomorphic_(false),
+        receiver_types_(NULL),
+        return_id_(GetNextId()) {
+  }
 
   DECLARE_NODE_TYPE(Call)
 
+  virtual bool IsInlineable() const;
+
   Expression* expression() const { return expression_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
   int position() { return pos_; }
 
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
+  virtual bool IsMonomorphic() { return is_monomorphic_; }
+  Handle<JSFunction> target() { return target_; }
+  Handle<JSObject> holder() { return holder_; }
+  Handle<JSGlobalPropertyCell> cell() { return cell_; }
+
+  bool ComputeTarget(Handle<Map> type, Handle<String> name);
+  bool ComputeGlobalTarget(Handle<GlobalObject> global, Handle<String> name);
+
+  // Bailout support.
+  int ReturnId() const { return return_id_; }
+
   static Call* sentinel() { return &sentinel_; }
 
+#ifdef DEBUG
+  // Used to assert that the FullCodeGenerator records the return site.
+  bool return_is_recorded_;
+#endif
+
  private:
   Expression* expression_;
   ZoneList<Expression*>* arguments_;
   int pos_;
 
+  bool is_monomorphic_;
+  ZoneMapList* receiver_types_;
+  Handle<JSFunction> target_;
+  Handle<JSObject> holder_;
+  Handle<JSGlobalPropertyCell> cell_;
+
+  int return_id_;
+
   static Call sentinel_;
 };
 
@@ -1124,6 +1310,8 @@ class CallNew: public Expression {
 
   DECLARE_NODE_TYPE(CallNew)
 
+  virtual bool IsInlineable() const;
+
   Expression* expression() const { return expression_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
   int position() { return pos_; }
@@ -1148,6 +1336,8 @@ class CallRuntime: public Expression {
 
   DECLARE_NODE_TYPE(CallRuntime)
 
+  virtual bool IsInlineable() const;
+
   Handle<String> name() const { return name_; }
   Runtime::Function* function() const { return function_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -1169,6 +1359,8 @@ class UnaryOperation: public Expression {
 
   DECLARE_NODE_TYPE(UnaryOperation)
 
+  virtual bool IsInlineable() const;
+
   virtual bool ResultOverwriteAllowed();
 
   Token::Value op() const { return op_; }
@@ -1186,7 +1378,7 @@ class BinaryOperation: public Expression {
                   Expression* left,
                   Expression* right,
                   int pos)
-      : op_(op), left_(left), right_(right), pos_(pos) {
+      : op_(op), left_(left), right_(right), pos_(pos), is_smi_only_(false) {
     ASSERT(Token::IsBinaryOp(op));
   }
 
@@ -1195,6 +1387,8 @@ class BinaryOperation: public Expression {
 
   DECLARE_NODE_TYPE(BinaryOperation)
 
+  virtual bool IsInlineable() const;
+
   virtual bool ResultOverwriteAllowed();
 
   Token::Value op() const { return op_; }
@@ -1202,11 +1396,16 @@ class BinaryOperation: public Expression {
   Expression* right() const { return right_; }
   int position() const { return pos_; }
 
+  // Type feedback information.
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  bool IsSmiOnly() const { return is_smi_only_; }
+
  private:
   Token::Value op_;
   Expression* left_;
   Expression* right_;
   int pos_;
+  bool is_smi_only_;
 };
 
 
@@ -1251,6 +1450,8 @@ class CountOperation: public Expression {
 
   virtual void MarkAsStatement() { is_prefix_ = true; }
 
+  virtual bool IsInlineable() const;
+
  private:
   bool is_prefix_;
   IncrementOperation* increment_;
@@ -1264,7 +1465,7 @@ class CompareOperation: public Expression {
                    Expression* left,
                    Expression* right,
                    int pos)
-      : op_(op), left_(left), right_(right), pos_(pos) {
+      : op_(op), left_(left), right_(right), pos_(pos), compare_type_(NONE) {
     ASSERT(Token::IsCompareOp(op));
   }
 
@@ -1275,11 +1476,21 @@ class CompareOperation: public Expression {
   Expression* right() const { return right_; }
   int position() const { return pos_; }
 
+  virtual bool IsInlineable() const;
+
+  // Type feedback information.
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
+  bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
+
  private:
   Token::Value op_;
   Expression* left_;
   Expression* right_;
   int pos_;
+
+  enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
+  CompareTypeFeedback compare_type_;
 };
 
 
@@ -1290,6 +1501,8 @@ class CompareToNull: public Expression {
 
   DECLARE_NODE_TYPE(CompareToNull)
 
+  virtual bool IsInlineable() const;
+
   bool is_strict() const { return is_strict_; }
   Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
   Expression* expression() const { return expression_; }
@@ -1315,6 +1528,8 @@ class Conditional: public Expression {
 
   DECLARE_NODE_TYPE(Conditional)
 
+  virtual bool IsInlineable() const;
+
   Expression* condition() const { return condition_; }
   Expression* then_expression() const { return then_expression_; }
   Expression* else_expression() const { return else_expression_; }
@@ -1333,14 +1548,12 @@ class Conditional: public Expression {
 
 class Assignment: public Expression {
  public:
-  Assignment(Token::Value op, Expression* target, Expression* value, int pos)
-      : op_(op), target_(target), value_(value), pos_(pos),
-        block_start_(false), block_end_(false) {
-    ASSERT(Token::IsAssignmentOp(op));
-  }
+  Assignment(Token::Value op, Expression* target, Expression* value, int pos);
 
   DECLARE_NODE_TYPE(Assignment)
 
+  virtual bool IsInlineable() const;
+
   Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }
 
   Token::Value binary_op() const;
@@ -1349,6 +1562,8 @@ class Assignment: public Expression {
   Expression* target() const { return target_; }
   Expression* value() const { return value_; }
   int position() { return pos_; }
+  BinaryOperation* binary_operation() const { return binary_operation_; }
+
   // This check relies on the definition order of token in token.h.
   bool is_compound() const { return op() > Token::ASSIGN; }
 
@@ -1361,13 +1576,31 @@ class Assignment: public Expression {
   void mark_block_start() { block_start_ = true; }
   void mark_block_end() { block_end_ = true; }
 
+  // Type feedback information.
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  virtual bool IsMonomorphic() { return is_monomorphic_; }
+  virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
+  virtual Handle<Map> GetMonomorphicReceiverType() {
+    return monomorphic_receiver_type_;
+  }
+
+  // Bailout support.
+  int compound_bailout_id() const { return compound_bailout_id_; }
+
  private:
   Token::Value op_;
   Expression* target_;
   Expression* value_;
   int pos_;
+  BinaryOperation* binary_operation_;
+  int compound_bailout_id_;
+
   bool block_start_;
   bool block_end_;
+
+  bool is_monomorphic_;
+  ZoneMapList* receiver_types_;
+  Handle<Map> monomorphic_receiver_type_;
 };
 
 
@@ -1417,11 +1650,7 @@ class FunctionLiteral: public Expression {
         function_token_position_(RelocInfo::kNoPosition),
         inferred_name_(Heap::empty_string()),
         try_full_codegen_(false),
-        pretenure_(false) {
-#ifdef DEBUG
-    already_compiled_ = false;
-#endif
-  }
+        pretenure_(false) { }
 
   DECLARE_NODE_TYPE(FunctionLiteral)
 
@@ -1446,6 +1675,7 @@ class FunctionLiteral: public Expression {
   int num_parameters() { return num_parameters_; }
 
   bool AllowsLazyCompilation();
+  bool AllowOptimize();
 
   Handle<String> debug_name() const {
     if (name_->length() > 0) return name_;
@@ -1463,13 +1693,6 @@ class FunctionLiteral: public Expression {
   bool pretenure() { return pretenure_; }
   void set_pretenure(bool value) { pretenure_ = value; }
 
-#ifdef DEBUG
-  void mark_as_compiled() {
-    ASSERT(!already_compiled_);
-    already_compiled_ = true;
-  }
-#endif
-
  private:
   Handle<String> name_;
   Scope* scope_;
@@ -1487,9 +1710,6 @@ class FunctionLiteral: public Expression {
   Handle<String> inferred_name_;
   bool try_full_codegen_;
   bool pretenure_;
-#ifdef DEBUG
-  bool already_compiled_;
-#endif
 };
 
 
@@ -1894,8 +2114,12 @@ class AstVisitor BASE_EMBEDDED {
   // node, calling SetStackOverflow will make sure that the visitor
   // bails out without visiting more nodes.
   void SetStackOverflow() { stack_overflow_ = true; }
+  void ClearStackOverflow() { stack_overflow_ = false; }
+
+  // Nodes not appearing in the AST, including slots.
+  virtual void VisitSlot(Slot* node) { UNREACHABLE(); }
 
-  // Individual nodes
+  // Individual AST nodes.
 #define DEF_VISIT(type)                         \
   virtual void Visit##type(type* node) = 0;
   AST_NODE_LIST(DEF_VISIT)
diff --git a/src/atomicops.h b/src/atomicops.h
new file mode 100644 (file)
index 0000000..72a0d0f
--- /dev/null
@@ -0,0 +1,165 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The routines exported by this module are subtle.  If you use them, even if
+// you get the code right, it will depend on careful reasoning about atomicity
+// and memory ordering; it will be less readable, and harder to maintain.  If
+// you plan to use these routines, you should have a good reason, such as solid
+// evidence that performance would otherwise suffer, or there being no
+// alternative.  You should assume only properties explicitly guaranteed by the
+// specifications in this file.  You are almost certainly _not_ writing code
+// just for the x86; if you assume x86 semantics, x86 hardware bugs and
+// implementations on other architectures will cause your code to break.  If you
+// do not know what you are doing, avoid these routines, and use a Mutex.
+//
+// It is incorrect to make direct assignments to/from an atomic variable.
+// You should use one of the Load or Store routines.  The NoBarrier
+// versions are provided when no barriers are needed:
+//   NoBarrier_Store()
+//   NoBarrier_Load()
+// Although there is currently no compiler enforcement, you are encouraged
+// to use these.
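+//
+// For example (a sketch; "flag" is a hypothetical shared variable):
+//   Atomic32 flag = 0;
+//   NoBarrier_Store(&flag, 1);               // instead of: flag = 1;
+//   Atomic32 seen = NoBarrier_Load(&flag);   // instead of: seen = flag;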
+//
+
+#ifndef V8_ATOMICOPS_H_
+#define V8_ATOMICOPS_H_
+
+#include "../include/v8.h"
+#include "globals.h"
+
+namespace v8 {
+namespace internal {
+
+typedef int32_t Atomic32;
+#ifdef V8_HOST_ARCH_64_BIT
+// We need to be able to go between Atomic64 and AtomicWord implicitly.  This
+// means Atomic64 and AtomicWord should be the same type on 64-bit.
+#if defined(__APPLE__)
+// MacOS is an exception to the implicit conversion rule above,
+// because it uses long for intptr_t.
+typedef int64_t Atomic64;
+#else
+typedef intptr_t Atomic64;
+#endif
+#endif
+
+// Use AtomicWord for a machine-sized pointer.  It will use the Atomic32 or
+// Atomic64 routines below, depending on your architecture.
+typedef intptr_t AtomicWord;
+
+// Atomically execute:
+//      result = *ptr;
+//      if (*ptr == old_value)
+//        *ptr = new_value;
+//      return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr".
+//
+// This routine implies no memory barriers.
+Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                  Atomic32 old_value,
+                                  Atomic32 new_value);
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr.  This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
+
+// Atomically increment *ptr by "increment".  Returns the new value of
+// *ptr with the increment applied.  This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
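+//
+// For example, a shared counter could be bumped as (a sketch; "count" is a
+// hypothetical Atomic32 variable):
+//   Atomic32 new_count = NoBarrier_AtomicIncrement(&count, 1);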
+
+Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                 Atomic32 increment);
+
+// The following lower-level operations are typically useful only to people
+// implementing higher-level synchronization operations like spinlocks,
+// mutexes, and condition-variables.  They combine CompareAndSwap(), a load, or
+// a store with appropriate memory-ordering instructions.  "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation.  "Barrier" operations have both "Acquire" and "Release"
+// semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
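+//
+// A minimal spinlock sketch built on these primitives (illustrative only;
+// Lock/Unlock are hypothetical names, not part of this module):
+//   void Lock(volatile Atomic32* lock) {
+//     while (Acquire_CompareAndSwap(lock, 0, 1) != 0) { /* spin */ }
+//   }
+//   void Unlock(volatile Atomic32* lock) {
+//     Release_Store(lock, 0);
+//   }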
+Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                Atomic32 old_value,
+                                Atomic32 new_value);
+Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                Atomic32 old_value,
+                                Atomic32 new_value);
+
+void MemoryBarrier();
+void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
+void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
+void Release_Store(volatile Atomic32* ptr, Atomic32 value);
+
+Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
+Atomic32 Acquire_Load(volatile const Atomic32* ptr);
+Atomic32 Release_Load(volatile const Atomic32* ptr);
+
+// 64-bit atomic operations (only available on 64-bit processors).
+#ifdef V8_HOST_ARCH_64_BIT
+Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                  Atomic64 old_value,
+                                  Atomic64 new_value);
+Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
+Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+
+Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                Atomic64 old_value,
+                                Atomic64 new_value);
+Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                Atomic64 old_value,
+                                Atomic64 new_value);
+void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
+void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
+void Release_Store(volatile Atomic64* ptr, Atomic64 value);
+Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
+Atomic64 Acquire_Load(volatile const Atomic64* ptr);
+Atomic64 Release_Load(volatile const Atomic64* ptr);
+#endif  // V8_HOST_ARCH_64_BIT
+
+} }  // namespace v8::internal
+
+// Include our platform specific implementation.
+#if defined(_MSC_VER) && \
+  (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
+#include "atomicops_internals_x86_msvc.h"
+#elif defined(__APPLE__) && \
+  (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
+#include "atomicops_internals_x86_macosx.h"
+#elif defined(__GNUC__) && \
+  (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
+#include "atomicops_internals_x86_gcc.h"
+#elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
+#include "atomicops_internals_arm_gcc.h"
+#else
+#error "Atomic operations are not supported on your platform"
+#endif
+
+#endif  // V8_ATOMICOPS_H_
diff --git a/src/atomicops_internals_arm_gcc.h b/src/atomicops_internals_arm_gcc.h
new file mode 100644 (file)
index 0000000..6c30256
--- /dev/null
@@ -0,0 +1,145 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+//
+// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
+
+#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
+#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
+
+namespace v8 {
+namespace internal {
+
+// 0xffff0fc0 is the hard coded address of a function provided by
+// the kernel which implements an atomic compare-exchange. On older
+// ARM architecture revisions (pre-v6) this may be implemented using
+// a syscall. This address is stable, and in active use (hard coded)
+// by at least glibc-2.7 and the Android C library.
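+// The helper returns zero when the exchange succeeds and non-zero when it
+// fails, which is why the loops below treat a zero result as success.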
+typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
+                                           Atomic32 new_value,
+                                           volatile Atomic32* ptr);
+LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
+    (LinuxKernelCmpxchgFunc) 0xffff0fc0;
+
+typedef void (*LinuxKernelMemoryBarrierFunc)(void);
+LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
+    (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
+
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value = *ptr;
+  do {
+    if (!pLinuxKernelCmpxchg(old_value, new_value,
+                             const_cast<Atomic32*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  do {
+    old_value = *ptr;
+  } while (pLinuxKernelCmpxchg(old_value, new_value,
+                               const_cast<Atomic32*>(ptr)));
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  for (;;) {
+    // Atomic exchange the old value with an incremented one.
+    Atomic32 old_value = *ptr;
+    Atomic32 new_value = old_value + increment;
+    if (pLinuxKernelCmpxchg(old_value, new_value,
+                            const_cast<Atomic32*>(ptr)) == 0) {
+      // The exchange took place as expected.
+      return new_value;
+    }
+    // Otherwise, *ptr changed mid-loop and we need to retry.
+  }
+}
+
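+// The kernel cmpxchg helper is assumed to provide the necessary memory
+// barriers itself, so the Acquire/Release variants below simply reuse the
+// NoBarrier implementation.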
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void MemoryBarrier() {
+  pLinuxKernelMemoryBarrier();
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
diff --git a/src/atomicops_internals_x86_gcc.cc b/src/atomicops_internals_x86_gcc.cc
new file mode 100644 (file)
index 0000000..a572564
--- /dev/null
@@ -0,0 +1,126 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This module gets enough CPU information to optimize the
+// atomicops module on x86.
+
+#include <string.h>
+
+#include "atomicops.h"
+
+// This file only makes sense with atomicops_internals_x86_gcc.h -- it
+// depends on structs that are defined in that file.  If atomicops.h
+// doesn't sub-include that file, then we aren't needed, and shouldn't
+// try to do anything.
+#ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
+
+// Inline cpuid instruction.  In PIC compilations, %ebx contains the address
+// of the global offset table.  To avoid breaking such executables, this code
+// must preserve that register's value across cpuid instructions.
+#if defined(__i386__)
+#define cpuid(a, b, c, d, inp) \
+  asm("mov %%ebx, %%edi\n"     \
+      "cpuid\n"                \
+      "xchg %%edi, %%ebx\n"    \
+      : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
+#elif defined(__x86_64__)
+#define cpuid(a, b, c, d, inp) \
+  asm("mov %%rbx, %%rdi\n"     \
+      "cpuid\n"                \
+      "xchg %%rdi, %%rbx\n"    \
+      : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
+#endif
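+
+// Usage sketch: cpuid(eax, ebx, ecx, edx, 0) issues CPUID with eax = 0 and
+// leaves the resulting register values in the four named variables.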
+
+#if defined(cpuid)        // initialize the struct only on x86
+
+// Set the flags so that code will run correctly and conservatively: even
+// if we haven't been initialized yet, we are probably single threaded, and
+// our default values should be pretty safe.
+struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
+  false,          // bug can't exist before process spawns multiple threads
+  false,          // no SSE2
+};
+
+// Initialize the AtomicOps_Internalx86CPUFeatures struct.
+static void AtomicOps_Internalx86CPUFeaturesInit() {
+  uint32_t eax;
+  uint32_t ebx;
+  uint32_t ecx;
+  uint32_t edx;
+
+  // Get vendor string (issue CPUID with eax = 0)
+  cpuid(eax, ebx, ecx, edx, 0);
+  char vendor[13];
+  memcpy(vendor, &ebx, 4);
+  memcpy(vendor + 4, &edx, 4);
+  memcpy(vendor + 8, &ecx, 4);
+  vendor[12] = 0;
+
+  // get feature flags in ecx/edx, and family/model in eax
+  cpuid(eax, ebx, ecx, edx, 1);
+
+  int family = (eax >> 8) & 0xf;        // family and model fields
+  int model = (eax >> 4) & 0xf;
+  if (family == 0xf) {                  // use extended family and model fields
+    family += (eax >> 20) & 0xff;
+    model += ((eax >> 16) & 0xf) << 4;
+  }
+
+  // Opteron Rev E has a bug in which on very rare occasions a locked
+  // instruction doesn't act as a read-acquire barrier if followed by a
+  // non-locked read-modify-write instruction.  Rev F has this bug in
+  // pre-release versions, but not in versions released to customers,
+  // so we test only for Rev E, which is family 15, model 32..63 inclusive.
+  if (strcmp(vendor, "AuthenticAMD") == 0 &&       // AMD
+      family == 15 &&
+      32 <= model && model <= 63) {
+    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
+  } else {
+    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
+  }
+
+  // edx bit 26 is SSE2 which we use to tell us whether we can use mfence
+  AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
+}
+
+namespace {
+
+class AtomicOpsx86Initializer {
+ public:
+  AtomicOpsx86Initializer() {
+    AtomicOps_Internalx86CPUFeaturesInit();
+  }
+};
+
+// A global to get us initialized on startup via static initialization :/
+AtomicOpsx86Initializer g_initer;
+
+}  // namespace
+
+#endif  // if x86
+
+#endif  // ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/src/atomicops_internals_x86_gcc.h b/src/atomicops_internals_x86_gcc.h
new file mode 100644 (file)
index 0000000..3f17fa0
--- /dev/null
@@ -0,0 +1,287 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
+#define V8_ATOMICOPS_INTERNALS_X86_GCC_H_
+
+// This struct is not part of the public API of this module; clients may not
+// use it.
+// Features of this x86.  Values may not be correct before main() is run,
+// but are set conservatively.
+struct AtomicOps_x86CPUFeatureStruct {
+  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
+                             // after acquire compare-and-swap.
+  bool has_sse2;             // Processor has SSE2.
+};
+extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+namespace v8 {
+namespace internal {
+
+// 32-bit low-level operations on any platform.
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev;
+  __asm__ __volatile__("lock; cmpxchgl %1,%2"
+                       : "=a" (prev)
+                       : "q" (new_value), "m" (*ptr), "0" (old_value)
+                       : "memory");
+  return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
+                       : "=r" (new_value)
+                       : "m" (*ptr), "0" (new_value)
+                       : "memory");
+  return new_value;  // Now it's the previous value.
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 temp = increment;
+  __asm__ __volatile__("lock; xaddl %0,%1"
+                       : "+r" (temp), "+m" (*ptr)
+                       : : "memory");
+  // temp now holds the old value of *ptr
+  return temp + increment;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  Atomic32 temp = increment;
+  __asm__ __volatile__("lock; xaddl %0,%1"
+                       : "+r" (temp), "+m" (*ptr)
+                       : : "memory");
+  // temp now holds the old value of *ptr
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return temp + increment;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return x;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+#if defined(__x86_64__)
+
+// 64-bit implementations of memory barrier can be simpler, because
+// "mfence" is guaranteed to exist.
+inline void MemoryBarrier() {
+  __asm__ __volatile__("mfence" : : : "memory");
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+#else
+
+inline void MemoryBarrier() {
+  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+    __asm__ __volatile__("mfence" : : : "memory");
+  } else {  // mfence is faster but not present on PIII
+    Atomic32 x = 0;
+    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
+  }
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+    *ptr = value;
+    __asm__ __volatile__("mfence" : : : "memory");
+  } else {
+    NoBarrier_AtomicExchange(ptr, value);  // Acts as a barrier on PIII.
+  }
+}
+#endif
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  ATOMICOPS_COMPILER_BARRIER();
+  *ptr = value;  // An x86 store acts as a release barrier.
+  // See comments in Atomic64 version of Release_Store(), below.
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;  // An x86 load acts as an acquire barrier.
+  // See comments in Atomic64 version of Release_Store(), below.
+  ATOMICOPS_COMPILER_BARRIER();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#if defined(__x86_64__)
+
+// 64-bit low-level operations on 64-bit platform.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev;
+  __asm__ __volatile__("lock; cmpxchgq %1,%2"
+                       : "=a" (prev)
+                       : "q" (new_value), "m" (*ptr), "0" (old_value)
+                       : "memory");
+  return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
+                       : "=r" (new_value)
+                       : "m" (*ptr), "0" (new_value)
+                       : "memory");
+  return new_value;  // Now it's the previous value.
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  Atomic64 temp = increment;
+  __asm__ __volatile__("lock; xaddq %0,%1"
+                       : "+r" (temp), "+m" (*ptr)
+                       : : "memory");
+  // temp now contains the previous value of *ptr
+  return temp + increment;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  Atomic64 temp = increment;
+  __asm__ __volatile__("lock; xaddq %0,%1"
+                       : "+r" (temp), "+m" (*ptr)
+                       : : "memory");
+  // temp now contains the previous value of *ptr
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return temp + increment;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  ATOMICOPS_COMPILER_BARRIER();
+
+  *ptr = value;  // An x86 store acts as a release barrier
+                 // for current AMD/Intel chips as of Jan 2008.
+                 // See also Acquire_Load(), below.
+
+  // When new chips come out, check:
+  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+  //  System Programming Guide, Chatper 7: Multiple-processor management,
+  //  Section 7.2, Memory Ordering.
+  // Last seen at:
+  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
+  //
+  // x86 stores/loads fail to act as barriers for a few instructions (clflush
+  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
+  // not generated by the compiler, and are rare.  Users of these instructions
+  // need to know about cache behaviour in any case since all of these involve
+  // either flushing cache lines or non-temporal cache hints.
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;  // An x86 load acts as an acquire barrier,
+                          // for current AMD/Intel chips as of Jan 2008.
+                          // See also Release_Store(), above.
+  ATOMICOPS_COMPILER_BARRIER();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return x;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+#endif  // defined(__x86_64__)
+
+} }  // namespace v8::internal
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif  // V8_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/src/atomicops_internals_x86_macosx.h b/src/atomicops_internals_x86_macosx.h
new file mode 100644 (file)
index 0000000..2bac006
--- /dev/null
@@ -0,0 +1,301 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
+#define V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
+
+#include <libkern/OSAtomic.h>
+
+namespace v8 {
+namespace internal {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap32(old_value, new_value,
+                                 const_cast<Atomic32*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  do {
+    old_value = *ptr;
+  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
+                                     const_cast<Atomic32*>(ptr)));
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
+                                          Atomic32 increment) {
+  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
+                                        Atomic32 increment) {
+  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
+}
+
+inline void MemoryBarrier() {
+  OSMemoryBarrier();
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
+                                        const_cast<Atomic32*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#ifdef __LP64__
+
+// 64-bit implementation on 64-bit platform
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap64(old_value, new_value,
+                                 const_cast<Atomic64*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
+                                         Atomic64 new_value) {
+  Atomic64 old_value;
+  do {
+    old_value = *ptr;
+  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
+                                     const_cast<Atomic64*>(ptr)));
+  return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
+                                          Atomic64 increment) {
+  return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
+                                        Atomic64 increment) {
+  return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
+                                        const_cast<Atomic64*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  // The libkern interface does not distinguish between
+  // Acquire and Release memory barriers; they are equivalent.
+  return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
+  Atomic64 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#endif  // defined(__LP64__)
+
+// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always
+// different types on the Mac, even when they are the same size.  We need to
+// explicitly cast from AtomicWord to Atomic32/64 to implement the AtomicWord
+// interface.
+#ifdef __LP64__
+#define AtomicWordCastType Atomic64
+#else
+#define AtomicWordCastType Atomic32
+#endif
+
+inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
+                                           AtomicWord old_value,
+                                           AtomicWord new_value) {
+  return NoBarrier_CompareAndSwap(
+      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
+      old_value, new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
+                                           AtomicWord new_value) {
+  return NoBarrier_AtomicExchange(
+      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
+                                            AtomicWord increment) {
+  return NoBarrier_AtomicIncrement(
+      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
+}
+
+inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
+                                          AtomicWord increment) {
+  return Barrier_AtomicIncrement(
+      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
+}
+
+inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
+                                         AtomicWord old_value,
+                                         AtomicWord new_value) {
+  return v8::internal::Acquire_CompareAndSwap(
+      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
+      old_value, new_value);
+}
+
+inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
+                                         AtomicWord old_value,
+                                         AtomicWord new_value) {
+  return v8::internal::Release_CompareAndSwap(
+      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
+      old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
+  NoBarrier_Store(
+      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
+}
+
+inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
+  return v8::internal::Acquire_Store(
+      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
+}
+
+inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
+  return v8::internal::Release_Store(
+      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
+}
+
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
+  return NoBarrier_Load(
+      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
+}
+
+inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
+  return v8::internal::Acquire_Load(
+      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
+}
+
+inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
+  return v8::internal::Release_Load(
+      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
+}
+
+#undef AtomicWordCastType
+
+} }  // namespace v8::internal
+
+#endif  // V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
diff --git a/src/atomicops_internals_x86_msvc.h b/src/atomicops_internals_x86_msvc.h
new file mode 100644 (file)
index 0000000..a7753e4
--- /dev/null
@@ -0,0 +1,202 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
+#define V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
+
+#include "win32-headers.h"
+
+namespace v8 {
+namespace internal {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  LONG result = InterlockedCompareExchange(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(new_value),
+      static_cast<LONG>(old_value));
+  return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  LONG result = InterlockedExchange(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(new_value));
+  return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  return InterlockedExchangeAdd(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(increment)) + increment;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
+#error "We require at least vs2005 for MemoryBarrier"
+#endif
+inline void MemoryBarrier() {
+  // We use MemoryBarrier from WinNT.h
+  ::MemoryBarrier();
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  NoBarrier_AtomicExchange(ptr, value);  // Acts as a barrier in this implementation.
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
+  // See comments in Atomic64 version of Release_Store() below.
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
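+  // As in the GCC x86 implementation, an x86 load acts as an acquire
+  // barrier, so no explicit barrier instruction is needed here.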
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#if defined(_WIN64)
+
+// 64-bit low-level operations on 64-bit platform.
+
+STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID));
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  PVOID result = InterlockedCompareExchangePointer(
+    reinterpret_cast<volatile PVOID*>(ptr),
+    reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
+  return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  PVOID result = InterlockedExchangePointer(
+    reinterpret_cast<volatile PVOID*>(ptr),
+    reinterpret_cast<PVOID>(new_value));
+  return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  return InterlockedExchangeAdd64(
+      reinterpret_cast<volatile LONGLONG*>(ptr),
+      static_cast<LONGLONG>(increment)) + increment;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  NoBarrier_AtomicExchange(ptr, value);  // Acts as a barrier in this implementation.
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
+
+  // When new chips come out, check:
+  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+  //  System Programming Guide, Chapter 7: Multiple-processor management,
+  //  Section 7.2, Memory Ordering.
+  // Last seen at:
+  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+
+#endif  // defined(_WIN64)
+
+} }  // namespace v8::internal
+
+#endif  // V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
index f60a975..800c437 100644 (file)
@@ -500,6 +500,24 @@ Handle<JSFunction> Genesis::CreateEmptyFunction() {
 }
 
 
+static void AddToWeakGlobalContextList(Context* context) {
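+  // Global contexts are chained into a weak list through their
+  // NEXT_CONTEXT_LINK slot; the head of the list is kept in
+  // Heap::global_contexts_list().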
+  ASSERT(context->IsGlobalContext());
+#ifdef DEBUG
+  { // NOLINT
+    ASSERT(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined());
+    // Check that context is not in the list yet.
+    for (Object* current = Heap::global_contexts_list();
+         !current->IsUndefined();
+         current = Context::cast(current)->get(Context::NEXT_CONTEXT_LINK)) {
+      ASSERT(current != context);
+    }
+  }
+#endif
+  context->set(Context::NEXT_CONTEXT_LINK, Heap::global_contexts_list());
+  Heap::set_global_contexts_list(context);
+}
+
+
 void Genesis::CreateRoots() {
   // Allocate the global context FixedArray first and then patch the
   // closure and extension object later (we need the empty function
@@ -508,6 +526,7 @@ void Genesis::CreateRoots() {
   global_context_ =
       Handle<Context>::cast(
           GlobalHandles::Create(*Factory::NewGlobalContext()));
+  AddToWeakGlobalContextList(*global_context_);
   Top::set_context(*global_context());
 
   // Allocate the message listeners object.
@@ -1596,7 +1615,7 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
         = Handle<SharedFunctionInfo>(function->shared());
     if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
     // Set the code object on the function object.
-    function->set_code(function->shared()->code());
+    function->ReplaceCode(function->shared()->code());
     builtins->set_javascript_builtin_code(id, shared->code());
   }
   return true;
@@ -1784,6 +1803,7 @@ Genesis::Genesis(Handle<Object> global_object,
   if (!new_context.is_null()) {
     global_context_ =
       Handle<Context>::cast(GlobalHandles::Create(*new_context));
+    AddToWeakGlobalContextList(*global_context_);
     Top::set_context(*global_context_);
     i::Counters::contexts_created_by_snapshot.Increment();
     result_ = global_context_;
@@ -1819,11 +1839,6 @@ Genesis::Genesis(Handle<Object> global_object,
     i::Counters::contexts_created_from_scratch.Increment();
   }
 
-  // Add this context to the weak list of global contexts.
-  (*global_context_)->set(Context::NEXT_CONTEXT_LINK,
-                          Heap::global_contexts_list());
-  Heap::set_global_contexts_list(*global_context_);
-
   result_ = global_context_;
 }
 
index e88ef6f..a833119 100644 (file)
@@ -32,6 +32,7 @@
 #include "bootstrapper.h"
 #include "builtins.h"
 #include "ic-inl.h"
+#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -1031,9 +1032,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
     {
       // Leaving JavaScript.
       VMState state(EXTERNAL);
-#ifdef ENABLE_LOGGING_AND_PROFILING
-      state.set_external_callback(v8::ToCData<Address>(callback_obj));
-#endif
+      ExternalCallbackScope call_scope(v8::ToCData<Address>(callback_obj));
       value = callback(new_args);
     }
     if (value.IsEmpty()) {
@@ -1103,9 +1102,7 @@ BUILTIN(FastHandleApiCall) {
   {
     // Leaving JavaScript.
     VMState state(EXTERNAL);
-#ifdef ENABLE_LOGGING_AND_PROFILING
-    state.set_external_callback(v8::ToCData<Address>(callback_obj));
-#endif
+    ExternalCallbackScope call_scope(v8::ToCData<Address>(callback_obj));
     v8::InvocationCallback callback =
         v8::ToCData<v8::InvocationCallback>(callback_obj);
 
@@ -1169,9 +1166,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
     {
       // Leaving JavaScript.
       VMState state(EXTERNAL);
-#ifdef ENABLE_LOGGING_AND_PROFILING
-      state.set_external_callback(v8::ToCData<Address>(callback_obj));
-#endif
+      ExternalCallbackScope call_scope(v8::ToCData<Address>(callback_obj));
       value = callback(new_args);
     }
     if (value.IsEmpty()) {
@@ -1332,6 +1327,11 @@ static void Generate_StoreIC_ArrayLength(MacroAssembler* masm) {
 }
 
 
+static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) {
+  StoreIC::GenerateGlobalProxy(masm);
+}
+
+
 static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
   KeyedStoreIC::GenerateGeneric(masm);
 }
@@ -1581,4 +1581,5 @@ const char* Builtins::Lookup(byte* pc) {
   return NULL;
 }
 
+
 } }  // namespace v8::internal
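
A hedged standalone model of what an RAII scope like ExternalCallbackScope does: publish the callback address on entry and restore the previous value on exit, replacing the old ifdef'd set_external_callback calls. The global and types below are illustrative assumptions, not the actual V8 declarations:

#include <cstdio>

typedef unsigned char* Address;

static Address current_external_callback = nullptr;

class ExternalCallbackScope {
 public:
  explicit ExternalCallbackScope(Address callback)
      : previous_(current_external_callback) {
    current_external_callback = callback;  // visible to the profiler
  }
  ~ExternalCallbackScope() { current_external_callback = previous_; }

 private:
  Address previous_;
};

int main() {
  Address cb = reinterpret_cast<Address>(0x1234);
  {
    ExternalCallbackScope call_scope(cb);
    // ... invoke the external callback here ...
  }
  // Scope exit restored the previous (null) callback address.
  std::printf("restored: %p\n",
              static_cast<void*>(current_external_callback));
  return 0;
}
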
index b5e8c4e..d2b4be2 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -71,6 +71,10 @@ enum BuiltinExtraArguments {
   V(JSEntryTrampoline,          BUILTIN, UNINITIALIZED)                   \
   V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED)                   \
   V(LazyCompile,                BUILTIN, UNINITIALIZED)                   \
+  V(LazyRecompile,              BUILTIN, UNINITIALIZED)                   \
+  V(NotifyDeoptimized,          BUILTIN, UNINITIALIZED)                   \
+  V(NotifyLazyDeoptimized,      BUILTIN, UNINITIALIZED)                   \
+  V(NotifyOSR,                  BUILTIN, UNINITIALIZED)                   \
                                                                           \
   V(LoadIC_Miss,                BUILTIN, UNINITIALIZED)                   \
   V(KeyedLoadIC_Miss,           BUILTIN, UNINITIALIZED)                   \
@@ -102,6 +106,7 @@ enum BuiltinExtraArguments {
   V(StoreIC_ArrayLength,        STORE_IC, MONOMORPHIC)                    \
   V(StoreIC_Normal,             STORE_IC, MONOMORPHIC)                    \
   V(StoreIC_Megamorphic,        STORE_IC, MEGAMORPHIC)                    \
+  V(StoreIC_GlobalProxy,        STORE_IC, MEGAMORPHIC)                    \
                                                                           \
   V(KeyedStoreIC_Initialize,    KEYED_STORE_IC, UNINITIALIZED)            \
   V(KeyedStoreIC_Generic,       KEYED_STORE_IC, MEGAMORPHIC)              \
@@ -120,7 +125,9 @@ enum BuiltinExtraArguments {
   V(ArrayCode,                  BUILTIN, UNINITIALIZED)                   \
   V(ArrayConstructCode,         BUILTIN, UNINITIALIZED)                   \
                                                                           \
-  V(StringConstructCode,        BUILTIN, UNINITIALIZED)
+  V(StringConstructCode,        BUILTIN, UNINITIALIZED)                   \
+                                                                          \
+  V(OnStackReplacement,         BUILTIN, UNINITIALIZED)
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -256,6 +263,10 @@ class Builtins : public AllStatic {
   static void Generate_JSEntryTrampoline(MacroAssembler* masm);
   static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
   static void Generate_LazyCompile(MacroAssembler* masm);
+  static void Generate_LazyRecompile(MacroAssembler* masm);
+  static void Generate_NotifyDeoptimized(MacroAssembler* masm);
+  static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
+  static void Generate_NotifyOSR(MacroAssembler* masm);
   static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
 
   static void Generate_FunctionCall(MacroAssembler* masm);
@@ -265,6 +276,8 @@ class Builtins : public AllStatic {
   static void Generate_ArrayConstructCode(MacroAssembler* masm);
 
   static void Generate_StringConstructCode(MacroAssembler* masm);
+
+  static void Generate_OnStackReplacement(MacroAssembler* masm);
 };
 
 } }  // namespace v8::internal
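
The builtins table above is a classic X-macro: one list macro is expanded repeatedly with different definitions of V to generate matching enums, declarations, and name tables. A small self-contained sketch of the pattern (the entries are illustrative, not the full list):

#include <cstdio>

#define BUILTIN_LIST(V) \
  V(LazyCompile)        \
  V(LazyRecompile)      \
  V(NotifyDeoptimized)  \
  V(OnStackReplacement)

// First expansion: an enum with one entry per builtin.
enum BuiltinId {
#define DEF_ENUM(name) k##name,
  BUILTIN_LIST(DEF_ENUM)
#undef DEF_ENUM
  kBuiltinCount
};

// Second expansion: a parallel table of printable names.
#define DEF_NAME(name) #name,
static const char* const kBuiltinNames[] = { BUILTIN_LIST(DEF_NAME) };
#undef DEF_NAME

int main() {
  for (int i = 0; i < kBuiltinCount; i++) {
    std::printf("%d: %s\n", i, kBuiltinNames[i]);
  }
  return 0;
}
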
index d49f97f..aa557f0 100644 (file)
@@ -281,7 +281,7 @@ bool EnableSlowAsserts();
 // safely enabled in release mode. Moreover, the ((void) 0) expression
 // obeys different syntax rules than typedef's, e.g. it can't appear
 // inside class declaration, this leads to inconsistency between debug
-// and release compilation modes behaviour.
+// and release compilation modes behavior.
 #define STATIC_ASSERT(test)  STATIC_CHECK(test)
 
 #define ASSERT_NOT_NULL(p)  ASSERT_NE(NULL, p)
index 8b9198f..1b0d8b0 100644 (file)
@@ -103,6 +103,7 @@ Handle<Code> CodeStub::GetCode() {
         GetICState());
     Handle<Code> new_object = Factory::NewCode(desc, flags, masm.CodeObject());
     RecordCodeGeneration(*new_object, &masm);
+    FinishCode(*new_object);
 
     // Update the dictionary and the root in Heap.
     Handle<NumberDictionary> dict =
@@ -142,6 +143,7 @@ MaybeObject* CodeStub::TryGetCode() {
     }
     code = Code::cast(new_object);
     RecordCodeGeneration(code, &masm);
+    FinishCode(code);
 
     // Try to update the code cache but do not fail if unable.
     MaybeObject* maybe_new_object =
@@ -170,4 +172,29 @@ const char* CodeStub::MajorName(CodeStub::Major major_key,
 }
 
 
+int ICCompareStub::MinorKey() {
+  return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
+}
+
+
+void ICCompareStub::Generate(MacroAssembler* masm) {
+  switch (state_) {
+    case CompareIC::UNINITIALIZED:
+      GenerateMiss(masm);
+      break;
+    case CompareIC::SMIS:
+      GenerateSmis(masm);
+      break;
+    case CompareIC::HEAP_NUMBERS:
+      GenerateHeapNumbers(masm);
+      break;
+    case CompareIC::OBJECTS:
+      GenerateObjects(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
 } }  // namespace v8::internal
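
ICCompareStub::MinorKey above packs two fields into one integer with V8's BitField templates. A standalone sketch of that encode/decode scheme, using a hand-rolled BitField stand-in and made-up field values:

#include <cassert>
#include <cstdio>

// Stand-in for V8's BitField<type, start, length> template.
template <typename T, int shift, int size>
struct BitField {
  static int encode(T value) { return static_cast<int>(value) << shift; }
  static T decode(int key) {
    return static_cast<T>((key >> shift) & ((1 << size) - 1));
  }
};

typedef BitField<int, 0, 3> OpField;     // 3 bits: op_ - Token::EQ
typedef BitField<int, 3, 5> StateField;  // 5 bits: CompareIC::State

int main() {
  int op = 4;     // made-up op index
  int state = 2;  // made-up compare state
  int minor_key = OpField::encode(op) | StateField::encode(state);
  assert(OpField::decode(minor_key) == op);
  assert(StateField::decode(minor_key) == state);
  std::printf("minor key: 0x%x\n", minor_key);  // 0x14
  return 0;
}
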
index b156647..1010e95 100644 (file)
@@ -29,7 +29,6 @@
 #define V8_CODE_STUBS_H_
 
 #include "globals.h"
-#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -39,11 +38,15 @@ namespace internal {
 #define CODE_STUB_LIST_ALL_PLATFORMS(V)  \
   V(CallFunction)                        \
   V(GenericBinaryOp)                     \
+  V(TypeRecordingBinaryOp)               \
   V(StringAdd)                           \
+  V(StringCharAt)                        \
   V(SubString)                           \
   V(StringCompare)                       \
   V(SmiOp)                               \
   V(Compare)                             \
+  V(CompareIC)                           \
+  V(MathPow)                             \
   V(RecordWrite)                         \
   V(ConvertToDouble)                     \
   V(WriteInt32ToHeapNumber)              \
@@ -60,6 +63,7 @@ namespace internal {
   V(CounterOp)                           \
   V(ArgumentsAccess)                     \
   V(RegExpExec)                          \
+  V(RegExpConstructResult)               \
   V(NumberToString)                      \
   V(CEntry)                              \
   V(JSEntry)                             \
@@ -125,7 +129,7 @@ class CodeStub BASE_EMBEDDED {
   virtual ~CodeStub() {}
 
  protected:
-  static const int kMajorBits = 5;
+  static const int kMajorBits = 6;
   static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
 
  private:
@@ -143,6 +147,9 @@ class CodeStub BASE_EMBEDDED {
   // initially generated.
   void RecordCodeGeneration(Code* code, MacroAssembler* masm);
 
+  // Finish the code object after it has been generated.
+  virtual void FinishCode(Code* code) { }
+
   // Returns information for computing the number key.
   virtual Major MajorKey() = 0;
   virtual int MinorKey() = 0;
@@ -216,11 +223,11 @@ namespace v8 {
 namespace internal {
 
 
-// RuntimeCallHelper implementation used in IC stubs: enters/leaves a
+// RuntimeCallHelper implementation used in stubs: enters/leaves a
 // newly created internal frame before/after the runtime call.
-class ICRuntimeCallHelper : public RuntimeCallHelper {
+class StubRuntimeCallHelper : public RuntimeCallHelper {
  public:
-  ICRuntimeCallHelper() {}
+  StubRuntimeCallHelper() {}
 
   virtual void BeforeCall(MacroAssembler* masm) const;
 
@@ -376,9 +383,61 @@ class GenericUnaryOpStub : public CodeStub {
 };
 
 
-enum NaNInformation {
-  kBothCouldBeNaN,
-  kCantBothBeNaN
+class MathPowStub: public CodeStub {
+ public:
+  MathPowStub() {}
+  virtual void Generate(MacroAssembler* masm);
+
+ private:
+  virtual CodeStub::Major MajorKey() { return MathPow; }
+  virtual int MinorKey() { return 0; }
+
+  const char* GetName() { return "MathPowStub"; }
+};
+
+
+class StringCharAtStub: public CodeStub {
+ public:
+  StringCharAtStub() {}
+
+ private:
+  Major MajorKey() { return StringCharAt; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+};
+
+
+class ICCompareStub: public CodeStub {
+ public:
+  ICCompareStub(Token::Value op, CompareIC::State state)
+      : op_(op), state_(state) {
+    ASSERT(Token::IsCompareOp(op));
+  }
+
+  virtual void Generate(MacroAssembler* masm);
+
+ private:
+  class OpField: public BitField<int, 0, 3> { };
+  class StateField: public BitField<int, 3, 5> { };
+
+  virtual void FinishCode(Code* code) { code->set_compare_state(state_); }
+
+  virtual CodeStub::Major MajorKey() { return CompareIC; }
+  virtual int MinorKey();
+
+  virtual int GetCodeKind() { return Code::COMPARE_IC; }
+
+  void GenerateSmis(MacroAssembler* masm);
+  void GenerateHeapNumbers(MacroAssembler* masm);
+  void GenerateObjects(MacroAssembler* masm);
+  void GenerateMiss(MacroAssembler* masm);
+
+  bool strict() const { return op_ == Token::EQ_STRICT; }
+  Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
+
+  Token::Value op_;
+  CompareIC::State state_;
 };
 
 
@@ -391,6 +450,12 @@ enum CompareFlags {
 };
 
 
+enum NaNInformation {
+  kBothCouldBeNaN,
+  kCantBothBeNaN
+};
+
+
 class CompareStub: public CodeStub {
  public:
   CompareStub(Condition cc,
@@ -398,7 +463,7 @@ class CompareStub: public CodeStub {
               CompareFlags flags,
               Register lhs,
               Register rhs) :
-      cc_(cc),
+      cc_(cc),
       strict_(strict),
       never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
       include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
@@ -440,6 +505,7 @@ class CompareStub: public CodeStub {
 
   // Register holding the left hand side of the comparison if the stub gives
   // a choice, no_reg otherwise.
   Register lhs_;
   // Register holding the right hand side of the comparison if the stub gives
   // a choice, no_reg otherwise.
@@ -457,6 +523,11 @@ class CompareStub: public CodeStub {
 
   int MinorKey();
 
+  virtual int GetCodeKind() { return Code::COMPARE_IC; }
+  virtual void FinishCode(Code* code) {
+    code->set_compare_state(CompareIC::GENERIC);
+  }
+
   // Branch to the label if the given object isn't a symbol.
   void BranchIfNonSymbol(MacroAssembler* masm,
                          Label* label,
@@ -490,9 +561,11 @@ class CompareStub: public CodeStub {
 
 class CEntryStub : public CodeStub {
  public:
-  explicit CEntryStub(int result_size) : result_size_(result_size) { }
+  explicit CEntryStub(int result_size)
+      : result_size_(result_size), save_doubles_(false) { }
 
   void Generate(MacroAssembler* masm);
+  void SaveDoubles() { save_doubles_ = true; }
 
  private:
   void GenerateCore(MacroAssembler* masm,
@@ -508,10 +581,9 @@ class CEntryStub : public CodeStub {
 
   // Number of pointers/values returned.
   const int result_size_;
+  bool save_doubles_;
 
   Major MajorKey() { return CEntry; }
-  // Minor key must differ if different result_size_ values means different
-  // code is generated.
   int MinorKey();
 
   const char* GetName() { return "CEntryStub"; }
@@ -597,6 +669,26 @@ class RegExpExecStub: public CodeStub {
 };
 
 
+class RegExpConstructResultStub: public CodeStub {
+ public:
+  RegExpConstructResultStub() { }
+
+ private:
+  Major MajorKey() { return RegExpConstructResult; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "RegExpConstructResultStub"; }
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("RegExpConstructResultStub\n");
+  }
+#endif
+};
+
+
 class CallFunctionStub: public CodeStub {
  public:
   CallFunctionStub(int argc, InLoopFlag in_loop, CallFunctionFlags flags)
index fb8c5cd..8a64d77 100644 (file)
@@ -139,6 +139,16 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
     print_source = FLAG_print_source;
     print_ast = FLAG_print_ast;
     print_json_ast = FLAG_print_json_ast;
+    Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
+    if (print_source && !filter.is_empty()) {
+      print_source = info->function()->name()->IsEqualTo(filter);
+    }
+    if (print_ast && !filter.is_empty()) {
+      print_ast = info->function()->name()->IsEqualTo(filter);
+    }
+    if (print_json_ast && !filter.is_empty()) {
+      print_json_ast = info->function()->name()->IsEqualTo(filter);
+    }
     ftype = "user-defined";
   }
 
@@ -174,14 +184,24 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
   masm->GetCode(&desc);
   Handle<Code> code = Factory::NewCode(desc, flags, masm->CodeObject());
 
+  if (!code.is_null()) {
+    Counters::total_compiled_code_size.Increment(code->instruction_size());
+  }
+  return code;
+}
+
+
+void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
 #ifdef ENABLE_DISASSEMBLER
   bool print_code = Bootstrapper::IsActive()
       ? FLAG_print_builtin_code
-      : FLAG_print_code;
-  if (print_code) {
+      : (FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code));
+  Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
+  FunctionLiteral* function = info->function();
+  bool match = filter.is_empty() || function->debug_name()->IsEqualTo(filter);
+  if (print_code && match) {
     // Print the source code if available.
     Handle<Script> script = info->script();
-    FunctionLiteral* function = info->function();
     if (!script->IsUndefined() && !script->source()->IsUndefined()) {
       PrintF("--- Raw source ---\n");
       StringInputBuffer stream(String::cast(script->source()));
@@ -199,22 +219,22 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
     code->Disassemble(*function->name()->ToCString());
   }
 #endif  // ENABLE_DISASSEMBLER
-
-  if (!code.is_null()) {
-    Counters::total_compiled_code_size.Increment(code->instruction_size());
-  }
-  return code;
 }
 
 
 // Generate the code.  Compile the AST and assemble all the pieces into a
 // Code object.
 bool CodeGenerator::MakeCode(CompilationInfo* info) {
+  // When using Crankshaft the classic backend should never be used.
+  ASSERT(!V8::UseCrankshaft());
   Handle<Script> script = info->script();
   if (!script->IsUndefined() && !script->source()->IsUndefined()) {
     int len = String::cast(script->source())->length();
     Counters::total_old_codegen_source_size.Increment(len);
   }
+  if (FLAG_trace_codegen) {
+    PrintF("Classic Compiler - ");
+  }
   MakeCodePrologue(info);
   // Generate code.
   const int kInitialBufferSize = 4 * KB;
@@ -230,6 +250,9 @@ bool CodeGenerator::MakeCode(CompilationInfo* info) {
   InLoopFlag in_loop = info->is_in_loop() ? IN_LOOP : NOT_IN_LOOP;
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
   Handle<Code> code = MakeCodeEpilogue(cgen.masm(), flags, info);
+  // There is no stack check table in code generated by the classic backend.
+  code->SetNoStackCheckTable();
+  CodeGenerator::PrintCode(code, info);
   info->SetCode(code);  // May be an empty handle.
   return !code.is_null();
 }
@@ -441,10 +464,11 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
 
 int CEntryStub::MinorKey() {
   ASSERT(result_size_ == 1 || result_size_ == 2);
+  int result = save_doubles_ ? 1 : 0;
 #ifdef _WIN64
-  return result_size_ == 1 ? 0 : 1;
+  return result | ((result_size_ == 1) ? 0 : 2);
 #else
-  return 0;
+  return result;
 #endif
 }
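
A standalone sketch of the resulting CEntryStub minor-key layout: bit 0 now records save_doubles_, and on _WIN64 bit 1 still distinguishes the two result sizes. Purely illustrative, modeled on the hunk above:

#include <cstdio>

// save_doubles in bit 0; on WIN64, result size (1 vs 2) in bit 1.
static int MinorKey(bool save_doubles, int result_size, bool win64) {
  int result = save_doubles ? 1 : 0;
  if (win64) return result | ((result_size == 1) ? 0 : 2);
  return result;
}

int main() {
  // Four distinct keys on WIN64, two on every other platform.
  std::printf("%d %d %d %d\n",
              MinorKey(false, 1, true),   // 0
              MinorKey(true, 1, true),    // 1
              MinorKey(false, 2, true),   // 2
              MinorKey(true, 2, true));   // 3
  return 0;
}
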
 
index 66300d6..23b36f0 100644 (file)
@@ -68,6 +68,9 @@
 //   CodeForDoWhileConditionPosition
 //   CodeForSourcePosition
 
+enum InitState { CONST_INIT, NOT_CONST_INIT };
+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
 #if V8_TARGET_ARCH_IA32
 #include "ia32/codegen-ia32.h"
 #elif V8_TARGET_ARCH_X64
index 6e4e4bf..38438cb 100644 (file)
@@ -86,6 +86,9 @@ class CompilationSubCache {
   // Clear this sub-cache evicting all its content.
   void Clear();
 
+  // Remove given shared function info from sub-cache.
+  void Remove(Handle<SharedFunctionInfo> function_info);
+
   // Number of generations in this sub-cache.
   inline int generations() { return generations_; }
 
@@ -249,6 +252,18 @@ void CompilationSubCache::Clear() {
 }
 
 
+void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
+  // Probe the script generation tables. Make sure not to leak handles
+  // into the caller's handle scope.
+  { HandleScope scope;
+    for (int generation = 0; generation < generations(); generation++) {
+      Handle<CompilationCacheTable> table = GetTable(generation);
+      table->Remove(*function_info);
+    }
+  }
+}
+
+
 // We only re-use a cached function for some script source code if the
 // script originates from the same place. This is to avoid issues
 // when reporting errors, etc.
@@ -467,6 +482,15 @@ void CompilationCacheRegExp::Put(Handle<String> source,
 }
 
 
+void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
+  if (!IsEnabled()) return;
+
+  eval_global.Remove(function_info);
+  eval_contextual.Remove(function_info);
+  script.Remove(function_info);
+}
+
+
 Handle<SharedFunctionInfo> CompilationCache::LookupScript(Handle<String> source,
                                                           Handle<Object> name,
                                                           int line_offset,
@@ -545,6 +569,45 @@ void CompilationCache::PutRegExp(Handle<String> source,
 }
 
 
+static bool SourceHashCompare(void* key1, void* key2) {
+  return key1 == key2;
+}
+
+
+static HashMap* EagerOptimizingSet() {
+  static HashMap map(&SourceHashCompare);
+  return &map;
+}
+
+
+bool CompilationCache::ShouldOptimizeEagerly(Handle<JSFunction> function) {
+  if (FLAG_opt_eagerly) return true;
+  uint32_t hash = function->SourceHash();
+  void* key = reinterpret_cast<void*>(hash);
+  return EagerOptimizingSet()->Lookup(key, hash, false) != NULL;
+}
+
+
+void CompilationCache::MarkForEagerOptimizing(Handle<JSFunction> function) {
+  uint32_t hash = function->SourceHash();
+  void* key = reinterpret_cast<void*>(hash);
+  EagerOptimizingSet()->Lookup(key, hash, true);
+}
+
+
+void CompilationCache::MarkForLazyOptimizing(Handle<JSFunction> function) {
+  uint32_t hash = function->SourceHash();
+  void* key = reinterpret_cast<void*>(hash);
+  EagerOptimizingSet()->Remove(key, hash);
+}
+
+
+void CompilationCache::ResetEagerOptimizingData() {
+  HashMap* set = EagerOptimizingSet();
+  if (set->occupancy() > 0) set->Clear();
+}
+
+
 void CompilationCache::Clear() {
   for (int i = 0; i < kSubCacheCount; i++) {
     subcaches[i]->Clear();
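
The eager-optimizing set above keys V8's HashMap by the function's source hash, so the map effectively acts as a set of hashes. A standalone equivalent using std::unordered_set; the hash value is a made-up stand-in for SourceHash():

#include <cstdint>
#include <cstdio>
#include <unordered_set>

static std::unordered_set<uint32_t>& EagerOptimizingSet() {
  static std::unordered_set<uint32_t> set;  // lazily constructed singleton
  return set;
}

static bool ShouldOptimizeEagerly(uint32_t source_hash) {
  return EagerOptimizingSet().count(source_hash) != 0;
}

static void MarkForEagerOptimizing(uint32_t source_hash) {
  EagerOptimizingSet().insert(source_hash);
}

static void MarkForLazyOptimizing(uint32_t source_hash) {
  EagerOptimizingSet().erase(source_hash);
}

int main() {
  uint32_t hash = 0xDEADBEEF;  // stand-in for function->SourceHash()
  MarkForEagerOptimizing(hash);
  std::printf("%d\n", ShouldOptimizeEagerly(hash));  // 1
  MarkForLazyOptimizing(hash);
  std::printf("%d\n", ShouldOptimizeEagerly(hash));  // 0
  return 0;
}
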
index 22ecff8..37e21be 100644 (file)
@@ -76,9 +76,20 @@ class CompilationCache {
                         JSRegExp::Flags flags,
                         Handle<FixedArray> data);
 
+  // Support for eager optimization tracking.
+  static bool ShouldOptimizeEagerly(Handle<JSFunction> function);
+  static void MarkForEagerOptimizing(Handle<JSFunction> function);
+  static void MarkForLazyOptimizing(Handle<JSFunction> function);
+
+  // Reset the eager optimization tracking data.
+  static void ResetEagerOptimizingData();
+
   // Clear the cache - also used to initialize the cache at startup.
   static void Clear();
 
+  // Remove given shared function info from all caches.
+  static void Remove(Handle<SharedFunctionInfo> function_info);
+
   // GC support.
   static void Iterate(ObjectVisitor* v);
   static void IterateFunctions(ObjectVisitor* v);
index 29bbbc7..59a684c 100755 (executable)
 #include "data-flow.h"
 #include "debug.h"
 #include "full-codegen.h"
+#include "hydrogen.h"
+#include "lithium-allocator.h"
 #include "liveedit.h"
 #include "oprofile-agent.h"
 #include "parser.h"
 #include "rewriter.h"
+#include "runtime-profiler.h"
 #include "scopeinfo.h"
 #include "scopes.h"
+#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -52,7 +56,10 @@ CompilationInfo::CompilationInfo(Handle<Script> script)
       scope_(NULL),
       script_(script),
       extension_(NULL),
-      pre_parse_data_(NULL) {
+      pre_parse_data_(NULL),
+      supports_deoptimization_(false),
+      osr_ast_id_(AstNode::kNoNumber) {
+  Initialize(NONOPT);
 }
 
 
@@ -63,7 +70,10 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
       shared_info_(shared_info),
       script_(Handle<Script>(Script::cast(shared_info->script()))),
       extension_(NULL),
-      pre_parse_data_(NULL) {
+      pre_parse_data_(NULL),
+      supports_deoptimization_(false),
+      osr_ast_id_(AstNode::kNoNumber) {
+  Initialize(BASE);
 }
 
 
@@ -75,31 +85,200 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
       shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
       script_(Handle<Script>(Script::cast(shared_info_->script()))),
       extension_(NULL),
-      pre_parse_data_(NULL) {
+      pre_parse_data_(NULL),
+      supports_deoptimization_(false),
+      osr_ast_id_(AstNode::kNoNumber) {
+  Initialize(BASE);
 }
 
 
-// For normal operation the syntax checker is used to determine whether to
-// use the full compiler for top level code or not. However if the flag
-// --always-full-compiler is specified or debugging is active the full
-// compiler will be used for all code.
+// Determine whether to use the full compiler for all code. If the flag
+// --always-full-compiler is specified this is the case. For the virtual
+// frame based compiler the full compiler is also used if a debugger is
+// connected, as the code from the full compiler supports more precise break
+// points. For the Crankshaft adaptive compiler, debugging optimized code is
+// not possible at all. However, Crankshaft supports recompilation of
+// functions, so in this case the full compiler need not be used when a
+// debugger is attached, but only once break points have actually been set.
 static bool AlwaysFullCompiler() {
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  return FLAG_always_full_compiler || Debugger::IsDebuggerActive();
+  if (V8::UseCrankshaft()) {
+    return FLAG_always_full_compiler || Debug::has_break_points();
+  } else {
+    return FLAG_always_full_compiler || Debugger::IsDebuggerActive();
+  }
 #else
   return FLAG_always_full_compiler;
 #endif
 }
 
 
+static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
+  int opt_count = function->shared()->opt_count();
+  function->shared()->set_opt_count(opt_count + 1);
+  if (!FLAG_trace_opt) return;
+
+  double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+  PrintF("[optimizing: ");
+  function->PrintName();
+  PrintF(" / %" V8PRIxPTR, reinterpret_cast<intptr_t>(*function));
+  PrintF(" - took %0.3f ms]\n", ms);
+}
+
+
+static void AbortAndDisable(CompilationInfo* info) {
+  // Disable optimization for the shared function info and mark the
+  // code as non-optimizable. The marker on the shared function info
+  // is there because we flush non-optimized code, thereby losing the
+  // non-optimizable information for the code. When the code is
+  // regenerated and set on the shared function info it is marked as
+  // non-optimizable if optimization is disabled for the shared
+  // function info.
+  Handle<SharedFunctionInfo> shared = info->shared_info();
+  shared->set_optimization_disabled(true);
+  Handle<Code> code = Handle<Code>(shared->code());
+  ASSERT(code->kind() == Code::FUNCTION);
+  code->set_optimizable(false);
+  info->SetCode(code);
+  if (FLAG_trace_opt) {
+    PrintF("[disabled optimization for: ");
+    info->closure()->PrintName();
+    PrintF(" / %" V8PRIxPTR "]\n",
+           reinterpret_cast<intptr_t>(*info->closure()));
+  }
+}
+
+
+static bool MakeCrankshaftCode(CompilationInfo* info) {
+  // Test if we can optimize this function when asked to. We can only
+  // do this after the scopes are computed.
+  if (!info->AllowOptimize()) info->DisableOptimization();
+
+  // In case we are not optimizing simply return the code from
+  // the full code generator.
+  if (!info->IsOptimizing()) {
+    return FullCodeGenerator::MakeCode(info);
+  }
+
+  // We should never arrive here if there is no code object on the
+  // shared function object.
+  Handle<Code> code(info->shared_info()->code());
+  ASSERT(code->kind() == Code::FUNCTION);
+
+  // Fall back to using the full code generator if it's not possible
+  // to use the Hydrogen-based optimizing compiler. We already have
+  // generated code for this from the shared function object.
+  if (AlwaysFullCompiler() || !FLAG_use_hydrogen) {
+    info->SetCode(code);
+    return true;
+  }
+
+  // Limit the number of times we re-compile a function with
+  // the optimizing compiler.
+  const int kMaxOptCount = FLAG_deopt_every_n_times == 0 ? 10 : 1000;
+  if (info->shared_info()->opt_count() > kMaxOptCount) {
+    AbortAndDisable(info);
+    // True indicates the compilation pipeline is still going, not
+    // necessarily that we optimized the code.
+    return true;
+  }
+
+  // Due to an encoding limit on LUnallocated operands in the Lithium
+  // language, we cannot optimize functions with too many formal parameters
+  // or perform on-stack replacement for functions with too many
+  // stack-allocated local variables.
+  //
+  // The encoding is as a signed value, with parameters using the negative
+  // indices and locals the non-negative ones.
+  const int limit = LUnallocated::kMaxFixedIndices / 2;
+  Scope* scope = info->scope();
+  if (scope->num_parameters() > limit || scope->num_stack_slots() > limit) {
+    AbortAndDisable(info);
+    // True indicates the compilation pipeline is still going, not
+    // necessarily that we optimized the code.
+    return true;
+  }
+
+  // Take --hydrogen-filter into account.
+  Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
+  Handle<String> name = info->function()->debug_name();
+  bool match = filter.is_empty() || name->IsEqualTo(filter);
+  if (!match) {
+    info->SetCode(code);
+    return true;
+  }
+
+  // Recompile the unoptimized version of the code if the current version
+  // doesn't have deoptimization support. Alternatively, we may decide to
+  // run the full code generator to get a baseline for the compile-time
+  // performance of the hydrogen-based compiler.
+  int64_t start = OS::Ticks();
+  bool should_recompile = !info->shared_info()->has_deoptimization_support();
+  if (should_recompile || FLAG_time_hydrogen) {
+    HPhase phase(HPhase::kFullCodeGen);
+    CompilationInfo unoptimized(info->shared_info());
+    // Note that we use the same AST that we will use for generating the
+    // optimized code.
+    unoptimized.SetFunction(info->function());
+    unoptimized.SetScope(info->scope());
+    if (should_recompile) unoptimized.EnableDeoptimizationSupport();
+    bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
+    if (should_recompile) {
+      if (!succeeded) return false;
+      Handle<SharedFunctionInfo> shared = info->shared_info();
+      shared->EnableDeoptimizationSupport(*unoptimized.code());
+      // The existing unoptimized code was replaced with the new one.
+      Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
+          Handle<String>(shared->DebugName()),
+          shared->start_position(),
+          &unoptimized);
+    }
+  }
+
+  // Check that the unoptimized, shared code is ready for
+  // optimizations.  When using the always_opt flag we disregard the
+  // optimizable marker in the code object and optimize anyway. This
+  // is safe as long as the unoptimized code has deoptimization
+  // support.
+  ASSERT(FLAG_always_opt || info->shared_info()->code()->optimizable());
+  ASSERT(info->shared_info()->has_deoptimization_support());
+
+  if (FLAG_trace_hydrogen) {
+    PrintF("-----------------------------------------------------------\n");
+    PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
+    HTracer::Instance()->TraceCompilation(info->function());
+  }
+
+  TypeFeedbackOracle oracle(Handle<Code>(info->shared_info()->code()));
+  HGraphBuilder builder(&oracle);
+  HPhase phase(HPhase::kTotal);
+  HGraph* graph = builder.CreateGraph(info);
+  if (graph != NULL && FLAG_build_lithium) {
+    Handle<Code> code = graph->Compile();
+    if (!code.is_null()) {
+      info->SetCode(code);
+      FinishOptimization(info->closure(), start);
+      return true;
+    }
+  }
+
+  // Compilation with the Hydrogen compiler failed. Keep using the
+  // shared code but mark it as unoptimizable.
+  AbortAndDisable(info);
+  // True indicates the compilation pipeline is still going, not necessarily
+  // that we optimized the code.
+  return true;
+}
+
+
 static bool MakeCode(CompilationInfo* info) {
   // Precondition: code has been parsed.  Postcondition: the code field in
   // the compilation info is set if compilation succeeded.
   ASSERT(info->function() != NULL);
 
-  if (Rewriter::Rewrite(info) &&
-      Scope::Analyze(info) &&
-      Rewriter::Analyze(info)) {
+  if (Rewriter::Rewrite(info) && Scope::Analyze(info)) {
+    if (V8::UseCrankshaft()) return MakeCrankshaftCode(info);
+
     // Generate code and return it.  Code generator selection is governed by
     // which backends are enabled and whether the function is considered
     // run-once code or not.
@@ -109,17 +288,19 @@ static bool MakeCode(CompilationInfo* info) {
     //
     // The normal choice of backend can be overridden with the flags
     // --always-full-compiler.
-    Handle<SharedFunctionInfo> shared = info->shared_info();
-    bool is_run_once = (shared.is_null())
-        ? info->scope()->is_global_scope()
-        : (shared->is_toplevel() || shared->try_full_codegen());
-    bool can_use_full =
-        FLAG_full_compiler && !info->function()->contains_loops();
-    if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
-      return FullCodeGenerator::MakeCode(info);
-    } else {
-      AssignedVariablesAnalyzer ava;
-      return ava.Analyze(info) && CodeGenerator::MakeCode(info);
+    if (Rewriter::Analyze(info)) {
+      Handle<SharedFunctionInfo> shared = info->shared_info();
+      bool is_run_once = (shared.is_null())
+          ? info->scope()->is_global_scope()
+          : (shared->is_toplevel() || shared->try_full_codegen());
+      bool can_use_full =
+          FLAG_full_compiler && !info->function()->contains_loops();
+      if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
+        return FullCodeGenerator::MakeCode(info);
+      } else {
+        return AssignedVariablesAnalyzer::Analyze(info) &&
+            CodeGenerator::MakeCode(info);
+      }
     }
   }
 
@@ -374,40 +555,60 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
       Top::StackOverflow();
     } else {
       ASSERT(!info->code().is_null());
+      Handle<Code> code = info->code();
+      Handle<JSFunction> function = info->closure();
       RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
                                 Handle<String>(shared->DebugName()),
                                 shared->start_position(),
                                 info);
 
-      // Update the shared function info with the compiled code and the
-      // scope info.  Please note, that the order of the sharedfunction
-      // initialization is important since SerializedScopeInfo::Create might
-      // trigger a GC, causing the ASSERT below to be invalid if the code
-      // was flushed. By setting the code object last we avoid this.
-      Handle<SerializedScopeInfo> scope_info =
-          SerializedScopeInfo::Create(info->scope());
-      shared->set_scope_info(*scope_info);
-      shared->set_code(*info->code());
-      if (!info->closure().is_null()) {
-        info->closure()->set_code(*info->code());
+      if (info->IsOptimizing()) {
+        function->ReplaceCode(*code);
+      } else {
+        // Update the shared function info with the compiled code and the
+        // scope info.  Please note that the order of the shared function
+        // info initialization is important since set_scope_info might
+        // trigger a GC, causing the ASSERT below to be invalid if the code
+        // was flushed. By setting the code object last we avoid this.
+        Handle<SerializedScopeInfo> scope_info =
+            SerializedScopeInfo::Create(info->scope());
+        shared->set_scope_info(*scope_info);
+        shared->set_code(*code);
+        if (!function.is_null()) {
+          function->ReplaceCode(*code);
+          ASSERT(!function->IsOptimized());
+        }
+
+        // Set the expected number of properties for instances.
+        FunctionLiteral* lit = info->function();
+        int expected = lit->expected_property_count();
+        SetExpectedNofPropertiesFromEstimate(shared, expected);
+
+        // Set the optimization hints after performing lazy compilation, as
+        // these are not set when the function is set up as a lazily
+        // compiled function.
+        shared->SetThisPropertyAssignmentsInfo(
+            lit->has_only_simple_this_property_assignments(),
+            *lit->this_property_assignments());
+
+        // Check the function has compiled code.
+        ASSERT(shared->is_compiled());
+        shared->set_code_age(0);
+
+        if (V8::UseCrankshaft() && info->AllowOptimize()) {
+          // If we're asked to always optimize, we compile the optimized
+          // version of the function right away - unless the debugger is
+          // active as it makes no sense to compile optimized code then.
+          if (FLAG_always_opt && !Debug::has_break_points()) {
+            CompilationInfo optimized(function);
+            optimized.SetOptimizing(AstNode::kNoNumber);
+            return CompileLazy(&optimized);
+          } else if (CompilationCache::ShouldOptimizeEagerly(function)) {
+            RuntimeProfiler::OptimizeSoon(*function);
+          }
+        }
       }
 
-      // Set the expected number of properties for instances.
-      FunctionLiteral* lit = info->function();
-      SetExpectedNofPropertiesFromEstimate(shared,
-                                           lit->expected_property_count());
-
-      // Set the optimization hints after performing lazy compilation, as
-      // these are not set when the function is set up as a lazily compiled
-      // function.
-      shared->SetThisPropertyAssignmentsInfo(
-          lit->has_only_simple_this_property_assignments(),
-          *lit->this_property_assignments());
-
-      // Check the function has compiled code.
-      ASSERT(shared->is_compiled());
-      shared->set_code_age(0);
-      ASSERT(!info->code().is_null());
       return true;
     }
   }
@@ -419,12 +620,6 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
 
 Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
                                                        Handle<Script> script) {
-#ifdef DEBUG
-  // We should not try to compile the same function literal more than
-  // once.
-  literal->mark_as_compiled();
-#endif
-
   // Precondition: code has been parsed and scopes have been analyzed.
   CompilationInfo info(script);
   info.SetFunction(literal);
@@ -446,28 +641,31 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
     Handle<Code> code(Builtins::builtin(Builtins::LazyCompile));
     info.SetCode(code);
   } else {
-    // Generate code and return it.  The way that the compilation mode
-    // is controlled by the command-line flags is described in
-    // the static helper function MakeCode.
-    //
-    // The bodies of function literals have not yet been visited by
-    // the AST analyzer.
-    if (!Rewriter::Analyze(&info)) return Handle<SharedFunctionInfo>::null();
-
-    bool is_run_once = literal->try_full_codegen();
-    bool use_full = FLAG_full_compiler && !literal->contains_loops();
-    if (AlwaysFullCompiler() || (use_full && is_run_once)) {
-      if (!FullCodeGenerator::MakeCode(&info)) {
+    if (V8::UseCrankshaft()) {
+      if (!MakeCrankshaftCode(&info)) {
         return Handle<SharedFunctionInfo>::null();
       }
     } else {
-      // We fall back to the classic V8 code generator.
-      AssignedVariablesAnalyzer ava;
-      if (!ava.Analyze(&info)) return Handle<SharedFunctionInfo>::null();
-      if (!CodeGenerator::MakeCode(&info)) {
-        return Handle<SharedFunctionInfo>::null();
+      // The bodies of function literals have not yet been visited by the
+      // AST optimizer/analyzer.
+      if (!Rewriter::Analyze(&info)) return Handle<SharedFunctionInfo>::null();
+
+      bool is_run_once = literal->try_full_codegen();
+      bool can_use_full = FLAG_full_compiler && !literal->contains_loops();
+
+      if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
+        if (!FullCodeGenerator::MakeCode(&info)) {
+          return Handle<SharedFunctionInfo>::null();
+        }
+      } else {
+        // We fall back to the classic V8 code generator.
+        if (!AssignedVariablesAnalyzer::Analyze(&info) ||
+            !CodeGenerator::MakeCode(&info)) {
+          return Handle<SharedFunctionInfo>::null();
+        }
       }
     }
+    ASSERT(!info.code().is_null());
 
     // Function compilation complete.
     RecordFunctionCompilation(Logger::FUNCTION_TAG,
@@ -484,6 +682,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
                                      info.code(),
                                      scope_info);
   SetFunctionInfo(result, literal, false, script);
+  result->set_allows_lazy_compilation(allow_lazy);
 
   // Set the expected number of properties for instances and return
   // the resulting function.
index 20868e5..1176c69 100644 (file)
@@ -59,6 +59,7 @@ class CompilationInfo BASE_EMBEDDED {
   v8::Extension* extension() const { return extension_; }
   ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
   Handle<Context> calling_context() const { return calling_context_; }
+  int osr_ast_id() const { return osr_ast_id_; }
 
   void MarkAsEval() {
     ASSERT(!is_lazy());
@@ -93,8 +94,66 @@ class CompilationInfo BASE_EMBEDDED {
     ASSERT(is_eval());
     calling_context_ = context;
   }
+  void SetOsrAstId(int osr_ast_id) {
+    ASSERT(IsOptimizing());
+    osr_ast_id_ = osr_ast_id;
+  }
+
+  bool has_global_object() const {
+    return !closure().is_null() && (closure()->context()->global() != NULL);
+  }
+
+  GlobalObject* global_object() const {
+    return has_global_object() ? closure()->context()->global() : NULL;
+  }
+
+  // Accessors for the different compilation modes.
+  bool IsOptimizing() const { return mode_ == OPTIMIZE; }
+  bool IsOptimizable() const { return mode_ == BASE; }
+  void SetOptimizing(int osr_ast_id) {
+    SetMode(OPTIMIZE);
+    osr_ast_id_ = osr_ast_id;
+  }
+  void DisableOptimization() { SetMode(NONOPT); }
+
+  // Deoptimization support.
+  bool HasDeoptimizationSupport() const { return supports_deoptimization_; }
+  void EnableDeoptimizationSupport() {
+    ASSERT(IsOptimizable());
+    supports_deoptimization_ = true;
+  }
+
+  // Determine whether or not we can adaptively optimize.
+  bool AllowOptimize() {
+    return V8::UseCrankshaft() &&
+           !closure_.is_null() &&
+           function_->AllowOptimize();
+  }
 
  private:
+  // Compilation mode.
+  // BASE is generated by the full codegen, optionally prepared for bailouts.
+  // OPTIMIZE is optimized code generated by the Hydrogen-based backend.
+  // NONOPT is generated by the full codegen or the classic backend
+  //   and is not prepared for recompilation/bailouts. These functions
+  //   are never recompiled.
+  enum Mode {
+    BASE,
+    OPTIMIZE,
+    NONOPT
+  };
+
+  CompilationInfo() : function_(NULL) {}
+
+  void Initialize(Mode mode) {
+    mode_ = V8::UseCrankshaft() ? mode : NONOPT;
+  }
+
+  void SetMode(Mode mode) {
+    ASSERT(V8::UseCrankshaft());
+    mode_ = mode;
+  }
+
   // Flags using template class BitField<type, start, length>.  All are
   // false by default.
   //
@@ -130,6 +189,11 @@ class CompilationInfo BASE_EMBEDDED {
   // handle otherwise.
   Handle<Context> calling_context_;
 
+  // Compilation mode flag and whether deoptimization is allowed.
+  Mode mode_;
+  bool supports_deoptimization_;
+  int osr_ast_id_;
+
   DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
 };
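
A compact standalone model of the three-mode state machine described in the comments above; it mirrors only the accessors, under the assumption that UseCrankshaft() gates every mode change:

#include <cstdio>

enum Mode { BASE, OPTIMIZE, NONOPT };

class CompilationInfoModel {
 public:
  // Without Crankshaft everything is NONOPT and never recompiled.
  explicit CompilationInfoModel(bool use_crankshaft)
      : mode_(use_crankshaft ? BASE : NONOPT) {}

  bool IsOptimizing() const { return mode_ == OPTIMIZE; }
  bool IsOptimizable() const { return mode_ == BASE; }
  void SetOptimizing() { mode_ = OPTIMIZE; }
  void DisableOptimization() { mode_ = NONOPT; }

 private:
  Mode mode_;
};

int main() {
  CompilationInfoModel info(true);
  std::printf("optimizable: %d\n", info.IsOptimizable());  // 1
  info.SetOptimizing();
  std::printf("optimizing: %d\n", info.IsOptimizing());    // 1
  return 0;
}
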
 
@@ -185,7 +249,6 @@ class Compiler : public AllStatic {
   static bool MakeCodeForLiveEdit(CompilationInfo* info);
 #endif
 
- private:
   static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
                                         Handle<String> name,
                                         int start_position,
index 1ce5007..3ad72a1 100644 (file)
@@ -239,6 +239,69 @@ bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
 }
 
 
+void Context::AddOptimizedFunction(JSFunction* function) {
+  ASSERT(IsGlobalContext());
+#ifdef DEBUG
+  Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
+  while (!element->IsUndefined()) {
+    CHECK(element != function);
+    element = JSFunction::cast(element)->next_function_link();
+  }
+
+  CHECK(function->next_function_link()->IsUndefined());
+
+  // Check that the context belongs to the weak global contexts list.
+  bool found = false;
+  Object* context = Heap::global_contexts_list();
+  while (!context->IsUndefined()) {
+    if (context == this) {
+      found = true;
+      break;
+    }
+    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+  }
+  CHECK(found);
+#endif
+  function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST));
+  set(OPTIMIZED_FUNCTIONS_LIST, function);
+}
+
+
+void Context::RemoveOptimizedFunction(JSFunction* function) {
+  ASSERT(IsGlobalContext());
+  Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
+  JSFunction* prev = NULL;
+  while (!element->IsUndefined()) {
+    JSFunction* element_function = JSFunction::cast(element);
+    ASSERT(element_function->next_function_link()->IsUndefined() ||
+           element_function->next_function_link()->IsJSFunction());
+    if (element_function == function) {
+      if (prev == NULL) {
+        set(OPTIMIZED_FUNCTIONS_LIST, element_function->next_function_link());
+      } else {
+        prev->set_next_function_link(element_function->next_function_link());
+      }
+      element_function->set_next_function_link(Heap::undefined_value());
+      return;
+    }
+    prev = element_function;
+    element = element_function->next_function_link();
+  }
+  UNREACHABLE();
+}
+
+
+Object* Context::OptimizedFunctionsListHead() {
+  ASSERT(IsGlobalContext());
+  return get(OPTIMIZED_FUNCTIONS_LIST);
+}
+
+
+void Context::ClearOptimizedFunctions() {
+  set(OPTIMIZED_FUNCTIONS_LIST, Heap::undefined_value());
+}
+
+
 #ifdef DEBUG
 bool Context::IsBootstrappingOrContext(Object* object) {
   // During bootstrapping we allow all objects to pass as
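
AddOptimizedFunction and RemoveOptimizedFunction above implement an intrusive singly linked list threaded through next_function_link, with O(n) removal that relinks the predecessor. A standalone sketch with simplified stand-in types:

#include <cassert>
#include <cstdio>

struct JSFunction {
  JSFunction* next_function_link = nullptr;
};

struct Context {
  JSFunction* optimized_functions_list = nullptr;

  void AddOptimizedFunction(JSFunction* function) {
    function->next_function_link = optimized_functions_list;
    optimized_functions_list = function;
  }

  void RemoveOptimizedFunction(JSFunction* function) {
    JSFunction* prev = nullptr;
    for (JSFunction* f = optimized_functions_list; f != nullptr;
         f = f->next_function_link) {
      if (f == function) {
        if (prev == nullptr) {
          optimized_functions_list = f->next_function_link;
        } else {
          prev->next_function_link = f->next_function_link;
        }
        f->next_function_link = nullptr;
        return;
      }
      prev = f;
    }
    assert(false);  // UNREACHABLE() in the real code
  }
};

int main() {
  Context ctx;
  JSFunction a, b;
  ctx.AddOptimizedFunction(&a);
  ctx.AddOptimizedFunction(&b);
  ctx.RemoveOptimizedFunction(&a);
  std::printf("head is b: %d\n", ctx.optimized_functions_list == &b);  // 1
  return 0;
}
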
index 9722a93..d0d54d1 100644 (file)
@@ -228,12 +228,13 @@ class Context: public FixedArray {
 
     // Properties from here are treated as weak references by the full GC.
     // Scavenge treats them as strong references.
-    NEXT_CONTEXT_LINK,
+    OPTIMIZED_FUNCTIONS_LIST,  // Weak.
+    NEXT_CONTEXT_LINK,  // Weak.
 
     // Total number of slots.
     GLOBAL_CONTEXT_SLOTS,
 
-    FIRST_WEAK_SLOT = NEXT_CONTEXT_LINK
+    FIRST_WEAK_SLOT = OPTIMIZED_FUNCTIONS_LIST
   };
 
   // Direct slot access.
@@ -291,6 +292,12 @@ class Context: public FixedArray {
     return IsCatchContext() && extension() == object;
   }
 
+  // A global context holds a list of all functions that have been optimized.
+  void AddOptimizedFunction(JSFunction* function);
+  void RemoveOptimizedFunction(JSFunction* function);
+  Object* OptimizedFunctionsListHead();
+  void ClearOptimizedFunctions();
+
 #define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
   void  set_##name(type* value) {                         \
     ASSERT(IsGlobalContext());                            \
index da19a45..f13c0ee 100644 (file)
@@ -34,6 +34,7 @@
 #include "frames-inl.h"
 #include "hashmap.h"
 #include "log-inl.h"
+#include "vm-state-inl.h"
 
 #include "../include/v8-profiler.h"
 
@@ -223,7 +224,7 @@ void ProfilerEventsProcessor::RegExpCodeCreateEvent(
 void ProfilerEventsProcessor::AddCurrentStack() {
   TickSampleEventRecord record;
   TickSample* sample = &record.sample;
-  sample->state = VMState::current_state();
+  sample->state = Top::current_vm_state();
   sample->pc = reinterpret_cast<Address>(sample);  // Not NULL.
   sample->frames_count = 0;
   for (StackTraceFrameIterator it;
@@ -314,6 +315,7 @@ void ProfilerEventsProcessor::Run() {
 
 
 CpuProfiler* CpuProfiler::singleton_ = NULL;
+Atomic32 CpuProfiler::is_profiling_ = false;
 
 void CpuProfiler::StartProfiling(const char* title) {
   ASSERT(singleton_ != NULL);
@@ -435,7 +437,7 @@ void CpuProfiler::FunctionCreateEvent(JSFunction* function) {
   }
   singleton_->processor_->FunctionCreateEvent(
       function->address(),
-      function->code()->address(),
+      function->shared()->code()->address(),
       security_token_id);
 }
 
@@ -525,6 +527,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
     Logger::logging_nesting_ = 0;
     generator_ = new ProfileGenerator(profiles_);
     processor_ = new ProfilerEventsProcessor(generator_);
+    NoBarrier_Store(&is_profiling_, true);
     processor_->Start();
     // Enumerate stuff we already have in the heap.
     if (Heap::HasBeenSetup()) {
@@ -539,7 +542,9 @@ void CpuProfiler::StartProcessorIfNotStarted() {
       Logger::LogAccessorCallbacks();
     }
     // Enable stack sampling.
-    reinterpret_cast<Sampler*>(Logger::ticker_)->Start();
+    Sampler* sampler = reinterpret_cast<Sampler*>(Logger::ticker_);
+    if (!sampler->IsActive()) sampler->Start();
+    sampler->IncreaseProfilingDepth();
   }
 }
 
@@ -570,12 +575,15 @@ CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
 
 void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
   if (profiles_->IsLastProfile(title)) {
-    reinterpret_cast<Sampler*>(Logger::ticker_)->Stop();
+    Sampler* sampler = reinterpret_cast<Sampler*>(Logger::ticker_);
+    sampler->DecreaseProfilingDepth();
+    sampler->Stop();
     processor_->Stop();
     processor_->Join();
     delete processor_;
     delete generator_;
     processor_ = NULL;
+    NoBarrier_Store(&is_profiling_, false);
     generator_ = NULL;
     Logger::logging_nesting_ = saved_logging_nesting_;
   }
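
The new is_profiling_ flag above replaces a pointer check with an atomic load so the sampler can test it cheaply from another thread. A standalone sketch using std::atomic in place of V8's Atomic32 and NoBarrier_Store/NoBarrier_Load:

#include <atomic>
#include <cstdio>

static std::atomic<bool> is_profiling{false};

static void StartProcessor() {
  // ... allocate generator and processor here ...
  is_profiling.store(true, std::memory_order_relaxed);
}

static void StopProcessor() {
  // ... stop, join, and delete the processor here ...
  is_profiling.store(false, std::memory_order_relaxed);
}

int main() {
  StartProcessor();
  std::printf("%d\n", is_profiling.load(std::memory_order_relaxed));  // 1
  StopProcessor();
  std::printf("%d\n", is_profiling.load(std::memory_order_relaxed));  // 0
  return 0;
}
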
index d3158d7..10165f6 100644 (file)
@@ -30,6 +30,7 @@
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
+#include "atomicops.h"
 #include "circular-queue.h"
 #include "unbound-queue.h"
 
@@ -269,7 +270,7 @@ class CpuProfiler {
   static void SetterCallbackEvent(String* name, Address entry_point);
 
   static INLINE(bool is_profiling()) {
-    return singleton_ != NULL && singleton_->processor_ != NULL;
+    return NoBarrier_Load(&is_profiling_);
   }
 
  private:
@@ -290,6 +291,7 @@ class CpuProfiler {
   int saved_logging_nesting_;
 
   static CpuProfiler* singleton_;
+  static Atomic32 is_profiling_;
 
 #else
   static INLINE(bool is_profiling()) { return false; }
index 30f04c7..de1fe0d 100644 (file)
--- a/src/d8.h
+++ b/src/d8.h
@@ -138,6 +138,10 @@ class Shell: public i::AllStatic {
   static Handle<Value> DebugCommandToJSONRequest(Handle<String> command);
 #endif
 
+#ifdef WIN32
+#undef Yield
+#endif
+
   static Handle<Value> Print(const Arguments& args);
   static Handle<Value> Write(const Arguments& args);
   static Handle<Value> Yield(const Arguments& args);
index be82446..9c02ff4 100644 (file)
@@ -33,7 +33,6 @@
 namespace v8 {
 namespace internal {
 
-
 #ifdef DEBUG
 void BitVector::Print() {
   bool first = true;
@@ -50,13 +49,39 @@ void BitVector::Print() {
 #endif
 
 
+void BitVector::Iterator::Advance() {
+  current_++;
+  uint32_t val = current_value_;
+  while (val == 0) {
+    current_index_++;
+    if (Done()) return;
+    val = target_->data_[current_index_];
+    current_ = current_index_ << 5;
+  }
+  val = SkipZeroBytes(val);
+  val = SkipZeroBits(val);
+  current_value_ = val >> 1;
+}
+
+
 bool AssignedVariablesAnalyzer::Analyze(CompilationInfo* info) {
-  info_ = info;
   Scope* scope = info->scope();
-  int variables = scope->num_parameters() + scope->num_stack_slots();
-  if (variables == 0) return true;
-  av_.ExpandTo(variables);
-  VisitStatements(info->function()->body());
+  int size = scope->num_parameters() + scope->num_stack_slots();
+  if (size == 0) return true;
+  AssignedVariablesAnalyzer analyzer(info, size);
+  return analyzer.Analyze();
+}
+
+
+AssignedVariablesAnalyzer::AssignedVariablesAnalyzer(CompilationInfo* info,
+                                                     int size)
+    : info_(info), av_(size) {
+}
+
+
+bool AssignedVariablesAnalyzer::Analyze() {
+  ASSERT(av_.length() > 0);
+  VisitStatements(info_->function()->body());
   return !HasStackOverflow();
 }
 
@@ -318,11 +343,6 @@ void AssignedVariablesAnalyzer::VisitConditional(Conditional* expr) {
 }
 
 
-void AssignedVariablesAnalyzer::VisitSlot(Slot* expr) {
-  UNREACHABLE();
-}
-
-
 void AssignedVariablesAnalyzer::VisitVariableProxy(VariableProxy* expr) {
   // Nothing to do.
   ASSERT(av_.IsEmpty());
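
BitVector::Iterator::Advance above skips runs of zero bits a byte at a time before scanning single bits, instead of testing every index. A standalone sketch of the same skip-ahead over a vector of 32-bit words:

#include <cstdint>
#include <cstdio>
#include <vector>

static void ForEachSetBit(const std::vector<uint32_t>& data,
                          void (*visit)(int)) {
  for (size_t w = 0; w < data.size(); w++) {
    uint32_t val = data[w];
    int current = static_cast<int>(w) * 32;
    while (val != 0) {
      while ((val & 0xFF) == 0) { val >>= 8; current += 8; }  // skip bytes
      while ((val & 0x1) == 0) { val >>= 1; current++; }      // skip bits
      visit(current);
      val >>= 1;  // consume the bit just visited
      current++;
    }
  }
}

int main() {
  std::vector<uint32_t> data(2, 0);
  data[0] = (1u << 3) | (1u << 17);
  data[1] = (1u << 0);  // bit 32 overall
  ForEachSetBit(data, [](int i) { std::printf("%d ", i); });  // 3 17 32
  std::printf("\n");
  return 0;
}
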
index efce1ea..6e2230c 100644 (file)
@@ -42,10 +42,57 @@ class Node;
 
 class BitVector: public ZoneObject {
  public:
-  BitVector() : length_(0), data_length_(0), data_(NULL) { }
+  // Iterator for the elements of this BitVector.
+  class Iterator BASE_EMBEDDED {
+   public:
+    explicit Iterator(BitVector* target)
+        : target_(target),
+          current_index_(0),
+          current_value_(target->data_[0]),
+          current_(-1) {
+      ASSERT(target->data_length_ > 0);
+      Advance();
+    }
+    ~Iterator() { }
+
+    bool Done() const { return current_index_ >= target_->data_length_; }
+    void Advance();
+
+    int Current() const {
+      ASSERT(!Done());
+      return current_;
+    }
+
+   private:
+    uint32_t SkipZeroBytes(uint32_t val) {
+      while ((val & 0xFF) == 0) {
+        val >>= 8;
+        current_ += 8;
+      }
+      return val;
+    }
+    uint32_t SkipZeroBits(uint32_t val) {
+      while ((val & 0x1) == 0) {
+        val >>= 1;
+        current_++;
+      }
+      return val;
+    }
 
-  explicit BitVector(int length) {
-    ExpandTo(length);
+    BitVector* target_;
+    int current_index_;
+    uint32_t current_value_;
+    int current_;
+
+    friend class BitVector;
+  };
+
+  explicit BitVector(int length)
+      : length_(length),
+        data_length_(SizeFor(length)),
+        data_(Zone::NewArray<uint32_t>(data_length_)) {
+    ASSERT(length > 0);
+    Clear();
   }
 
   BitVector(const BitVector& other)
@@ -55,12 +102,8 @@ class BitVector: public ZoneObject {
     CopyFrom(other);
   }
 
-  void ExpandTo(int length) {
-    ASSERT(length > 0);
-    length_ = length;
-    data_length_ = SizeFor(length);
-    data_ = Zone::NewArray<uint32_t>(data_length_);
-    Clear();
+  static int SizeFor(int length) {
+    return 1 + ((length - 1) / 32);
   }
 
   BitVector& operator=(const BitVector& rhs) {
@@ -75,7 +118,7 @@ class BitVector: public ZoneObject {
     }
   }
 
-  bool Contains(int i) {
+  bool Contains(int i) const {
     ASSERT(i >= 0 && i < length());
     uint32_t block = data_[i / 32];
     return (block & (1U << (i % 32))) != 0;
@@ -98,6 +141,17 @@ class BitVector: public ZoneObject {
     }
   }
 
+  bool UnionIsChanged(const BitVector& other) {
+    ASSERT(other.length() == length());
+    bool changed = false;
+    for (int i = 0; i < data_length_; i++) {
+      uint32_t old_data = data_[i];
+      data_[i] |= other.data_[i];
+      if (data_[i] != old_data) changed = true;
+    }
+    return changed;
+  }
+
   void Intersect(const BitVector& other) {
     ASSERT(other.length() == length());
     for (int i = 0; i < data_length_; i++) {
@@ -139,16 +193,102 @@ class BitVector: public ZoneObject {
 #endif
 
  private:
-  static int SizeFor(int length) {
-    return 1 + ((length - 1) / 32);
-  }
-
   int length_;
   int data_length_;
   uint32_t* data_;
 };
 
 
+// An implementation of a sparse set whose elements are drawn from integers
+// in the range [0..universe_size[.  It supports constant-time Contains,
+// destructive Add, and destructive Remove operations, and linear-time (in
+// the number of elements) destructive Union.
+class SparseSet: public ZoneObject {
+ public:
+  // Iterator for sparse set elements.  Elements should not be added or
+  // removed during iteration.
+  class Iterator BASE_EMBEDDED {
+   public:
+    explicit Iterator(SparseSet* target) : target_(target), current_(0) {
+      ASSERT(++target->iterator_count_ > 0);
+    }
+    ~Iterator() {
+      ASSERT(target_->iterator_count_-- > 0);
+    }
+    bool Done() const { return current_ >= target_->dense_.length(); }
+    void Advance() {
+      ASSERT(!Done());
+      ++current_;
+    }
+    int Current() {
+      ASSERT(!Done());
+      return target_->dense_[current_];
+    }
+
+   private:
+    SparseSet* target_;
+    int current_;
+
+    friend class SparseSet;
+  };
+
+  explicit SparseSet(int universe_size)
+      : dense_(4),
+        sparse_(Zone::NewArray<int>(universe_size)) {
+#ifdef DEBUG
+    size_ = universe_size;
+    iterator_count_ = 0;
+#endif
+  }
+
+  bool Contains(int n) const {
+    ASSERT(0 <= n && n < size_);
+    int dense_index = sparse_[n];
+    return (0 <= dense_index) &&
+        (dense_index < dense_.length()) &&
+        (dense_[dense_index] == n);
+  }
+
+  void Add(int n) {
+    ASSERT(0 <= n && n < size_);
+    ASSERT(iterator_count_ == 0);
+    if (!Contains(n)) {
+      sparse_[n] = dense_.length();
+      dense_.Add(n);
+    }
+  }
+
+  void Remove(int n) {
+    ASSERT(0 <= n && n < size_);
+    ASSERT(iterator_count_ == 0);
+    if (Contains(n)) {
+      int dense_index = sparse_[n];
+      int last = dense_.RemoveLast();
+      if (dense_index < dense_.length()) {
+        dense_[dense_index] = last;
+        sparse_[last] = dense_index;
+      }
+    }
+  }
+
+  void Union(const SparseSet& other) {
+    for (int i = 0; i < other.dense_.length(); ++i) {
+      Add(other.dense_[i]);
+    }
+  }
+
+ private:
+  // The set is implemented as a pair of a growable dense list and an
+  // uninitialized sparse array.
+  ZoneList<int> dense_;
+  int* sparse_;
+#ifdef DEBUG
+  int size_;
+  int iterator_count_;
+#endif
+};
+
+
 // Simple fixed-capacity list-based worklist (managed as a queue) of
 // pointers to T.
 template<typename T>
@@ -198,10 +338,12 @@ class WorkList BASE_EMBEDDED {
 // is guaranteed to be a smi.
 class AssignedVariablesAnalyzer : public AstVisitor {
  public:
-  explicit AssignedVariablesAnalyzer() : info_(NULL) { }
-  bool Analyze(CompilationInfo* info);
+  static bool Analyze(CompilationInfo* info);
 
  private:
+  AssignedVariablesAnalyzer(CompilationInfo* info, int bits);
+  bool Analyze();
+
   Variable* FindSmiLoopVariable(ForStatement* stmt);
 
   int BitIndex(Variable* var);
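
SparseSet above is the classic sparse-set trick (often attributed to Briggs and Torczon): membership is validated by the round trip dense[sparse[n]] == n, so the sparse array never needs initialization. A standalone sketch (std::vector zero-initializes, but the check would hold either way):

#include <cstdio>
#include <vector>

class SparseSet {
 public:
  explicit SparseSet(int universe_size) : sparse_(universe_size) {}

  bool Contains(int n) const {
    size_t d = sparse_[n];
    return d < dense_.size() && dense_[d] == n;
  }

  void Add(int n) {
    if (!Contains(n)) {
      sparse_[n] = dense_.size();
      dense_.push_back(n);
    }
  }

  void Remove(int n) {
    if (Contains(n)) {
      size_t d = sparse_[n];
      int last = dense_.back();
      dense_.pop_back();
      if (d < dense_.size()) {  // move the last element into the hole
        dense_[d] = last;
        sparse_[last] = d;
      }
    }
  }

 private:
  std::vector<int> dense_;      // the members, densely packed
  std::vector<size_t> sparse_;  // index of each member in dense_
};

int main() {
  SparseSet set(100);
  set.Add(7);
  set.Add(42);
  set.Remove(7);
  std::printf("%d %d\n", set.Contains(7), set.Contains(42));  // 0 1
  return 0;
}
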
index f3bf954..7709eb0 100644 (file)
@@ -35,6 +35,7 @@
 #include "compilation-cache.h"
 #include "compiler.h"
 #include "debug.h"
+#include "deoptimizer.h"
 #include "execution.h"
 #include "global-handles.h"
 #include "ic.h"
@@ -140,7 +141,9 @@ void BreakLocationIterator::Next() {
       Address target = original_rinfo()->target_address();
       Code* code = Code::GetCodeFromTargetAddress(target);
       if ((code->is_inline_cache_stub() &&
-           code->kind() != Code::BINARY_OP_IC) ||
+           !code->is_binary_op_stub() &&
+           !code->is_type_recording_binary_op_stub() &&
+           !code->is_compare_ic_stub()) ||
           RelocInfo::IsConstructCall(rmode())) {
         break_point_++;
         return;
@@ -1661,6 +1664,12 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
   // Ensure shared is compiled. Return false if this failed.
   if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
 
+  // If preparing for the first break point, make sure to deoptimize all
+  // functions, as debugging does not work with optimized code.
+  if (!has_break_points_) {
+    Deoptimizer::DeoptimizeAll();
+  }
+
   // Create the debug info object.
   Handle<DebugInfo> debug_info = Factory::NewDebugInfo(shared);
 
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
new file mode 100644 (file)
index 0000000..3aa2f35
--- /dev/null
@@ -0,0 +1,1139 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "disasm.h"
+#include "full-codegen.h"
+#include "global-handles.h"
+#include "macro-assembler.h"
+#include "prettyprinter.h"
+
+
+namespace v8 {
+namespace internal {
+
+LargeObjectChunk* Deoptimizer::eager_deoptimization_entry_code_ = NULL;
+LargeObjectChunk* Deoptimizer::lazy_deoptimization_entry_code_ = NULL;
+Deoptimizer* Deoptimizer::current_ = NULL;
+DeoptimizingCodeListNode* Deoptimizer::deoptimizing_code_list_ = NULL;
+
+
+Deoptimizer* Deoptimizer::New(JSFunction* function,
+                              BailoutType type,
+                              unsigned bailout_id,
+                              Address from,
+                              int fp_to_sp_delta) {
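+  // Park the deoptimizer in the single static slot; the caller that
+  // finishes the deoptimization retrieves it again through Grab().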
+  Deoptimizer* deoptimizer =
+      new Deoptimizer(function, type, bailout_id, from, fp_to_sp_delta);
+  ASSERT(current_ == NULL);
+  current_ = deoptimizer;
+  return deoptimizer;
+}
+
+
+Deoptimizer* Deoptimizer::Grab() {
+  Deoptimizer* result = current_;
+  ASSERT(result != NULL);
+  result->DeleteFrameDescriptions();
+  current_ = NULL;
+  return result;
+}
+
+
+void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
+                                                int count,
+                                                BailoutType type) {
+  TableEntryGenerator generator(masm, type, count);
+  generator.Generate();
+}
+
+
+class DeoptimizingVisitor : public OptimizedFunctionVisitor {
+ public:
+  virtual void EnterContext(Context* context) {
+    if (FLAG_trace_deopt) {
+      PrintF("[deoptimize context: %" V8PRIxPTR "]\n",
+             reinterpret_cast<intptr_t>(context));
+    }
+  }
+
+  virtual void VisitFunction(JSFunction* function) {
+    Deoptimizer::DeoptimizeFunction(function);
+  }
+
+  virtual void LeaveContext(Context* context) {
+    context->ClearOptimizedFunctions();
+  }
+};
+
+
+void Deoptimizer::DeoptimizeAll() {
+  AssertNoAllocation no_allocation;
+
+  if (FLAG_trace_deopt) {
+    PrintF("[deoptimize all contexts]\n");
+  }
+
+  DeoptimizingVisitor visitor;
+  VisitAllOptimizedFunctions(&visitor);
+}
+
+
+void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
+  AssertNoAllocation no_allocation;
+
+  DeoptimizingVisitor visitor;
+  VisitAllOptimizedFunctionsForGlobalObject(object, &visitor);
+}
+
+
+void Deoptimizer::VisitAllOptimizedFunctionsForContext(
+    Context* context, OptimizedFunctionVisitor* visitor) {
+  AssertNoAllocation no_allocation;
+
+  ASSERT(context->IsGlobalContext());
+
+  visitor->EnterContext(context);
+  // Run through the list of optimized functions and deoptimize them.
+  Object* element = context->OptimizedFunctionsListHead();
+  while (!element->IsUndefined()) {
+    JSFunction* element_function = JSFunction::cast(element);
+    // Get the next link before deoptimizing as deoptimizing will clear the
+    // next link.
+    element = element_function->next_function_link();
+    visitor->VisitFunction(element_function);
+  }
+  visitor->LeaveContext(context);
+}
+
+
+void Deoptimizer::VisitAllOptimizedFunctionsForGlobalObject(
+    JSObject* object, OptimizedFunctionVisitor* visitor) {
+  AssertNoAllocation no_allocation;
+
+  if (object->IsJSGlobalProxy()) {
+    Object* proto = object->GetPrototype();
+    ASSERT(proto->IsJSGlobalObject());
+    VisitAllOptimizedFunctionsForContext(
+        GlobalObject::cast(proto)->global_context(), visitor);
+  } else if (object->IsGlobalObject()) {
+    VisitAllOptimizedFunctionsForContext(
+        GlobalObject::cast(object)->global_context(), visitor);
+  }
+}
+
+
+void Deoptimizer::VisitAllOptimizedFunctions(
+    OptimizedFunctionVisitor* visitor) {
+  AssertNoAllocation no_allocation;
+
+  // Run through the list of all global contexts and deoptimize.
+  Object* global = Heap::global_contexts_list();
+  while (!global->IsUndefined()) {
+    VisitAllOptimizedFunctionsForGlobalObject(Context::cast(global)->global(),
+                                              visitor);
+    global = Context::cast(global)->get(Context::NEXT_CONTEXT_LINK);
+  }
+}
+
+
+void Deoptimizer::HandleWeakDeoptimizedCode(
+    v8::Persistent<v8::Value> obj, void* data) {
+  DeoptimizingCodeListNode* node =
+      reinterpret_cast<DeoptimizingCodeListNode*>(data);
+  RemoveDeoptimizingCode(*node->code());
+#ifdef DEBUG
+  node = Deoptimizer::deoptimizing_code_list_;
+  while (node != NULL) {
+    ASSERT(node != reinterpret_cast<DeoptimizingCodeListNode*>(data));
+    node = node->next();
+  }
+#endif
+}
+
+
+void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
+  deoptimizer->DoComputeOutputFrames();
+}
+
+
+Deoptimizer::Deoptimizer(JSFunction* function,
+                         BailoutType type,
+                         unsigned bailout_id,
+                         Address from,
+                         int fp_to_sp_delta)
+    : function_(function),
+      bailout_id_(bailout_id),
+      bailout_type_(type),
+      from_(from),
+      fp_to_sp_delta_(fp_to_sp_delta),
+      output_count_(0),
+      output_(NULL),
+      integer32_values_(NULL),
+      double_values_(NULL) {
+  if (FLAG_trace_deopt && type != OSR) {
+    PrintF("**** DEOPT: ");
+    function->PrintName();
+    PrintF(" at bailout #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
+           bailout_id,
+           reinterpret_cast<intptr_t>(from),
+           fp_to_sp_delta - (2 * kPointerSize));
+  } else if (FLAG_trace_osr && type == OSR) {
+    PrintF("**** OSR: ");
+    function->PrintName();
+    PrintF(" at ast id #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
+           bailout_id,
+           reinterpret_cast<intptr_t>(from),
+           fp_to_sp_delta - (2 * kPointerSize));
+  }
+  // Find the optimized code.
+  if (type == EAGER) {
+    ASSERT(from == NULL);
+    optimized_code_ = function_->code();
+  } else if (type == LAZY) {
+    optimized_code_ = FindDeoptimizingCodeFromAddress(from);
+    ASSERT(optimized_code_ != NULL);
+  } else if (type == OSR) {
+    // The function has already been optimized and we're transitioning
+    // from the unoptimized shared version to the optimized one in the
+    // function. The return address (from) points to unoptimized code.
+    optimized_code_ = function_->code();
+    ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
+    ASSERT(!optimized_code_->contains(from));
+  }
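+  // The ASSERT below also flips the debug-mode allocation guard off;
+  // the matching re-enable is in DeleteFrameDescriptions().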
+  ASSERT(Heap::allow_allocation(false));
+  unsigned size = ComputeInputFrameSize();
+  input_ = new(size) FrameDescription(size, function);
+}
+
+
+Deoptimizer::~Deoptimizer() {
+  ASSERT(input_ == NULL && output_ == NULL);
+  delete[] integer32_values_;
+  delete[] double_values_;
+}
+
+
+void Deoptimizer::DeleteFrameDescriptions() {
+  delete input_;
+  for (int i = 0; i < output_count_; ++i) {
+    if (output_[i] != input_) delete output_[i];
+  }
+  delete[] output_;
+  input_ = NULL;
+  output_ = NULL;
+  ASSERT(!Heap::allow_allocation(true));
+}
+
+
+Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
+  ASSERT(id >= 0);
+  if (id >= kNumberOfEntries) return NULL;
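+  // The entry stubs are generated back to back into a single chunk,
+  // each exactly table_entry_size_ bytes, so an entry's address is a
+  // simple offset from the chunk base.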
+  LargeObjectChunk* base = NULL;
+  if (type == EAGER) {
+    if (eager_deoptimization_entry_code_ == NULL) {
+      eager_deoptimization_entry_code_ = CreateCode(type);
+    }
+    base = eager_deoptimization_entry_code_;
+  } else {
+    if (lazy_deoptimization_entry_code_ == NULL) {
+      lazy_deoptimization_entry_code_ = CreateCode(type);
+    }
+    base = lazy_deoptimization_entry_code_;
+  }
+  return
+      static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_);
+}
+
+
+int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
+  LargeObjectChunk* base = NULL;
+  if (type == EAGER) {
+    base = eager_deoptimization_entry_code_;
+  } else {
+    base = lazy_deoptimization_entry_code_;
+  }
+  if (base == NULL ||
+      addr < base->GetStartAddress() ||
+      addr >= base->GetStartAddress() +
+          (kNumberOfEntries * table_entry_size_)) {
+    return kNotDeoptimizationEntry;
+  }
+  ASSERT_EQ(0,
+      static_cast<int>(addr - base->GetStartAddress()) % table_entry_size_);
+  return (addr - base->GetStartAddress()) / table_entry_size_;
+}
+
+
+void Deoptimizer::Setup() {
+  // Do nothing yet.
+}
+
+
+void Deoptimizer::TearDown() {
+  if (eager_deoptimization_entry_code_ != NULL) {
+    eager_deoptimization_entry_code_->Free(EXECUTABLE);
+    eager_deoptimization_entry_code_ = NULL;
+  }
+  if (lazy_deoptimization_entry_code_ != NULL) {
+    lazy_deoptimization_entry_code_->Free(EXECUTABLE);
+    lazy_deoptimization_entry_code_ = NULL;
+  }
+}
+
+
+unsigned Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
+                                    unsigned id,
+                                    SharedFunctionInfo* shared) {
+  // TODO(kasperl): For now, we do a simple linear search for the PC
+  // offset associated with the given node id. This should probably be
+  // changed to a binary search.
+  int length = data->DeoptPoints();
+  Smi* smi_id = Smi::FromInt(id);
+  for (int i = 0; i < length; i++) {
+    if (data->AstId(i) == smi_id) {
+      return data->PcAndState(i)->value();
+    }
+  }
+  PrintF("[couldn't find pc offset for node=%u]\n", id);
+  PrintF("[method: %s]\n", *shared->DebugName()->ToCString());
+  // Print the source code if available.
+  HeapStringAllocator string_allocator;
+  StringStream stream(&string_allocator);
+  shared->SourceCodePrint(&stream, -1);
+  PrintF("[source:\n%s\n]", *stream.ToCString());
+
+  UNREACHABLE();
+  return -1;
+}
+
+
+int Deoptimizer::GetDeoptimizedCodeCount() {
+  int length = 0;
+  DeoptimizingCodeListNode* node = Deoptimizer::deoptimizing_code_list_;
+  while (node != NULL) {
+    length++;
+    node = node->next();
+  }
+  return length;
+}
+
+
+void Deoptimizer::DoComputeOutputFrames() {
+  if (bailout_type_ == OSR) {
+    DoComputeOsrOutputFrame();
+    return;
+  }
+
+  // Print some helpful diagnostic information.
+  int64_t start = OS::Ticks();
+  if (FLAG_trace_deopt) {
+    PrintF("[deoptimizing%s: begin 0x%08" V8PRIxPTR " ",
+           (bailout_type_ == LAZY ? " (lazy)" : ""),
+           reinterpret_cast<intptr_t>(function_));
+    function_->PrintName();
+    PrintF(" @%d]\n", bailout_id_);
+  }
+
+  // Determine basic deoptimization information.  The optimized frame is
+  // described by the input data.
+  DeoptimizationInputData* input_data =
+      DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
+  unsigned node_id = input_data->AstId(bailout_id_)->value();
+  ByteArray* translations = input_data->TranslationByteArray();
+  unsigned translation_index =
+      input_data->TranslationIndex(bailout_id_)->value();
+
+  // Do the input frame to output frame(s) translation.
+  TranslationIterator iterator(translations, translation_index);
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator.Next());
+  ASSERT(Translation::BEGIN == opcode);
+  USE(opcode);
+  // Read the number of output frames and allocate an array for their
+  // descriptions.
+  int count = iterator.Next();
+  ASSERT(output_ == NULL);
+  output_ = new FrameDescription*[count];
+  // Per-frame lists of untagged and unboxed int32 and double values.
+  integer32_values_ = new List<ValueDescriptionInteger32>[count];
+  double_values_ = new List<ValueDescriptionDouble>[count];
+  for (int i = 0; i < count; ++i) {
+    output_[i] = NULL;
+    integer32_values_[i].Initialize(0);
+    double_values_[i].Initialize(0);
+  }
+  output_count_ = count;
+
+  // Translate each output frame.
+  for (int i = 0; i < count; ++i) {
+    DoComputeFrame(&iterator, i);
+  }
+
+  // Print some helpful diagnostic information.
+  if (FLAG_trace_deopt) {
+    double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+    int index = output_count_ - 1;  // Index of the topmost frame.
+    JSFunction* function = output_[index]->GetFunction();
+    PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
+           reinterpret_cast<intptr_t>(function));
+    function->PrintName();
+    PrintF(" => node=%u, pc=0x%08x, state=%s, took %0.3f ms]\n",
+           node_id,
+           output_[index]->GetPc(),
+           FullCodeGenerator::State2String(
+               static_cast<FullCodeGenerator::State>(
+                   output_[index]->GetState()->value())),
+           ms);
+  }
+}
+
+
+void Deoptimizer::InsertHeapNumberValues(int index, JavaScriptFrame* frame) {
+  // We need to adjust the stack index by one for the top-most frame.
+  int extra_slot_count = (index == output_count() - 1) ? 1 : 0;
+  List<ValueDescriptionInteger32>* ints = &integer32_values_[index];
+  for (int i = 0; i < ints->length(); i++) {
+    ValueDescriptionInteger32 value = ints->at(i);
+    double val = static_cast<double>(value.int32_value());
+    InsertHeapNumberValue(frame, value.stack_index(), val, extra_slot_count);
+  }
+
+  // Iterate over double values and convert them to a heap number.
+  List<ValueDescriptionDouble>* doubles = &double_values_[index];
+  for (int i = 0; i < doubles->length(); ++i) {
+    ValueDescriptionDouble value = doubles->at(i);
+    InsertHeapNumberValue(frame, value.stack_index(), value.double_value(),
+                          extra_slot_count);
+  }
+}
+
+
+void Deoptimizer::InsertHeapNumberValue(JavaScriptFrame* frame,
+                                        int stack_index,
+                                        double val,
+                                        int extra_slot_count) {
+  // Add one to the TOS index to account for the 'state' pushed before
+  // jumping to the stub that calls Runtime::NotifyDeoptimized.
+  int tos_index = stack_index + extra_slot_count;
+  int index = (frame->ComputeExpressionsCount() - 1) - tos_index;
+  if (FLAG_trace_deopt) PrintF("Allocating a new heap number: %e\n", val);
+  Handle<Object> num = Factory::NewNumber(val);
+  frame->SetExpression(index, *num);
+}
+
+
+void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
+                                     int frame_index,
+                                     unsigned output_offset) {
+  disasm::NameConverter converter;
+  // A GC-safe temporary placeholder that we can put in the output frame.
+  const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));
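+  // The smi placeholder keeps the GC happy until the real untagged
+  // values are written back by InsertHeapNumberValues().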
+
+  // Ignore commands marked as duplicate and act on the first non-duplicate.
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  while (opcode == Translation::DUPLICATE) {
+    opcode = static_cast<Translation::Opcode>(iterator->Next());
+    iterator->Skip(Translation::NumberOfOperandsFor(opcode));
+    opcode = static_cast<Translation::Opcode>(iterator->Next());
+  }
+
+  switch (opcode) {
+    case Translation::BEGIN:
+    case Translation::FRAME:
+    case Translation::DUPLICATE:
+      UNREACHABLE();
+      return;
+
+    case Translation::REGISTER: {
+      int input_reg = iterator->Next();
+      uint32_t input_value = input_->GetRegister(input_reg);
+      if (FLAG_trace_deopt) {
+        PrintF("    0x%08x: [top + %d] <- 0x%08x ; %s\n",
+               output_[frame_index]->GetTop() + output_offset,
+               output_offset,
+               input_value,
+               converter.NameOfCPURegister(input_reg));
+      }
+      output_[frame_index]->SetFrameSlot(output_offset, input_value);
+      return;
+    }
+
+    case Translation::INT32_REGISTER: {
+      int input_reg = iterator->Next();
+      uint32_t value = input_->GetRegister(input_reg);
+      bool is_smi = Smi::IsValid(value);
+      unsigned output_index = output_offset / kPointerSize;
+      if (FLAG_trace_deopt) {
+        PrintF("    0x%08x: [top + %d] <- %d ; %s (%s)\n",
+               output_[frame_index]->GetTop() + output_offset,
+               output_offset,
+               value,
+               converter.NameOfCPURegister(input_reg),
+               is_smi ? "smi" : "heap number");
+      }
+      if (is_smi) {
+        intptr_t tagged_value =
+            reinterpret_cast<intptr_t>(Smi::FromInt(value));
+        output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
+      } else {
+        // We save the untagged value on the side and store a GC-safe
+        // temporary placeholder in the frame.
+        AddInteger32Value(frame_index, output_index, value);
+        output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
+      }
+      return;
+    }
+
+    case Translation::DOUBLE_REGISTER: {
+      int input_reg = iterator->Next();
+      double value = input_->GetDoubleRegister(input_reg);
+      unsigned output_index = output_offset / kPointerSize;
+      if (FLAG_trace_deopt) {
+        PrintF("    0x%08x: [top + %d] <- %e ; %s\n",
+               output_[frame_index]->GetTop() + output_offset,
+               output_offset,
+               value,
+               DoubleRegister::AllocationIndexToString(input_reg));
+      }
+      // We save the untagged value on the side and store a GC-safe
+      // temporary placeholder in the frame.
+      AddDoubleValue(frame_index, output_index, value);
+      output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
+      return;
+    }
+
+    case Translation::STACK_SLOT: {
+      int input_slot_index = iterator->Next();
+      unsigned input_offset =
+          input_->GetOffsetFromSlotIndex(this, input_slot_index);
+      uint32_t input_value = input_->GetFrameSlot(input_offset);
+      if (FLAG_trace_deopt) {
+        PrintF("    0x%08x: [top + %d] <- 0x%08x ; [esp + %d]\n",
+               output_[frame_index]->GetTop() + output_offset,
+               output_offset,
+               input_value,
+               input_offset);
+      }
+      output_[frame_index]->SetFrameSlot(output_offset, input_value);
+      return;
+    }
+
+    case Translation::INT32_STACK_SLOT: {
+      int input_slot_index = iterator->Next();
+      unsigned input_offset =
+          input_->GetOffsetFromSlotIndex(this, input_slot_index);
+      int32_t value = input_->GetFrameSlot(input_offset);
+      bool is_smi = Smi::IsValid(value);
+      unsigned output_index = output_offset / kPointerSize;
+      if (FLAG_trace_deopt) {
+        PrintF("    0x%08x: [top + %d] <- %d ; [esp + %d] (%s)\n",
+               output_[frame_index]->GetTop() + output_offset,
+               output_offset,
+               value,
+               input_offset,
+               is_smi ? "smi" : "heap number");
+      }
+      if (is_smi) {
+        intptr_t tagged_value =
+            reinterpret_cast<intptr_t>(Smi::FromInt(value));
+        output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
+      } else {
+        // We save the untagged value on the side and store a GC-safe
+        // temporary placeholder in the frame.
+        AddInteger32Value(frame_index, output_index, value);
+        output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
+      }
+      return;
+    }
+
+    case Translation::DOUBLE_STACK_SLOT: {
+      int input_slot_index = iterator->Next();
+      unsigned input_offset =
+          input_->GetOffsetFromSlotIndex(this, input_slot_index);
+      double value = input_->GetDoubleFrameSlot(input_offset);
+      unsigned output_index = output_offset / kPointerSize;
+      if (FLAG_trace_deopt) {
+        PrintF("    0x%08x: [top + %d] <- %e ; [esp + %d]\n",
+               output_[frame_index]->GetTop() + output_offset,
+               output_offset,
+               value,
+               input_offset);
+      }
+      // We save the untagged value on the side and store a GC-safe
+      // temporary placeholder in the frame.
+      AddDoubleValue(frame_index, output_index, value);
+      output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
+      return;
+    }
+
+    case Translation::LITERAL: {
+      Object* literal = ComputeLiteral(iterator->Next());
+      if (FLAG_trace_deopt) {
+        PrintF("    0x%08x: [top + %d] <- ",
+               output_[frame_index]->GetTop() + output_offset,
+               output_offset);
+        literal->ShortPrint();
+        PrintF(" ; literal\n");
+      }
+      intptr_t value = reinterpret_cast<intptr_t>(literal);
+      output_[frame_index]->SetFrameSlot(output_offset, value);
+      return;
+    }
+
+    case Translation::ARGUMENTS_OBJECT: {
+      // Use the hole value as a sentinel and fill in the arguments object
+      // after the deoptimized frame is built.
+      ASSERT(frame_index == 0);  // Only supported for first frame.
+      if (FLAG_trace_deopt) {
+        PrintF("    0x%08x: [top + %d] <- ",
+               output_[frame_index]->GetTop() + output_offset,
+               output_offset);
+        Heap::the_hole_value()->ShortPrint();
+        PrintF(" ; arguments object\n");
+      }
+      intptr_t value = reinterpret_cast<intptr_t>(Heap::the_hole_value());
+      output_[frame_index]->SetFrameSlot(output_offset, value);
+      return;
+    }
+  }
+}
+
+
+bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
+                                        int* input_offset) {
+  disasm::NameConverter converter;
+  FrameDescription* output = output_[0];
+
+  // The input values are all part of the unoptimized frame so they
+  // are all tagged pointers.
+  uint32_t input_value = input_->GetFrameSlot(*input_offset);
+  Object* input_object = reinterpret_cast<Object*>(input_value);
+
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  bool duplicate = (opcode == Translation::DUPLICATE);
+  if (duplicate) {
+    opcode = static_cast<Translation::Opcode>(iterator->Next());
+  }
+
+  switch (opcode) {
+    case Translation::BEGIN:
+    case Translation::FRAME:
+    case Translation::DUPLICATE:
+      UNREACHABLE();  // Malformed input.
+      return false;
+
+    case Translation::REGISTER: {
+      int output_reg = iterator->Next();
+      if (FLAG_trace_osr) {
+        PrintF("    %s <- 0x%08x ; [esp + %d]\n",
+               converter.NameOfCPURegister(output_reg),
+               input_value,
+               *input_offset);
+      }
+      output->SetRegister(output_reg, input_value);
+      break;
+    }
+
+    case Translation::INT32_REGISTER: {
+      // Abort OSR if we don't have a number.
+      if (!input_object->IsNumber()) return false;
+
+      int output_reg = iterator->Next();
+      int int32_value = input_object->IsSmi()
+          ? Smi::cast(input_object)->value()
+          : FastD2I(input_object->Number());
+      // Abort the translation if the conversion lost information.
+      if (!input_object->IsSmi() &&
+          FastI2D(int32_value) != input_object->Number()) {
+        if (FLAG_trace_osr) {
+          PrintF("**** %g could not be converted to int32 ****\n",
+                 input_object->Number());
+        }
+        return false;
+      }
+      if (FLAG_trace_osr) {
+        PrintF("    %s <- %d (int32) ; [esp + %d]\n",
+               converter.NameOfCPURegister(output_reg),
+               int32_value,
+               *input_offset);
+      }
+      output->SetRegister(output_reg, int32_value);
+      break;
+    }
+
+    case Translation::DOUBLE_REGISTER: {
+      // Abort OSR if we don't have a number.
+      if (!input_object->IsNumber()) return false;
+
+      int output_reg = iterator->Next();
+      double double_value = input_object->Number();
+      if (FLAG_trace_osr) {
+        PrintF("    %s <- %g (double) ; [esp + %d]\n",
+               DoubleRegister::AllocationIndexToString(output_reg),
+               double_value,
+               *input_offset);
+      }
+      output->SetDoubleRegister(output_reg, double_value);
+      break;
+    }
+
+    case Translation::STACK_SLOT: {
+      int output_index = iterator->Next();
+      unsigned output_offset =
+          output->GetOffsetFromSlotIndex(this, output_index);
+      if (FLAG_trace_osr) {
+        PrintF("    [esp + %d] <- 0x%08x ; [esp + %d]\n",
+               output_offset,
+               input_value,
+               *input_offset);
+      }
+      output->SetFrameSlot(output_offset, input_value);
+      break;
+    }
+
+    case Translation::INT32_STACK_SLOT: {
+      // Abort OSR if we don't have a number.
+      if (!input_object->IsNumber()) return false;
+
+      int output_index = iterator->Next();
+      unsigned output_offset =
+          output->GetOffsetFromSlotIndex(this, output_index);
+      int int32_value = input_object->IsSmi()
+          ? Smi::cast(input_object)->value()
+          : DoubleToInt32(input_object->Number());
+      // Abort the translation if the conversion lost information.
+      if (!input_object->IsSmi() &&
+          FastI2D(int32_value) != input_object->Number()) {
+        if (FLAG_trace_osr) {
+          PrintF("**** %g could not be converted to int32 ****\n",
+                 input_object->Number());
+        }
+        return false;
+      }
+      if (FLAG_trace_osr) {
+        PrintF("    [esp + %d] <- %d (int32) ; [esp + %d]\n",
+               output_offset,
+               int32_value,
+               *input_offset);
+      }
+      output->SetFrameSlot(output_offset, int32_value);
+      break;
+    }
+
+    case Translation::DOUBLE_STACK_SLOT: {
+      static const int kLowerOffset = 0 * kPointerSize;
+      static const int kUpperOffset = 1 * kPointerSize;
+
+      // Abort OSR if we don't have a number.
+      if (!input_object->IsNumber()) return false;
+
+      int output_index = iterator->Next();
+      unsigned output_offset =
+          output->GetOffsetFromSlotIndex(this, output_index);
+      double double_value = input_object->Number();
+      uint64_t int_value = BitCast<uint64_t, double>(double_value);
+      int32_t lower = static_cast<int32_t>(int_value);
+      int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt);
+      if (FLAG_trace_osr) {
+        PrintF("    [esp + %d] <- 0x%08x (upper bits of %g) ; [esp + %d]\n",
+               output_offset + kUpperOffset,
+               upper,
+               double_value,
+               *input_offset);
+        PrintF("    [esp + %d] <- 0x%08x (lower bits of %g) ; [esp + %d]\n",
+               output_offset + kLowerOffset,
+               lower,
+               double_value,
+               *input_offset);
+      }
+      output->SetFrameSlot(output_offset + kLowerOffset, lower);
+      output->SetFrameSlot(output_offset + kUpperOffset, upper);
+      break;
+    }
+
+    case Translation::LITERAL: {
+      // Just ignore non-materialized literals.
+      iterator->Next();
+      break;
+    }
+
+    case Translation::ARGUMENTS_OBJECT: {
+      // Optimized code assumes that the arguments object has not been
+      // materialized and so bypasses it when doing arguments access.
+      // We should have bailed out before starting the frame
+      // translation.
+      UNREACHABLE();
+      return false;
+    }
+  }
+
+  if (!duplicate) *input_offset -= kPointerSize;
+  return true;
+}
+
+
+unsigned Deoptimizer::ComputeInputFrameSize() const {
+  unsigned fixed_size = ComputeFixedSize(function_);
+  // The fp-to-sp delta already takes the context and the function
+  // into account so we have to avoid double counting them (-2).
+  unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
+#ifdef DEBUG
+  if (bailout_type_ == OSR) {
+    // TODO(kasperl): It would be nice if we could verify that the
+    // size matches the stack height we can compute based on the
+    // environment at the OSR entry. The code for that is built into
+    // the DoComputeOsrOutputFrame function for now.
+  } else {
+    unsigned stack_slots = optimized_code_->stack_slots();
+    unsigned outgoing_size = ComputeOutgoingArgumentSize();
+    ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
+  }
+#endif
+  return result;
+}
+
+
+unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
+  // The fixed part of the frame consists of the return address, frame
+  // pointer, function, context, and all the incoming arguments.
+  static const unsigned kFixedSlotSize = 4 * kPointerSize;
+  return ComputeIncomingArgumentSize(function) + kFixedSlotSize;
+}
+
+
+unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
+  // The incoming arguments are the values of the formal parameters
+  // plus the receiver. Every slot contains a pointer.
+  unsigned arguments = function->shared()->formal_parameter_count() + 1;
+  return arguments * kPointerSize;
+}
+
+
+unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
+  DeoptimizationInputData* data = DeoptimizationInputData::cast(
+      optimized_code_->deoptimization_data());
+  unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
+  return height * kPointerSize;
+}
+
+
+Object* Deoptimizer::ComputeLiteral(int index) const {
+  DeoptimizationInputData* data = DeoptimizationInputData::cast(
+      optimized_code_->deoptimization_data());
+  FixedArray* literals = data->LiteralArray();
+  return literals->get(index);
+}
+
+
+void Deoptimizer::AddInteger32Value(int frame_index,
+                                    int slot_index,
+                                    int32_t value) {
+  ValueDescriptionInteger32 value_desc(slot_index, value);
+  integer32_values_[frame_index].Add(value_desc);
+}
+
+
+void Deoptimizer::AddDoubleValue(int frame_index,
+                                 int slot_index,
+                                 double value) {
+  ValueDescriptionDouble value_desc(slot_index, value);
+  double_values_[frame_index].Add(value_desc);
+}
+
+
+LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
+  // We cannot run this if the serializer is enabled because it would
+  // cause us to emit relocation information for the external
+  // references. The restriction is harmless because the deoptimizer's
+  // code section isn't meant to be serialized at all.
+  ASSERT(!Serializer::enabled());
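+  // Every entry in the table must occupy exactly table_entry_size_
+  // bytes, so keep FLAG_debug_code instrumentation out of them.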
+  bool old_debug_code = FLAG_debug_code;
+  FLAG_debug_code = false;
+
+  MacroAssembler masm(NULL, 16 * KB);
+  GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  ASSERT(desc.reloc_size == 0);
+
+  LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
+  memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
+  CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
+  FLAG_debug_code = old_debug_code;
+  return chunk;
+}
+
+
+Code* Deoptimizer::FindDeoptimizingCodeFromAddress(Address addr) {
+  DeoptimizingCodeListNode* node = Deoptimizer::deoptimizing_code_list_;
+  while (node != NULL) {
+    if (node->code()->contains(addr)) return *node->code();
+    node = node->next();
+  }
+  return NULL;
+}
+
+
+void Deoptimizer::RemoveDeoptimizingCode(Code* code) {
+  ASSERT(deoptimizing_code_list_ != NULL);
+  // Run through the code objects to find this one and remove it.
+  DeoptimizingCodeListNode* prev = NULL;
+  DeoptimizingCodeListNode* current = deoptimizing_code_list_;
+  while (current != NULL) {
+    if (*current->code() == code) {
+      // Unlink from list. If prev is NULL we are looking at the first element.
+      if (prev == NULL) {
+        deoptimizing_code_list_ = current->next();
+      } else {
+        prev->set_next(current->next());
+      }
+      delete current;
+      return;
+    }
+    // Move to next in list.
+    prev = current;
+    current = current->next();
+  }
+  // Deoptimizing code is removed through a weak callback. Each object
+  // is expected to be removed once and only once.
+  UNREACHABLE();
+}
+
+
+FrameDescription::FrameDescription(uint32_t frame_size,
+                                   JSFunction* function)
+    : frame_size_(frame_size),
+      function_(function),
+      top_(kZapUint32),
+      pc_(kZapUint32),
+      fp_(kZapUint32) {
+  // Zap all the registers.
+  for (int r = 0; r < Register::kNumRegisters; r++) {
+    SetRegister(r, kZapUint32);
+  }
+
+  // Zap all the slots.
+  for (unsigned o = 0; o < frame_size; o += kPointerSize) {
+    SetFrameSlot(o, kZapUint32);
+  }
+}
+
+
+unsigned FrameDescription::GetOffsetFromSlotIndex(Deoptimizer* deoptimizer,
+                                                  int slot_index) {
+  if (slot_index >= 0) {
+    // Local or spill slots. Skip the fixed part of the frame
+    // including all arguments.
+    unsigned base =
+        GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
+    return base - ((slot_index + 1) * kPointerSize);
+  } else {
+    // Incoming parameter.
+    unsigned base = GetFrameSize() -
+        deoptimizer->ComputeIncomingArgumentSize(GetFunction());
+    return base - ((slot_index + 1) * kPointerSize);
+  }
+}
+
+
+void TranslationBuffer::Add(int32_t value) {
+  // Encode the sign bit in the least significant bit.
+  bool is_negative = (value < 0);
+  uint32_t bits = ((is_negative ? -value : value) << 1) |
+      static_cast<int32_t>(is_negative);
+  // Encode the individual bytes using the least significant bit of
+  // each byte to indicate whether or not more bytes follow.
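+  // For example, -3 becomes the single byte 0x0E, and 200 becomes the
+  // byte pair 0x21 0x06 (low-order group first).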
+  do {
+    uint32_t next = bits >> 7;
+    contents_.Add(((bits << 1) & 0xFF) | (next != 0));
+    bits = next;
+  } while (bits != 0);
+}
+
+
+int32_t TranslationIterator::Next() {
+  ASSERT(HasNext());
+  // Run through the bytes until we reach one with a least significant
+  // bit of zero (marks the end).
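+  // This reverses TranslationBuffer::Add(): reassemble the 7-bit
+  // groups, then undo the sign encoding in the least significant bit.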
+  uint32_t bits = 0;
+  for (int i = 0; true; i += 7) {
+    uint8_t next = buffer_->get(index_++);
+    bits |= (next >> 1) << i;
+    if ((next & 1) == 0) break;
+  }
+  // The bits encode the sign in the least significant bit.
+  bool is_negative = (bits & 1) == 1;
+  int32_t result = bits >> 1;
+  return is_negative ? -result : result;
+}
+
+
+Handle<ByteArray> TranslationBuffer::CreateByteArray() {
+  int length = contents_.length();
+  Handle<ByteArray> result = Factory::NewByteArray(length, TENURED);
+  memcpy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
+  return result;
+}
+
+
+void Translation::BeginFrame(int node_id, int literal_id, unsigned height) {
+  buffer_->Add(FRAME);
+  buffer_->Add(node_id);
+  buffer_->Add(literal_id);
+  buffer_->Add(height);
+}
+
+
+void Translation::StoreRegister(Register reg) {
+  buffer_->Add(REGISTER);
+  buffer_->Add(reg.code());
+}
+
+
+void Translation::StoreInt32Register(Register reg) {
+  buffer_->Add(INT32_REGISTER);
+  buffer_->Add(reg.code());
+}
+
+
+void Translation::StoreDoubleRegister(DoubleRegister reg) {
+  buffer_->Add(DOUBLE_REGISTER);
+  buffer_->Add(DoubleRegister::ToAllocationIndex(reg));
+}
+
+
+void Translation::StoreStackSlot(int index) {
+  buffer_->Add(STACK_SLOT);
+  buffer_->Add(index);
+}
+
+
+void Translation::StoreInt32StackSlot(int index) {
+  buffer_->Add(INT32_STACK_SLOT);
+  buffer_->Add(index);
+}
+
+
+void Translation::StoreDoubleStackSlot(int index) {
+  buffer_->Add(DOUBLE_STACK_SLOT);
+  buffer_->Add(index);
+}
+
+
+void Translation::StoreLiteral(int literal_id) {
+  buffer_->Add(LITERAL);
+  buffer_->Add(literal_id);
+}
+
+
+void Translation::StoreArgumentsObject() {
+  buffer_->Add(ARGUMENTS_OBJECT);
+}
+
+
+void Translation::MarkDuplicate() {
+  buffer_->Add(DUPLICATE);
+}
+
+
+int Translation::NumberOfOperandsFor(Opcode opcode) {
+  switch (opcode) {
+    case ARGUMENTS_OBJECT:
+    case DUPLICATE:
+      return 0;
+    case BEGIN:
+    case REGISTER:
+    case INT32_REGISTER:
+    case DOUBLE_REGISTER:
+    case STACK_SLOT:
+    case INT32_STACK_SLOT:
+    case DOUBLE_STACK_SLOT:
+    case LITERAL:
+      return 1;
+    case FRAME:
+      return 3;
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+#ifdef DEBUG
+
+const char* Translation::StringFor(Opcode opcode) {
+  switch (opcode) {
+    case BEGIN:
+      return "BEGIN";
+    case FRAME:
+      return "FRAME";
+    case REGISTER:
+      return "REGISTER";
+    case INT32_REGISTER:
+      return "INT32_REGISTER";
+    case DOUBLE_REGISTER:
+      return "DOUBLE_REGISTER";
+    case STACK_SLOT:
+      return "STACK_SLOT";
+    case INT32_STACK_SLOT:
+      return "INT32_STACK_SLOT";
+    case DOUBLE_STACK_SLOT:
+      return "DOUBLE_STACK_SLOT";
+    case LITERAL:
+      return "LITERAL";
+    case ARGUMENTS_OBJECT:
+      return "ARGUMENTS_OBJECT";
+    case DUPLICATE:
+      return "DUPLICATE";
+  }
+  UNREACHABLE();
+  return "";
+}
+
+#endif
+
+
+DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code) : next_(NULL) {
+  // Globalize the code object and make it weak.
+  code_ = Handle<Code>::cast(GlobalHandles::Create(code));
+  GlobalHandles::MakeWeak(reinterpret_cast<Object**>(code_.location()),
+                          this,
+                          Deoptimizer::HandleWeakDeoptimizedCode);
+}
+
+
+DeoptimizingCodeListNode::~DeoptimizingCodeListNode() {
+  GlobalHandles::Destroy(reinterpret_cast<Object**>(code_.location()));
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
new file mode 100644 (file)
index 0000000..7a39d16
--- /dev/null
@@ -0,0 +1,511 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DEOPTIMIZER_H_
+#define V8_DEOPTIMIZER_H_
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "zone-inl.h"
+
+
+namespace v8 {
+namespace internal {
+
+class FrameDescription;
+class TranslationIterator;
+class DeoptimizingCodeListNode;
+
+
+class ValueDescription BASE_EMBEDDED {
+ public:
+  explicit ValueDescription(int index) : stack_index_(index) { }
+  int stack_index() const { return stack_index_; }
+
+ private:
+  // Offset relative to the top of the stack.
+  int stack_index_;
+};
+
+
+class ValueDescriptionInteger32: public ValueDescription {
+ public:
+  ValueDescriptionInteger32(int index, int32_t value)
+      : ValueDescription(index), int32_value_(value) { }
+  int32_t int32_value() const { return int32_value_; }
+
+ private:
+  // Raw value.
+  int32_t int32_value_;
+};
+
+
+class ValueDescriptionDouble: public ValueDescription {
+ public:
+  ValueDescriptionDouble(int index, double value)
+      : ValueDescription(index), double_value_(value) { }
+  double double_value() const { return double_value_; }
+
+ private:
+  // Raw value.
+  double double_value_;
+};
+
+
+class OptimizedFunctionVisitor BASE_EMBEDDED {
+ public:
+  virtual ~OptimizedFunctionVisitor() {}
+
+  // Function which is called before iteration of any optimized functions
+  // from the given global context.
+  virtual void EnterContext(Context* context) = 0;
+
+  virtual void VisitFunction(JSFunction* function) = 0;
+
+  // Function which is called after iteration of all optimized functions
+  // from the given global context.
+  virtual void LeaveContext(Context* context) = 0;
+};
+
+
+class Deoptimizer : public Malloced {
+ public:
+  enum BailoutType {
+    EAGER,
+    LAZY,
+    OSR
+  };
+
+  int output_count() const { return output_count_; }
+
+  static Deoptimizer* New(JSFunction* function,
+                          BailoutType type,
+                          unsigned bailout_id,
+                          Address from,
+                          int fp_to_sp_delta);
+  static Deoptimizer* Grab();
+
+  // Deoptimize the function now. Its current optimized code will never be run
+  // again and any activations of the optimized code will get deoptimized when
+  // execution returns.
+  static void DeoptimizeFunction(JSFunction* function);
+
+  // Deoptimize all functions in the heap.
+  static void DeoptimizeAll();
+
+  static void DeoptimizeGlobalObject(JSObject* object);
+
+  static void VisitAllOptimizedFunctionsForContext(
+      Context* context, OptimizedFunctionVisitor* visitor);
+
+  static void VisitAllOptimizedFunctionsForGlobalObject(
+      JSObject* object, OptimizedFunctionVisitor* visitor);
+
+  static void VisitAllOptimizedFunctions(OptimizedFunctionVisitor* visitor);
+
+  // Given the relocation info of a call to the stack check stub, patch the
+  // code so as to go unconditionally to the on-stack replacement builtin
+  // instead.
+  static void PatchStackCheckCode(RelocInfo* rinfo, Code* replacement_code);
+
+  // Given the relocation info of a call to the on-stack replacement
+  // builtin, patch the code back to the original stack check code.
+  static void RevertStackCheckCode(RelocInfo* rinfo, Code* check_code);
+
+  ~Deoptimizer();
+
+  void InsertHeapNumberValues(int index, JavaScriptFrame* frame);
+
+  static void ComputeOutputFrames(Deoptimizer* deoptimizer);
+
+  static Address GetDeoptimizationEntry(int id, BailoutType type);
+  static int GetDeoptimizationId(Address addr, BailoutType type);
+  static unsigned GetOutputInfo(DeoptimizationOutputData* data,
+                                unsigned node_id,
+                                SharedFunctionInfo* shared);
+
+  static void Setup();
+  static void TearDown();
+
+  // Code generation support.
+  static int input_offset() { return OFFSET_OF(Deoptimizer, input_); }
+  static int output_count_offset() {
+    return OFFSET_OF(Deoptimizer, output_count_);
+  }
+  static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
+
+  static int GetDeoptimizedCodeCount();
+
+  static const int kNotDeoptimizationEntry = -1;
+
+  // Generators for the deoptimization entry code.
+  class EntryGenerator BASE_EMBEDDED {
+   public:
+    EntryGenerator(MacroAssembler* masm, BailoutType type)
+        : masm_(masm), type_(type) { }
+    virtual ~EntryGenerator() { }
+
+    void Generate();
+
+   protected:
+    MacroAssembler* masm() const { return masm_; }
+    BailoutType type() const { return type_; }
+
+    virtual void GeneratePrologue() { }
+
+   private:
+    MacroAssembler* masm_;
+    Deoptimizer::BailoutType type_;
+  };
+
+  class TableEntryGenerator : public EntryGenerator {
+   public:
+    TableEntryGenerator(MacroAssembler* masm, BailoutType type, int count)
+        : EntryGenerator(masm, type), count_(count) { }
+
+   protected:
+    virtual void GeneratePrologue();
+
+   private:
+    int count() const { return count_; }
+
+    int count_;
+  };
+
+ private:
+  static const int kNumberOfEntries = 4096;
+
+  Deoptimizer(JSFunction* function,
+              BailoutType type,
+              unsigned bailout_id,
+              Address from,
+              int fp_to_sp_delta);
+  void DeleteFrameDescriptions();
+
+  void DoComputeOutputFrames();
+  void DoComputeOsrOutputFrame();
+  void DoComputeFrame(TranslationIterator* iterator, int frame_index);
+  void DoTranslateCommand(TranslationIterator* iterator,
+                          int frame_index,
+                          unsigned output_offset);
+  // Translate a command for OSR.  Updates the input offset to be used for
+  // the next command.  Returns false if translation of the command failed
+  // (e.g., a number conversion failed) and may or may not have updated the
+  // input offset.
+  bool DoOsrTranslateCommand(TranslationIterator* iterator,
+                             int* input_offset);
+
+  unsigned ComputeInputFrameSize() const;
+  unsigned ComputeFixedSize(JSFunction* function) const;
+
+  unsigned ComputeIncomingArgumentSize(JSFunction* function) const;
+  unsigned ComputeOutgoingArgumentSize() const;
+
+  Object* ComputeLiteral(int index) const;
+
+  void InsertHeapNumberValue(JavaScriptFrame* frame,
+                             int stack_index,
+                             double val,
+                             int extra_slot_count);
+
+  void AddInteger32Value(int frame_index, int slot_index, int32_t value);
+  void AddDoubleValue(int frame_index, int slot_index, double value);
+
+  static LargeObjectChunk* CreateCode(BailoutType type);
+  static void GenerateDeoptimizationEntries(
+      MacroAssembler* masm, int count, BailoutType type);
+
+  // Weak handle callback for deoptimizing code objects.
+  static void HandleWeakDeoptimizedCode(
+      v8::Persistent<v8::Value> obj, void* data);
+  static Code* FindDeoptimizingCodeFromAddress(Address addr);
+  static void RemoveDeoptimizingCode(Code* code);
+
+  static LargeObjectChunk* eager_deoptimization_entry_code_;
+  static LargeObjectChunk* lazy_deoptimization_entry_code_;
+  static Deoptimizer* current_;
+
+  // List of deoptimized code objects which still have references from
+  // active stack frames. The deoptimizer needs them when deoptimizing a
+  // frame whose function has since been given code different from the
+  // code that was current when the deoptimization was requested.
+  static DeoptimizingCodeListNode* deoptimizing_code_list_;
+
+  JSFunction* function_;
+  Code* optimized_code_;
+  unsigned bailout_id_;
+  BailoutType bailout_type_;
+  Address from_;
+  int fp_to_sp_delta_;
+
+  // Input frame description.
+  FrameDescription* input_;
+  // Number of output frames.
+  int output_count_;
+  // Array of output frame descriptions.
+  FrameDescription** output_;
+
+  List<ValueDescriptionInteger32>* integer32_values_;
+  List<ValueDescriptionDouble>* double_values_;
+
+  static int table_entry_size_;
+
+  friend class FrameDescription;
+  friend class DeoptimizingCodeListNode;
+};
+
+
+class FrameDescription {
+ public:
+  FrameDescription(uint32_t frame_size,
+                   JSFunction* function);
+
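+  // The frame's slots are allocated inline, directly after the object
+  // header; GetFrameSlotPointer() indexes into that trailing storage.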
+  void* operator new(size_t size, uint32_t frame_size) {
+    return malloc(size + frame_size);
+  }
+
+  void operator delete(void* description) {
+    free(description);
+  }
+
+  uint32_t GetFrameSize() const { return frame_size_; }
+
+  JSFunction* GetFunction() const { return function_; }
+
+  unsigned GetOffsetFromSlotIndex(Deoptimizer* deoptimizer, int slot_index);
+
+  uint32_t GetFrameSlot(unsigned offset) {
+    return *GetFrameSlotPointer(offset);
+  }
+
+  double GetDoubleFrameSlot(unsigned offset) {
+    return *reinterpret_cast<double*>(GetFrameSlotPointer(offset));
+  }
+
+  void SetFrameSlot(unsigned offset, uint32_t value) {
+    *GetFrameSlotPointer(offset) = value;
+  }
+
+  uint32_t GetRegister(unsigned n) const {
+    ASSERT(n < ARRAY_SIZE(registers_));
+    return registers_[n];
+  }
+
+  double GetDoubleRegister(unsigned n) const {
+    ASSERT(n < ARRAY_SIZE(double_registers_));
+    return double_registers_[n];
+  }
+
+  void SetRegister(unsigned n, uint32_t value) {
+    ASSERT(n < ARRAY_SIZE(registers_));
+    registers_[n] = value;
+  }
+
+  void SetDoubleRegister(unsigned n, double value) {
+    ASSERT(n < ARRAY_SIZE(double_registers_));
+    double_registers_[n] = value;
+  }
+
+  uint32_t GetTop() const { return top_; }
+  void SetTop(uint32_t top) { top_ = top; }
+
+  uint32_t GetPc() const { return pc_; }
+  void SetPc(uint32_t pc) { pc_ = pc; }
+
+  uint32_t GetFp() const { return fp_; }
+  void SetFp(uint32_t fp) { fp_ = fp; }
+
+  Smi* GetState() const { return state_; }
+  void SetState(Smi* state) { state_ = state; }
+
+  void SetContinuation(uint32_t pc) { continuation_ = pc; }
+
+  static int registers_offset() {
+    return OFFSET_OF(FrameDescription, registers_);
+  }
+
+  static int double_registers_offset() {
+    return OFFSET_OF(FrameDescription, double_registers_);
+  }
+
+  static int frame_size_offset() {
+    return OFFSET_OF(FrameDescription, frame_size_);
+  }
+
+  static int pc_offset() {
+    return OFFSET_OF(FrameDescription, pc_);
+  }
+
+  static int state_offset() {
+    return OFFSET_OF(FrameDescription, state_);
+  }
+
+  static int continuation_offset() {
+    return OFFSET_OF(FrameDescription, continuation_);
+  }
+
+  static int frame_content_offset() {
+    return sizeof(FrameDescription);
+  }
+
+ private:
+  static const uint32_t kZapUint32 = 0xbeeddead;
+
+  uint32_t frame_size_;  // Number of bytes.
+  JSFunction* function_;
+  uint32_t registers_[Register::kNumRegisters];
+  double double_registers_[DoubleRegister::kNumAllocatableRegisters];
+  uint32_t top_;
+  uint32_t pc_;
+  uint32_t fp_;
+  Smi* state_;
+
+  // Continuation is the PC where the execution continues after
+  // deoptimizing.
+  uint32_t continuation_;
+
+  uint32_t* GetFrameSlotPointer(unsigned offset) {
+    ASSERT(offset < frame_size_);
+    return reinterpret_cast<uint32_t*>(
+        reinterpret_cast<Address>(this) + frame_content_offset() + offset);
+  }
+};
+
+
+class TranslationBuffer BASE_EMBEDDED {
+ public:
+  TranslationBuffer() : contents_(256) { }
+
+  int CurrentIndex() const { return contents_.length(); }
+  void Add(int32_t value);
+
+  Handle<ByteArray> CreateByteArray();
+
+ private:
+  ZoneList<uint8_t> contents_;
+};
+
+
+class TranslationIterator BASE_EMBEDDED {
+ public:
+  TranslationIterator(ByteArray* buffer, int index)
+      : buffer_(buffer), index_(index) {
+    ASSERT(index >= 0 && index < buffer->length());
+  }
+
+  int32_t Next();
+
+  bool HasNext() const { return index_ >= 0; }
+
+  void Done() { index_ = -1; }
+
+  void Skip(int n) {
+    for (int i = 0; i < n; i++) Next();
+  }
+
+ private:
+  ByteArray* buffer_;
+  int index_;
+};
+
+
+class Translation BASE_EMBEDDED {
+ public:
+  enum Opcode {
+    BEGIN,
+    FRAME,
+    REGISTER,
+    INT32_REGISTER,
+    DOUBLE_REGISTER,
+    STACK_SLOT,
+    INT32_STACK_SLOT,
+    DOUBLE_STACK_SLOT,
+    LITERAL,
+    ARGUMENTS_OBJECT,
+
+    // A prefix indicating that the next command is a duplicate of the one
+    // that follows it.
+    DUPLICATE
+  };
+
+  Translation(TranslationBuffer* buffer, int frame_count)
+      : buffer_(buffer),
+        index_(buffer->CurrentIndex()) {
+    buffer_->Add(BEGIN);
+    buffer_->Add(frame_count);
+  }
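+
+  // A typical recording sequence is one BeginFrame() per output frame,
+  // followed by one Store*() command per slot in that frame.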
+
+  int index() const { return index_; }
+
+  // Commands.
+  void BeginFrame(int node_id, int literal_id, unsigned height);
+  void StoreRegister(Register reg);
+  void StoreInt32Register(Register reg);
+  void StoreDoubleRegister(DoubleRegister reg);
+  void StoreStackSlot(int index);
+  void StoreInt32StackSlot(int index);
+  void StoreDoubleStackSlot(int index);
+  void StoreLiteral(int literal_id);
+  void StoreArgumentsObject();
+  void MarkDuplicate();
+
+  static int NumberOfOperandsFor(Opcode opcode);
+
+#ifdef DEBUG
+  static const char* StringFor(Opcode opcode);
+#endif
+
+ private:
+  TranslationBuffer* buffer_;
+  int index_;
+};
+
+
+// Linked list holding deoptimizing code objects. The deoptimizing code objects
+// are kept as weak handles until they are no longer activated on the stack.
+class DeoptimizingCodeListNode : public Malloced {
+ public:
+  explicit DeoptimizingCodeListNode(Code* code);
+  ~DeoptimizingCodeListNode();
+
+  DeoptimizingCodeListNode* next() const { return next_; }
+  void set_next(DeoptimizingCodeListNode* next) { next_ = next; }
+  Handle<Code> code() const { return code_; }
+
+ private:
+  // Global (weak) handle to the deoptimizing code object.
+  Handle<Code> code_;
+
+  // Next pointer for linked list.
+  DeoptimizingCodeListNode* next_;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_DEOPTIMIZER_H_
index 2a4ea74..bb0a072 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,7 @@
 #include "code-stubs.h"
 #include "codegen-inl.h"
 #include "debug.h"
+#include "deoptimizer.h"
 #include "disasm.h"
 #include "disassembler.h"
 #include "macro-assembler.h"
@@ -277,6 +278,15 @@ static int DecodeIt(FILE* f,
         } else {
           out.AddFormatted(" %s", Code::Kind2String(kind));
         }
+      } else if (rmode == RelocInfo::RUNTIME_ENTRY) {
+        // A runtime entry reloc info might be a deoptimization bailout.
+        Address addr = relocinfo.target_address();
+        int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER);
+        if (id == Deoptimizer::kNotDeoptimizationEntry) {
+          out.AddFormatted("    ;; %s", RelocInfo::RelocModeName(rmode));
+        } else {
+          out.AddFormatted("    ;; deoptimization bailout %d", id);
+        }
       } else {
         out.AddFormatted("    ;; %s", RelocInfo::RelocModeName(rmode));
       }
@@ -299,8 +309,17 @@ int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
 
 // Called by Code::CodePrint.
 void Disassembler::Decode(FILE* f, Code* code) {
-  byte* begin = Code::cast(code)->instruction_start();
-  byte* end = begin + Code::cast(code)->instruction_size();
+  int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
+      ? static_cast<int>(code->safepoint_table_start())
+      : code->instruction_size();
+  // If there might be a stack check table, stop before reaching it.
+  if (code->kind() == Code::FUNCTION) {
+    decode_size =
+        Min(decode_size, static_cast<int>(code->stack_check_table_start()));
+  }
+
+  byte* begin = code->instruction_start();
+  byte* end = begin + decode_size;
   V8NameConverter v8NameConverter(code);
   DecodeIt(f, v8NameConverter, begin, end);
 }
index 691d569..e88d9cd 100644 (file)
 #include "bootstrapper.h"
 #include "codegen-inl.h"
 #include "debug.h"
+#include "runtime-profiler.h"
 #include "simulator.h"
 #include "v8threads.h"
+#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -295,6 +297,25 @@ void StackGuard::TerminateExecution() {
 }
 
 
+bool StackGuard::IsRuntimeProfilerTick() {
+  ExecutionAccess access;
+  return thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK;
+}
+
+
+void StackGuard::RequestRuntimeProfilerTick() {
+  // Ignore calls if we're not optimizing or if we can't get the lock.
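+  // Lowering the limits to kInterruptLimit makes the next stack check
+  // in generated code fail, which routes execution into
+  // Execution::HandleStackGuardInterrupt().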
+  if (FLAG_opt && ExecutionAccess::TryLock()) {
+    thread_local_.interrupt_flags_ |= RUNTIME_PROFILER_TICK;
+    if (thread_local_.postpone_interrupts_nesting_ == 0) {
+      thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
+      Heap::SetStackLimits();
+    }
+    ExecutionAccess::Unlock();
+  }
+}
+
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
 bool StackGuard::IsDebugBreak() {
   ExecutionAccess access;
@@ -682,6 +703,12 @@ void Execution::ProcessDebugMesssages(bool debug_command_only) {
 #endif
 
 MaybeObject* Execution::HandleStackGuardInterrupt() {
+  Counters::stack_interrupts.Increment();
+  if (StackGuard::IsRuntimeProfilerTick()) {
+    Counters::runtime_profiler_ticks.Increment();
+    StackGuard::Continue(RUNTIME_PROFILER_TICK);
+    RuntimeProfiler::OptimizeNow();
+  }
 #ifdef ENABLE_DEBUGGER_SUPPORT
   if (StackGuard::IsDebugBreak() || StackGuard::IsDebugCommand()) {
     DebugBreakHelper();
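
RequestRuntimeProfilerTick() above signals HandleStackGuardInterrupt() by setting a bit in the interrupt flags and lowering the stack limit, so the next stack check traps into the runtime. The protocol as a self-contained sketch with the locking elided (all names illustrative):

    #include <stdint.h>

    enum InterruptFlagSketch { PROFILER_TICK_SKETCH = 1 << 5 };

    struct GuardStateSketch {
      int interrupt_flags;
      uintptr_t jslimit;  // Stack checks compare the sp against this.
    };

    // Profiler side: record the tick and force the next stack check to fail.
    void RequestTick(GuardStateSketch* s, uintptr_t interrupt_limit) {
      s->interrupt_flags |= PROFILER_TICK_SKETCH;
      s->jslimit = interrupt_limit;
    }

    // Interrupt-handler side: consume the tick, as Continue() does above.
    bool ConsumeTick(GuardStateSketch* s) {
      if ((s->interrupt_flags & PROFILER_TICK_SKETCH) == 0) return false;
      s->interrupt_flags &= ~PROFILER_TICK_SKETCH;
      return true;
    }
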
index a2ddc41..af8ad9a 100644 (file)
@@ -38,7 +38,8 @@ enum InterruptFlag {
   DEBUGBREAK = 1 << 1,
   DEBUGCOMMAND = 1 << 2,
   PREEMPT = 1 << 3,
-  TERMINATE = 1 << 4
+  TERMINATE = 1 << 4,
+  RUNTIME_PROFILER_TICK = 1 << 5
 };
 
 class Execution : public AllStatic {
@@ -175,6 +176,8 @@ class StackGuard : public AllStatic {
   static void Interrupt();
   static bool IsTerminateExecution();
   static void TerminateExecution();
+  static bool IsRuntimeProfilerTick();
+  static void RequestRuntimeProfilerTick();
 #ifdef ENABLE_DEBUGGER_SUPPORT
   static bool IsDebugBreak();
   static void DebugBreak();
index a05ff6c..83af447 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,6 +32,7 @@
 #include "execution.h"
 #include "factory.h"
 #include "macro-assembler.h"
+#include "objects.h"
 #include "objects-visiting.h"
 
 namespace v8 {
@@ -73,6 +74,26 @@ Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors) {
 }
 
 
+Handle<DeoptimizationInputData> Factory::NewDeoptimizationInputData(
+    int deopt_entry_count,
+    PretenureFlag pretenure) {
+  ASSERT(deopt_entry_count > 0);
+  CALL_HEAP_FUNCTION(DeoptimizationInputData::Allocate(deopt_entry_count,
+                                                       pretenure),
+                     DeoptimizationInputData);
+}
+
+
+Handle<DeoptimizationOutputData> Factory::NewDeoptimizationOutputData(
+    int deopt_entry_count,
+    PretenureFlag pretenure) {
+  ASSERT(deopt_entry_count > 0);
+  CALL_HEAP_FUNCTION(DeoptimizationOutputData::Allocate(deopt_entry_count,
+                                                        pretenure),
+                     DeoptimizationOutputData);
+}
+
+
 // Symbols are created in the old generation (data space).
 Handle<String> Factory::LookupSymbol(Vector<const char> string) {
   CALL_HEAP_FUNCTION(Heap::LookupSymbol(string), String);
@@ -243,6 +264,13 @@ Handle<ExternalArray> Factory::NewExternalArray(int length,
 }
 
 
+Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell(
+    Handle<Object> value) {
+  CALL_HEAP_FUNCTION(Heap::AllocateJSGlobalPropertyCell(*value),
+                     JSGlobalPropertyCell);
+}
+
+
 Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
   CALL_HEAP_FUNCTION(Heap::AllocateMap(type, instance_size), Map);
 }
@@ -333,6 +361,15 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
                   context->global_context());
   }
   result->set_literals(*literals);
+  result->set_next_function_link(Heap::undefined_value());
+
+  if (V8::UseCrankshaft() &&
+      FLAG_always_opt &&
+      result->is_compiled() &&
+      !function_info->is_toplevel() &&
+      function_info->allows_lazy_compilation()) {
+    result->MarkForLazyRecompilation();
+  }
   return result;
 }
 
index c014986..b7a2882 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -53,6 +53,12 @@ class Factory : public AllStatic {
   static Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
 
   static Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
+  static Handle<DeoptimizationInputData> NewDeoptimizationInputData(
+      int deopt_entry_count,
+      PretenureFlag pretenure);
+  static Handle<DeoptimizationOutputData> NewDeoptimizationOutputData(
+      int deopt_entry_count,
+      PretenureFlag pretenure);
 
   static Handle<String> LookupSymbol(Vector<const char> str);
   static Handle<String> LookupAsciiSymbol(const char* str) {
@@ -169,6 +175,9 @@ class Factory : public AllStatic {
       void* external_pointer,
       PretenureFlag pretenure = NOT_TENURED);
 
+  static Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
+      Handle<Object> value);
+
   static Handle<Map> NewMap(InstanceType type, int instance_size);
 
   static Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
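
The two deoptimization-data factory methods declared above are used from full-codegen.cc, which allocates tenured output data. A usage sketch inside VM internals (the entry counts are made up):

    Handle<DeoptimizationInputData> input =
        Factory::NewDeoptimizationInputData(16, TENURED);
    Handle<DeoptimizationOutputData> output =
        Factory::NewDeoptimizationOutputData(8, TENURED);
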
index 46feea7..37653a4 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -96,9 +96,56 @@ private:
 //
 #define FLAG FLAG_FULL
 
+// Flags for Crankshaft.
+#ifdef V8_TARGET_ARCH_IA32
+DEFINE_bool(crankshaft, true, "use crankshaft")
+#else
+DEFINE_bool(crankshaft, false, "use crankshaft")
+#endif
+DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
+DEFINE_bool(use_hydrogen, true, "use generated hydrogen for compilation")
+DEFINE_bool(build_lithium, true, "use lithium chunk builder")
+DEFINE_bool(alloc_lithium, true, "use lithium register allocator")
+DEFINE_bool(use_lithium, true, "use lithium code generator")
+DEFINE_bool(use_range, true, "use hydrogen range analysis")
+DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
+DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
+DEFINE_bool(use_peeling, false, "use loop peeling")
+DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
+DEFINE_bool(use_inlining, true, "use function inlining")
+DEFINE_bool(limit_inlining, true, "limit code size growth from inlining")
+DEFINE_bool(eliminate_empty_blocks, true, "eliminate empty blocks")
+DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
+DEFINE_bool(time_hydrogen, false, "timing for hydrogen")
+DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
+DEFINE_bool(trace_inlining, false, "trace inlining decisions")
+DEFINE_bool(trace_alloc, false, "trace register allocator")
+DEFINE_bool(trace_range, false, "trace range analysis")
+DEFINE_bool(trace_gvn, false, "trace global value numbering")
+DEFINE_bool(trace_environment, false, "trace lithium environments")
+DEFINE_bool(trace_representation, false, "trace representation types")
+DEFINE_bool(stress_pointer_maps, false, "pointer map for every instruction")
+DEFINE_bool(stress_environments, false, "environment for every instruction")
+DEFINE_int(deopt_every_n_times,
+           0,
+           "deoptimize every n times a deopt point is passed")
+DEFINE_bool(process_arguments_object, true, "try to deal with arguments object")
+DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
+DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
+DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
+DEFINE_bool(aggressive_loop_invariant_motion, true,
+            "aggressive motion of instructions out of loops")
+#ifdef V8_TARGET_ARCH_IA32
+DEFINE_bool(use_osr, true, "use on-stack replacement")
+#else
+DEFINE_bool(use_osr, false, "use on-stack replacement")
+#endif
+DEFINE_bool(trace_osr, false, "trace on-stack replacement")
+
 // assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
 DEFINE_bool(debug_code, false,
-            "generate extra code (comments, assertions) for debugging")
+            "generate extra code (assertions) for debugging")
+DEFINE_bool(code_comments, false, "emit comments in code disassembly")
 DEFINE_bool(emit_branch_hints, false, "emit branch hints")
 DEFINE_bool(peephole_optimization, true,
             "perform peephole optimizations in assembly code")
@@ -146,7 +193,14 @@ DEFINE_bool(mask_constants_with_cookie,
 
 // codegen.cc
 DEFINE_bool(lazy, true, "use lazy compilation")
+DEFINE_bool(trace_opt, false, "trace lazy optimization")
+DEFINE_bool(opt, true, "use adaptive optimizations")
+DEFINE_bool(opt_eagerly, false, "be more eager when adaptively optimizing")
+DEFINE_bool(always_opt, false, "always try to optimize functions")
+DEFINE_bool(prepare_always_opt, false, "prepare for turning on always opt")
 DEFINE_bool(debug_info, true, "add debug information to compiled functions")
+DEFINE_bool(deopt, true, "support deoptimization")
+DEFINE_bool(trace_deopt, false, "trace deoptimization")
 
 // compiler.cc
 DEFINE_bool(strict, false, "strict error checking")
@@ -365,6 +419,9 @@ DEFINE_bool(collect_heap_spill_statistics, false,
             "report heap spill statistics along with heap_stats "
             "(requires heap_stats)")
 
+// VM state
+DEFINE_bool(log_state_changes, false, "Log state changes.")
+
 // Regexp
 DEFINE_bool(regexp_possessive_quantifier,
             false,
@@ -397,7 +454,6 @@ DEFINE_bool(log_gc, false,
 DEFINE_bool(log_handles, false, "Log global handle events.")
 DEFINE_bool(log_snapshot_positions, false,
             "log positions of (de)serialized objects in the snapshot.")
-DEFINE_bool(log_state_changes, false, "Log state changes.")
 DEFINE_bool(log_suspect, false, "Log suspect operations.")
 DEFINE_bool(log_producers, false, "Log stack traces of JS objects allocations.")
 DEFINE_bool(compress_log, false,
@@ -446,6 +502,8 @@ DEFINE_bool(print_code_stubs, false, "print code stubs")
 
 // codegen-ia32.cc / codegen-arm.cc
 DEFINE_bool(print_code, false, "print generated code")
+DEFINE_bool(print_opt_code, false, "print optimized code")
+DEFINE_bool(print_code_verbose, false, "print more information for code")
 DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
 
 // Cleanup...
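
Each DEFINE_bool line above ultimately produces a FLAG_<name> variable that runtime code tests, e.g. the FLAG_opt check in StackGuard::RequestRuntimeProfilerTick(). A much-simplified sketch of that expansion (the real macros also register the flag for command-line parsing):

    // Simplified: each flag becomes a mutable global the VM consults.
    #define DEFINE_BOOL_SKETCH(name, default_value) \
      bool FLAG_##name = default_value;

    DEFINE_BOOL_SKETCH(opt, true)          // toggled via --opt / --noopt
    DEFINE_BOOL_SKETCH(always_opt, false)

    bool ShouldAlwaysOptimize() {
      return FLAG_opt && FLAG_always_opt;
    }
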
index bbe6bb7..c20f5ee 100644 (file)
@@ -279,7 +279,7 @@ static void SplitArgument(const char* arg,
   *value = NULL;
   *is_bool = false;
 
-  if (*arg == '-') {
+  if (arg != NULL && *arg == '-') {
     // find the begin of the flag name
     arg++;  // remove 1st '-'
     if (*arg == '-') {
index 48bb354..3b91b9d 100644 (file)
@@ -262,8 +262,8 @@ class FrameElement BASE_EMBEDDED {
   class CopiedField: public BitField<bool, 3, 1> {};
   class SyncedField: public BitField<bool, 4, 1> {};
   class UntaggedInt32Field: public BitField<bool, 5, 1> {};
-  class TypeInfoField: public BitField<int, 6, 6> {};
-  class DataField: public BitField<uint32_t, 12, 32 - 12> {};
+  class TypeInfoField: public BitField<int, 6, 7> {};
+  class DataField: public BitField<uint32_t, 13, 32 - 13> {};
 
   friend class VirtualFrame;
 };
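
The change above widens TypeInfoField from 6 to 7 bits, which pushes DataField up to start at bit 13 so the two fields stay disjoint. Under the assumption that BitField<T, shift, size> packs a value into bits [shift, shift + size), the pattern behaves like this sketch:

    #include <stdint.h>

    template <class T, int shift, int size>
    class BitFieldSketch {
     public:
      static const uint32_t kMask = ((1U << size) - 1U) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t field) {
        return static_cast<T>((field & kMask) >> shift);
      }
    };

    typedef BitFieldSketch<int, 6, 7> TypeInfoFieldSketch;          // bits 6..12
    typedef BitFieldSketch<uint32_t, 13, 32 - 13> DataFieldSketch;  // bits 13..31
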
index 3cdb015..775404d 100644 (file)
 
 #include "v8.h"
 
+#include "ast.h"
+#include "deoptimizer.h"
 #include "frames-inl.h"
+#include "full-codegen.h"
 #include "mark-compact.h"
+#include "safepoint-table.h"
 #include "scopeinfo.h"
 #include "string-stream.h"
 #include "top.h"
@@ -324,11 +328,33 @@ void SafeStackTraceFrameIterator::Advance() {
 #endif
 
 
+Code* StackFrame::GetSafepointData(Address pc,
+                                   uint8_t** safepoint_entry,
+                                   unsigned* stack_slots) {
+  PcToCodeCache::PcToCodeCacheEntry* entry = PcToCodeCache::GetCacheEntry(pc);
+  uint8_t* cached_safepoint_entry = entry->safepoint_entry;
+  if (cached_safepoint_entry == NULL) {
+    cached_safepoint_entry = entry->code->GetSafepointEntry(pc);
+    ASSERT(cached_safepoint_entry != NULL);  // No safepoint found.
+    entry->safepoint_entry = cached_safepoint_entry;
+  } else {
+    ASSERT(cached_safepoint_entry == entry->code->GetSafepointEntry(pc));
+  }
+
+  // Fill in the results and return the code.
+  Code* code = entry->code;
+  *safepoint_entry = cached_safepoint_entry;
+  *stack_slots = code->stack_slots();
+  return code;
+}
+
+
 bool StackFrame::HasHandler() const {
   StackHandlerIterator it(this, top_handler());
   return !it.done();
 }
 
+
 void StackFrame::IteratePc(ObjectVisitor* v,
                            Address* pc_address,
                            Code* holder) {
@@ -355,7 +381,16 @@ StackFrame::Type StackFrame::ComputeType(State* state) {
   // really the function.
   const int offset = StandardFrameConstants::kMarkerOffset;
   Object* marker = Memory::Object_at(state->fp + offset);
-  if (!marker->IsSmi()) return JAVA_SCRIPT;
+  if (!marker->IsSmi()) {
+    // If we're using a "safe" stack iterator, we treat optimized
+    // frames as normal JavaScript frames to avoid having to look
+    // into the heap to determine the state. This is safe as long
+    // as nobody tries to GC...
+    if (SafeStackFrameIterator::is_active()) return JAVA_SCRIPT;
+    Code::Kind kind = GetContainingCode(*(state->pc_address))->kind();
+    ASSERT(kind == Code::FUNCTION || kind == Code::OPTIMIZED_FUNCTION);
+    return (kind == Code::OPTIMIZED_FUNCTION) ? OPTIMIZED : JAVA_SCRIPT;
+  }
   return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
 }
 
@@ -488,6 +523,70 @@ bool StandardFrame::IsExpressionInsideHandler(int n) const {
 }
 
 
+void OptimizedFrame::Iterate(ObjectVisitor* v) const {
+#ifdef DEBUG
+  // Make sure that optimized frames do not contain any stack handlers.
+  StackHandlerIterator it(this, top_handler());
+  ASSERT(it.done());
+#endif
+
+  // Make sure that we're not doing "safe" stack frame iteration. We cannot
+  // possibly find pointers in optimized frames in that state.
+  ASSERT(!SafeStackFrameIterator::is_active());
+
+  // Compute the safepoint information.
+  unsigned stack_slots = 0;
+  uint8_t* safepoint_entry = NULL;
+  Code* code = StackFrame::GetSafepointData(
+      pc(), &safepoint_entry, &stack_slots);
+  unsigned slot_space = stack_slots * kPointerSize;
+
+  // Visit the outgoing parameters. This is usually dealt with by the
+  // callee, but while GC'ing we artificially lower the number of
+  // arguments to zero and let the caller deal with it.
+  Object** parameters_base = &Memory::Object_at(sp());
+  Object** parameters_limit = &Memory::Object_at(
+      fp() + JavaScriptFrameConstants::kFunctionOffset - slot_space);
+
+  // Visit the registers that contain pointers if any.
+  if (SafepointTable::HasRegisters(safepoint_entry)) {
+    for (int i = kNumSafepointRegisters - 1; i >= 0; i--) {
+      if (SafepointTable::HasRegisterAt(safepoint_entry, i)) {
+        int reg_stack_index = MacroAssembler::SafepointRegisterStackIndex(i);
+        v->VisitPointer(parameters_base + reg_stack_index);
+      }
+    }
+    // Skip the words containing the register values.
+    parameters_base += kNumSafepointRegisters;
+  }
+
+  // We're done dealing with the register bits.
+  safepoint_entry += kNumSafepointRegisters >> kBitsPerByteLog2;
+
+  // Visit the rest of the parameters.
+  v->VisitPointers(parameters_base, parameters_limit);
+
+  // Visit pointer spill slots and locals.
+  for (unsigned index = 0; index < stack_slots; index++) {
+    int byte_index = index >> kBitsPerByteLog2;
+    int bit_index = index & (kBitsPerByte - 1);
+    if ((safepoint_entry[byte_index] & (1U << bit_index)) != 0) {
+      v->VisitPointer(parameters_limit + index);
+    }
+  }
+
+  // Visit the context and the function.
+  Object** fixed_base = &Memory::Object_at(
+      fp() + JavaScriptFrameConstants::kFunctionOffset);
+  Object** fixed_limit = &Memory::Object_at(fp());
+  v->VisitPointers(fixed_base, fixed_limit);
+
+  // Visit the return address in the callee and incoming arguments.
+  IteratePc(v, pc_address(), code);
+  IterateArguments(v);
+}
+
+
 Object* JavaScriptFrame::GetParameter(int index) const {
   ASSERT(index >= 0 && index < ComputeParametersCount());
   const int offset = JavaScriptFrameConstants::kParam0Offset;
@@ -547,6 +646,185 @@ Address JavaScriptFrame::GetCallerStackPointer() const {
 }
 
 
+void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) {
+  ASSERT(functions->length() == 0);
+  functions->Add(JSFunction::cast(function()));
+}
+
+
+void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
+  ASSERT(functions->length() == 0);
+  Code* code_pointer = code();
+  int offset = pc() - code_pointer->address();
+  FrameSummary summary(receiver(),
+                       JSFunction::cast(function()),
+                       code_pointer,
+                       offset,
+                       IsConstructor());
+  functions->Add(summary);
+}
+
+
+void FrameSummary::Print() {
+  PrintF("receiver: ");
+  receiver_->ShortPrint();
+  PrintF("\nfunction: ");
+  function_->shared()->DebugName()->ShortPrint();
+  PrintF("\ncode: ");
+  code_->ShortPrint();
+  if (code_->kind() == Code::FUNCTION) PrintF(" NON-OPT");
+  if (code_->kind() == Code::OPTIMIZED_FUNCTION) PrintF(" OPT");
+  PrintF("\npc: %d\n", offset_);
+}
+
+
+void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
+  ASSERT(frames->length() == 0);
+  ASSERT(is_optimized());
+
+  int deopt_index = AstNode::kNoNumber;
+  DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
+
+  // BUG(3243555): Since we don't have a lazy-deopt registered at
+  // throw-statements, we can't use the translation at the call-site of
+  // throw. An entry with no deoptimization index indicates a call-site
+  // without a lazy-deopt. As a consequence we are not allowed to inline
+  // functions containing throw.
+  if (deopt_index == Safepoint::kNoDeoptimizationIndex) {
+    JavaScriptFrame::Summarize(frames);
+    return;
+  }
+
+  TranslationIterator it(data->TranslationByteArray(),
+                         data->TranslationIndex(deopt_index)->value());
+  Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+  ASSERT(opcode == Translation::BEGIN);
+  int frame_count = it.Next();
+
+  // We create the summary in reverse order because the frames
+  // in the deoptimization translation are ordered bottom-to-top.
+  int i = frame_count;
+  while (i > 0) {
+    opcode = static_cast<Translation::Opcode>(it.Next());
+    if (opcode == Translation::FRAME) {
+      // We don't inline constructor calls, so only the first, outermost
+      // frame can be a constructor frame in case of inlining.
+      bool is_constructor = (i == frame_count) && IsConstructor();
+
+      i--;
+      int ast_id = it.Next();
+      int function_id = it.Next();
+      it.Next();  // Skip height.
+      JSFunction* function =
+          JSFunction::cast(data->LiteralArray()->get(function_id));
+
+      // The translation commands are ordered and the receiver is always
+      // at the first position. Since we are always at a call when we need
+      // to construct a stack trace, the receiver is always in a stack slot.
+      opcode = static_cast<Translation::Opcode>(it.Next());
+      ASSERT(opcode == Translation::STACK_SLOT);
+      int input_slot_index = it.Next();
+
+      // Get the correct receiver in the optimized frame.
+      Object* receiver = NULL;
+      // Positive index means the value is spilled to the locals area. Negative
+      // means it is stored in the incoming parameter area.
+      if (input_slot_index >= 0) {
+        receiver = GetExpression(input_slot_index);
+      } else {
+        // Index -1 overlaps with the last parameter, -n with the first
+        // parameter, and (-n - 1) with the receiver, where n is the number
+        // of parameters of the outermost optimized frame.
+        int parameter_count = ComputeParametersCount();
+        int parameter_index = input_slot_index + parameter_count;
+        receiver = (parameter_index == -1)
+            ? this->receiver()
+            : this->GetParameter(parameter_index);
+      }
+
+      Code* code = function->shared()->code();
+      DeoptimizationOutputData* output_data =
+          DeoptimizationOutputData::cast(code->deoptimization_data());
+      unsigned entry = Deoptimizer::GetOutputInfo(output_data,
+                                                  ast_id,
+                                                  function->shared());
+      unsigned pc_offset =
+          FullCodeGenerator::PcField::decode(entry) + Code::kHeaderSize;
+      ASSERT(pc_offset > 0);
+
+      FrameSummary summary(receiver, function, code, pc_offset, is_constructor);
+      frames->Add(summary);
+    } else {
+      // Skip over operands to advance to the next opcode.
+      it.Skip(Translation::NumberOfOperandsFor(opcode));
+    }
+  }
+}
+
+
+DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
+    int* deopt_index) {
+  ASSERT(is_optimized());
+
+  JSFunction* opt_function = JSFunction::cast(function());
+  Code* code = opt_function->code();
+
+  // The code object may have been replaced by lazy deoptimization. Fall
+  // back to a slow search in this case to find the original optimized
+  // code object.
+  if (!code->contains(pc())) {
+    code = PcToCodeCache::GcSafeFindCodeForPc(pc());
+  }
+  ASSERT(code != NULL);
+  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+
+  SafepointTable table(code);
+  unsigned pc_offset = pc() - code->instruction_start();
+  for (unsigned i = 0; i < table.length(); i++) {
+    if (table.GetPcOffset(i) == pc_offset) {
+      *deopt_index = table.GetDeoptimizationIndex(i);
+      break;
+    }
+  }
+  ASSERT(*deopt_index != AstNode::kNoNumber);
+
+  return DeoptimizationInputData::cast(code->deoptimization_data());
+}
+
+
+void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
+  ASSERT(functions->length() == 0);
+  ASSERT(is_optimized());
+
+  int deopt_index = AstNode::kNoNumber;
+  DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
+
+  TranslationIterator it(data->TranslationByteArray(),
+                         data->TranslationIndex(deopt_index)->value());
+  Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+  ASSERT(opcode == Translation::BEGIN);
+  int frame_count = it.Next();
+
+  // We insert the frames in reverse order because the frames
+  // in the deoptimization translation are ordered bottom-to-top.
+  while (frame_count > 0) {
+    opcode = static_cast<Translation::Opcode>(it.Next());
+    if (opcode == Translation::FRAME) {
+      frame_count--;
+      it.Next();  // Skip ast id.
+      int function_id = it.Next();
+      it.Next();  // Skip height.
+      JSFunction* function =
+          JSFunction::cast(data->LiteralArray()->get(function_id));
+      functions->Add(function);
+    } else {
+      // Skip over operands to advance to the next opcode.
+      it.Skip(Translation::NumberOfOperandsFor(opcode));
+    }
+  }
+}
+
+
 Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
   const int arguments = Smi::cast(GetExpression(0))->value();
   const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -789,7 +1067,11 @@ void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
 void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
   IterateExpressions(v);
   IteratePc(v, pc_address(), code());
+  IterateArguments(v);
+}
 
+
+void JavaScriptFrame::IterateArguments(ObjectVisitor* v) const {
   // Traverse callee-saved registers, receiver, and parameters.
   const int kBaseOffset = JavaScriptFrameConstants::kSavedRegistersOffset;
   const int kLimitOffset = JavaScriptFrameConstants::kReceiverOffset;
@@ -851,6 +1133,7 @@ Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
   }
 }
 
+
 PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
   Counters::pc_to_code.Increment();
   ASSERT(IsPowerOf2(kPcToCodeCacheSize));
@@ -867,6 +1150,7 @@ PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
     // been set. Otherwise, we risk trying to use a cache entry before
     // the code has been computed.
     entry->code = GcSafeFindCodeForPc(pc);
+    entry->safepoint_entry = NULL;
     entry->pc = pc;
   }
   return entry;
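
OptimizedFrame::GetFunctions() and Summarize() above decode the same translation stream: a BEGIN opcode carrying a frame count, then one FRAME record of (ast id, function id, height) per physical or inlined frame, with all other opcodes skipped by their operand counts. The shape of that decoder, self-contained (the opcodes and operand counts are illustrative stand-ins for the real Translation encoding):

    #include <vector>

    enum OpcodeSketch { BEGIN_SK, FRAME_SK, STACK_SLOT_SK };

    static int NumberOfOperandsSketch(OpcodeSketch op) {
      switch (op) {
        case BEGIN_SK:      return 1;  // frame count
        case FRAME_SK:      return 3;  // ast id, function id, height
        case STACK_SLOT_SK: return 1;  // slot index
      }
      return 0;
    }

    // Collect the function ids of all (possibly inlined) frames, in the
    // bottom-to-top order in which they appear in the stream.
    std::vector<int> CollectFunctionIds(const std::vector<int>& stream) {
      std::vector<int> ids;
      size_t pos = 1;                    // stream[0] is BEGIN; skip it.
      int frame_count = stream[pos++];   // BEGIN's operand: frame count.
      while (frame_count > 0) {
        OpcodeSketch op = static_cast<OpcodeSketch>(stream[pos++]);
        if (op == FRAME_SK) {
          frame_count--;
          pos++;                          // Skip ast id.
          ids.push_back(stream[pos++]);   // Keep function id.
          pos++;                          // Skip height.
        } else {
          pos += NumberOfOperandsSketch(op);  // Skip other operands.
        }
      }
      return ids;
    }
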
index 2d4f338..778f9d2 100644 (file)
@@ -51,6 +51,7 @@ class PcToCodeCache : AllStatic {
   struct PcToCodeCacheEntry {
     Address pc;
     Code* code;
+    uint8_t* safepoint_entry;
   };
 
   static PcToCodeCacheEntry* cache(int index) {
@@ -115,6 +116,7 @@ class StackHandler BASE_EMBEDDED {
   V(ENTRY_CONSTRUCT,   EntryConstructFrame)   \
   V(EXIT,              ExitFrame)             \
   V(JAVA_SCRIPT,       JavaScriptFrame)       \
+  V(OPTIMIZED,         OptimizedFrame)        \
   V(INTERNAL,          InternalFrame)         \
   V(CONSTRUCT,         ConstructFrame)        \
   V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
@@ -158,12 +160,17 @@ class StackFrame BASE_EMBEDDED {
   bool is_entry() const { return type() == ENTRY; }
   bool is_entry_construct() const { return type() == ENTRY_CONSTRUCT; }
   bool is_exit() const { return type() == EXIT; }
-  bool is_java_script() const { return type() == JAVA_SCRIPT; }
+  bool is_optimized() const { return type() == OPTIMIZED; }
   bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
   bool is_internal() const { return type() == INTERNAL; }
   bool is_construct() const { return type() == CONSTRUCT; }
   virtual bool is_standard() const { return false; }
 
+  bool is_java_script() const {
+    Type type = this->type();
+    return (type == JAVA_SCRIPT) || (type == OPTIMIZED);
+  }
+
   // Accessors.
   Address sp() const { return state_.sp; }
   Address fp() const { return state_.fp; }
@@ -193,10 +200,17 @@ class StackFrame BASE_EMBEDDED {
   Code* code() const { return GetContainingCode(pc()); }
 
   // Get the code object that contains the given pc.
-  Code* GetContainingCode(Address pc) const {
+  static Code* GetContainingCode(Address pc) {
     return PcToCodeCache::GetCacheEntry(pc)->code;
   }
 
+  // Get the code object containing the given pc and fill in the
+  // safepoint entry and the number of stack slots. The pc must be at
+  // a safepoint.
+  static Code* GetSafepointData(Address pc,
+                                uint8_t** safepoint_entry,
+                                unsigned* stack_slots);
+
   virtual void Iterate(ObjectVisitor* v) const = 0;
   static void IteratePc(ObjectVisitor* v, Address* pc_address, Code* holder);
 
@@ -393,6 +407,36 @@ class StandardFrame: public StackFrame {
 };
 
 
+class FrameSummary BASE_EMBEDDED {
+ public:
+  FrameSummary(Object* receiver,
+               JSFunction* function,
+               Code* code,
+               int offset,
+               bool is_constructor)
+      : receiver_(receiver),
+        function_(function),
+        code_(code),
+        offset_(offset),
+        is_constructor_(is_constructor) { }
+  Handle<Object> receiver() { return receiver_; }
+  Handle<JSFunction> function() { return function_; }
+  Handle<Code> code() { return code_; }
+  Address pc() { return reinterpret_cast<Address>(*code_) + offset_; }
+  int offset() { return offset_; }
+  bool is_constructor() { return is_constructor_; }
+
+  void Print();
+
+ private:
+  Handle<Object> receiver_;
+  Handle<JSFunction> function_;
+  Handle<Code> code_;
+  int offset_;
+  bool is_constructor_;
+};
+
+
 class JavaScriptFrame: public StandardFrame {
  public:
   virtual Type type() const { return JAVA_SCRIPT; }
@@ -431,6 +475,12 @@ class JavaScriptFrame: public StandardFrame {
   // Determine the code for the frame.
   virtual Code* unchecked_code() const;
 
+  // Return a list with JSFunctions of this frame.
+  virtual void GetFunctions(List<JSFunction*>* functions);
+
+  // Build a list with summaries for this frame including all inlined frames.
+  virtual void Summarize(List<FrameSummary>* frames);
+
   static JavaScriptFrame* cast(StackFrame* frame) {
     ASSERT(frame->is_java_script());
     return static_cast<JavaScriptFrame*>(frame);
@@ -442,6 +492,10 @@ class JavaScriptFrame: public StandardFrame {
 
   virtual Address GetCallerStackPointer() const;
 
+  // Garbage collection support. Iterates over incoming arguments,
+  // receiver, and any callee-saved registers.
+  void IterateArguments(ObjectVisitor* v) const;
+
  private:
   inline Object* function_slot_object() const;
 
@@ -450,6 +504,31 @@ class JavaScriptFrame: public StandardFrame {
 };
 
 
+class OptimizedFrame : public JavaScriptFrame {
+ public:
+  virtual Type type() const { return OPTIMIZED; }
+
+  // GC support.
+  virtual void Iterate(ObjectVisitor* v) const;
+
+  // Return a list with JSFunctions of this frame.
+  // The functions are ordered bottom-to-top (i.e. functions.last()
+  // is the top-most activation)
+  virtual void GetFunctions(List<JSFunction*>* functions);
+
+  virtual void Summarize(List<FrameSummary>* frames);
+
+  DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
+
+ protected:
+  explicit OptimizedFrame(StackFrameIterator* iterator)
+      : JavaScriptFrame(iterator) { }
+
+ private:
+  friend class StackFrameIterator;
+};
+
+
 // Arguments adaptor frames are automatically inserted below
 // JavaScript frames when the actual number of parameters does not
 // match the formal number of parameters.
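
The spill-slot walk in OptimizedFrame::Iterate() (frames.cc above) treats the safepoint entry as a bitmap: stack slot i holds a pointer when bit i % 8 of byte i / 8 is set. That test in isolation, with constants mirroring kBitsPerByte and kBitsPerByteLog2:

    #include <stdint.h>

    const int kBitsPerByteSketch = 8;
    const int kBitsPerByteLog2Sketch = 3;

    // True when stack slot 'index' holds a tagged pointer per the bitmap.
    bool SlotHasPointer(const uint8_t* bitmap, unsigned index) {
      int byte_index = index >> kBitsPerByteLog2Sketch;  // index / 8
      int bit_index = index & (kBitsPerByteSketch - 1);  // index % 8
      return (bitmap[byte_index] & (1u << bit_index)) != 0;
    }
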
index 5cfaf89..4eb10c7 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 
 #include "codegen-inl.h"
 #include "compiler.h"
+#include "debug.h"
 #include "full-codegen.h"
+#include "liveedit.h"
 #include "macro-assembler.h"
+#include "prettyprinter.h"
 #include "scopes.h"
 #include "stub-cache.h"
-#include "debug.h"
-#include "liveedit.h"
 
 namespace v8 {
 namespace internal {
@@ -166,10 +167,6 @@ void BreakableStatementChecker::VisitConditional(Conditional* expr) {
 }
 
 
-void BreakableStatementChecker::VisitSlot(Slot* expr) {
-}
-
-
 void BreakableStatementChecker::VisitVariableProxy(VariableProxy* expr) {
 }
 
@@ -283,6 +280,9 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
     int len = String::cast(script->source())->length();
     Counters::total_full_codegen_source_size.Increment(len);
   }
+  if (FLAG_trace_codegen) {
+    PrintF("Full Compiler - ");
+  }
   CodeGenerator::MakeCodePrologue(info);
   const int kInitialBufferSize = 4 * KB;
   MacroAssembler masm(NULL, kInitialBufferSize);
@@ -293,14 +293,105 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
     ASSERT(!Top::has_pending_exception());
     return false;
   }
+  unsigned table_offset = cgen.EmitStackCheckTable();
 
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
   Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
+  code->set_optimizable(info->IsOptimizable());
+  cgen.PopulateDeoptimizationData(code);
+  code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
+  code->set_allow_osr_at_loop_nesting_level(0);
+  code->set_stack_check_table_start(table_offset);
+  CodeGenerator::PrintCode(code, info);
   info->SetCode(code);  // may be an empty handle.
   return !code.is_null();
 }
 
 
+unsigned FullCodeGenerator::EmitStackCheckTable() {
+  // The stack check table consists of a length (in number of entries)
+  // field, and then a sequence of entries.  Each entry is a pair of AST id
+  // and code-relative pc offset.
+  masm()->Align(kIntSize);
+  masm()->RecordComment("[ Stack check table");
+  unsigned offset = masm()->pc_offset();
+  unsigned length = stack_checks_.length();
+  __ dd(length);
+  for (unsigned i = 0; i < length; ++i) {
+    __ dd(stack_checks_[i].id);
+    __ dd(stack_checks_[i].pc_and_state);
+  }
+  masm()->RecordComment("]");
+  return offset;
+}
+
+
+void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
+  // Fill in the deoptimization information.
+  ASSERT(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty());
+  if (!info_->HasDeoptimizationSupport()) return;
+  int length = bailout_entries_.length();
+  Handle<DeoptimizationOutputData> data =
+      Factory::NewDeoptimizationOutputData(length, TENURED);
+  for (int i = 0; i < length; i++) {
+    data->SetAstId(i, Smi::FromInt(bailout_entries_[i].id));
+    data->SetPcAndState(i, Smi::FromInt(bailout_entries_[i].pc_and_state));
+  }
+  code->set_deoptimization_data(*data);
+}
+
+
+void FullCodeGenerator::PrepareForBailout(AstNode* node, State state) {
+  PrepareForBailoutForId(node->id(), state);
+}
+
+
+void FullCodeGenerator::RecordJSReturnSite(Call* call) {
+  // We record the offset of the function return so we can rebuild the frame
+  // if the function was inlined, i.e., this is the return address in the
+  // inlined function's frame.
+  //
+  // The state is ignored.  We defensively set it to TOS_REG, which is the
+  // real state of the unoptimized code at the return site.
+  PrepareForBailoutForId(call->ReturnId(), TOS_REG);
+#ifdef DEBUG
+  // In debug builds, mark the return so we can verify that this function
+  // was called.
+  ASSERT(!call->return_is_recorded_);
+  call->return_is_recorded_ = true;
+#endif
+}
+
+
+void FullCodeGenerator::PrepareForBailoutForId(int id, State state) {
+  // There's no need to prepare this code for bailouts from already optimized
+  // code or code that can't be optimized.
+  if (!FLAG_deopt || !info_->HasDeoptimizationSupport()) return;
+  unsigned pc_and_state =
+      StateField::encode(state) | PcField::encode(masm_->pc_offset());
+  BailoutEntry entry = { id, pc_and_state };
+#ifdef DEBUG
+  // Assert that we don't have multiple bailout entries for the same node.
+  for (int i = 0; i < bailout_entries_.length(); i++) {
+    if (bailout_entries_.at(i).id == entry.id) {
+      AstPrinter printer;
+      PrintF("%s", printer.PrintProgram(info_->function()));
+      UNREACHABLE();
+    }
+  }
+#endif  // DEBUG
+  bailout_entries_.Add(entry);
+}
+
+
+void FullCodeGenerator::RecordStackCheck(int ast_id) {
+  // The pc offset does not need to be encoded and packed together with a
+  // state.
+  BailoutEntry entry = { ast_id, masm_->pc_offset() };
+  stack_checks_.Add(entry);
+}
+
+
 int FullCodeGenerator::SlotOffset(Slot* slot) {
   ASSERT(slot != NULL);
   // Offset is negative because higher indexes are at lower addresses.
@@ -335,13 +426,11 @@ void FullCodeGenerator::EffectContext::Plug(Register reg) const {
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(Register reg) const {
-  // Move value into place.
   __ Move(result_register(), reg);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
-  // Move value into place.
   __ push(reg);
 }
 
@@ -349,6 +438,7 @@ void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
 void FullCodeGenerator::TestContext::Plug(Register reg) const {
   // For simplicity we always test the accumulator register.
   __ Move(result_register(), reg);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
@@ -370,6 +460,7 @@ void FullCodeGenerator::StackValueContext::PlugTOS() const {
 void FullCodeGenerator::TestContext::PlugTOS() const {
   // For simplicity we always test the accumulator register.
   __ pop(result_register());
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
@@ -614,7 +705,8 @@ void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
   switch (op) {
     case Token::COMMA:
       VisitForEffect(left);
-      Visit(right);
+      if (context()->IsTest()) ForwardBailoutToChild(expr);
+      context()->HandleExpression(right);
       break;
 
     case Token::OR:
@@ -670,7 +762,8 @@ void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
   context()->EmitLogicalLeft(expr, &eval_right, &done);
 
   __ bind(&eval_right);
-  Visit(expr->right());
+  if (context()->IsTest()) ForwardBailoutToChild(expr);
+  context()->HandleExpression(expr->right());
 
   __ bind(&done);
 }
@@ -692,15 +785,17 @@ void FullCodeGenerator::AccumulatorValueContext::EmitLogicalLeft(
     BinaryOperation* expr,
     Label* eval_right,
     Label* done) const {
-  codegen()->Visit(expr->left());
+  HandleExpression(expr->left());
   // We want the value in the accumulator for the test, and on the stack in case
   // we need it.
   __ push(result_register());
   Label discard, restore;
   if (expr->op() == Token::OR) {
+    codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
     codegen()->DoTest(&restore, &discard, &restore);
   } else {
     ASSERT(expr->op() == Token::AND);
+    codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
     codegen()->DoTest(&discard, &restore, &restore);
   }
   __ bind(&restore);
@@ -721,9 +816,11 @@ void FullCodeGenerator::StackValueContext::EmitLogicalLeft(
   __ push(result_register());
   Label discard;
   if (expr->op() == Token::OR) {
+    codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
     codegen()->DoTest(done, &discard, &discard);
   } else {
     ASSERT(expr->op() == Token::AND);
+    codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
     codegen()->DoTest(&discard, done, &discard);
   }
   __ bind(&discard);
@@ -745,12 +842,66 @@ void FullCodeGenerator::TestContext::EmitLogicalLeft(BinaryOperation* expr,
 }
 
 
+void FullCodeGenerator::ForwardBailoutToChild(Expression* expr) {
+  if (!info_->HasDeoptimizationSupport()) return;
+  ASSERT(context()->IsTest());
+  ASSERT(expr == forward_bailout_stack_->expr());
+  forward_bailout_pending_ = forward_bailout_stack_;
+}
+
+
+void FullCodeGenerator::EffectContext::HandleExpression(
+    Expression* expr) const {
+  codegen()->HandleInNonTestContext(expr, NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::HandleExpression(
+    Expression* expr) const {
+  codegen()->HandleInNonTestContext(expr, TOS_REG);
+}
+
+
+void FullCodeGenerator::StackValueContext::HandleExpression(
+    Expression* expr) const {
+  codegen()->HandleInNonTestContext(expr, NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::TestContext::HandleExpression(Expression* expr) const {
+  codegen()->VisitInTestContext(expr);
+}
+
+
+void FullCodeGenerator::HandleInNonTestContext(Expression* expr, State state) {
+  ASSERT(forward_bailout_pending_ == NULL);
+  AstVisitor::Visit(expr);
+  PrepareForBailout(expr, state);
+  // Forwarding bailouts to children is a one-shot operation. It
+  // should have been processed by this point.
+  ASSERT(forward_bailout_pending_ == NULL);
+}
+
+
+void FullCodeGenerator::VisitInTestContext(Expression* expr) {
+  ForwardBailoutStack stack(expr, forward_bailout_pending_);
+  ForwardBailoutStack* saved = forward_bailout_stack_;
+  forward_bailout_pending_ = NULL;
+  forward_bailout_stack_ = &stack;
+  AstVisitor::Visit(expr);
+  forward_bailout_stack_ = saved;
+}
+
+
 void FullCodeGenerator::VisitBlock(Block* stmt) {
   Comment cmnt(masm_, "[ Block");
   Breakable nested_statement(this, stmt);
   SetStatementPosition(stmt);
+
+  PrepareForBailoutForId(stmt->EntryId(), TOS_REG);
   VisitStatements(stmt->statements());
   __ bind(nested_statement.break_target());
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
 }
 
 
@@ -786,6 +937,7 @@ void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) {
     Visit(stmt->then_statement());
   }
   __ bind(&done);
+  PrepareForBailoutForId(stmt->id(), NO_REGISTERS);
 }
 
 
@@ -872,7 +1024,7 @@ void FullCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
 void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
   Comment cmnt(masm_, "[ DoWhileStatement");
   SetStatementPosition(stmt);
-  Label body, stack_limit_hit, stack_check_success, done;
+  Label body, stack_check;
 
   Iteration loop_statement(this, stmt);
   increment_loop_depth();
@@ -880,75 +1032,63 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
   __ bind(&body);
   Visit(stmt->body());
 
-  // Check stack before looping.
-  __ bind(loop_statement.continue_target());
-  __ StackLimitCheck(&stack_limit_hit);
-  __ bind(&stack_check_success);
-
   // Record the position of the do while condition and make sure it is
   // possible to break on the condition.
+  __ bind(loop_statement.continue_target());
+  PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
   SetExpressionPosition(stmt->cond(), stmt->condition_position());
   VisitForControl(stmt->cond(),
-                  &body,
+                  &stack_check,
                   loop_statement.break_target(),
-                  loop_statement.break_target());
+                  &stack_check);
 
-  __ bind(loop_statement.break_target());
-  __ jmp(&done);
-
-  __ bind(&stack_limit_hit);
-  StackCheckStub stack_stub;
-  __ CallStub(&stack_stub);
-  __ jmp(&stack_check_success);
+  // Check stack before looping.
+  __ bind(&stack_check);
+  EmitStackCheck(stmt);
+  __ jmp(&body);
 
-  __ bind(&done);
+  __ bind(loop_statement.break_target());
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
   decrement_loop_depth();
 }
 
 
 void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
   Comment cmnt(masm_, "[ WhileStatement");
-  Label body, stack_limit_hit, stack_check_success, done;
+  Label test, body;
 
   Iteration loop_statement(this, stmt);
   increment_loop_depth();
 
   // Emit the test at the bottom of the loop.
-  __ jmp(loop_statement.continue_target());
+  __ jmp(&test);
 
   __ bind(&body);
   Visit(stmt->body());
-  __ bind(loop_statement.continue_target());
 
   // Emit the statement position here as this is where the while
   // statement code starts.
+  __ bind(loop_statement.continue_target());
   SetStatementPosition(stmt);
 
   // Check stack before looping.
-  __ StackLimitCheck(&stack_limit_hit);
-  __ bind(&stack_check_success);
+  EmitStackCheck(stmt);
 
+  __ bind(&test);
   VisitForControl(stmt->cond(),
                   &body,
                   loop_statement.break_target(),
                   loop_statement.break_target());
 
   __ bind(loop_statement.break_target());
-  __ jmp(&done);
-
-  __ bind(&stack_limit_hit);
-  StackCheckStub stack_stub;
-  __ CallStub(&stack_stub);
-  __ jmp(&stack_check_success);
-
-  __ bind(&done);
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
   decrement_loop_depth();
 }
 
 
 void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
   Comment cmnt(masm_, "[ ForStatement");
-  Label test, body, stack_limit_hit, stack_check_success;
+  Label test, body;
 
   Iteration loop_statement(this, stmt);
   if (stmt->init() != NULL) {
@@ -959,30 +1099,25 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
   // Emit the test at the bottom of the loop (even if empty).
   __ jmp(&test);
 
-    __ bind(&stack_limit_hit);
-  StackCheckStub stack_stub;
-  __ CallStub(&stack_stub);
-  __ jmp(&stack_check_success);
-
   __ bind(&body);
   Visit(stmt->body());
 
   __ bind(loop_statement.continue_target());
+  PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
 
   SetStatementPosition(stmt);
   if (stmt->next() != NULL) {
     Visit(stmt->next());
   }
 
-  __ bind(&test);
   // Emit the statement position here as this is where the for
   // statement code starts.
   SetStatementPosition(stmt);
 
   // Check stack before looping.
-  __ StackLimitCheck(&stack_limit_hit);
-  __ bind(&stack_check_success);
+  EmitStackCheck(stmt);
 
+  __ bind(&test);
   if (stmt->cond() != NULL) {
     VisitForControl(stmt->cond(),
                     &body,
@@ -993,6 +1128,7 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
   }
 
   __ bind(loop_statement.break_target());
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
   decrement_loop_depth();
 }
 
@@ -1130,14 +1266,15 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
                     for_test->false_label(),
                     NULL);
   } else {
-    Visit(expr->then_expression());
+    context()->HandleExpression(expr->then_expression());
     __ jmp(&done);
   }
 
   __ bind(&false_case);
+  if (context()->IsTest()) ForwardBailoutToChild(expr);
   SetExpressionPosition(expr->else_expression(),
                         expr->else_expression_position());
-  Visit(expr->else_expression());
+  context()->HandleExpression(expr->else_expression());
   // If control flow falls through Visit, merge it with true case here.
   if (!context()->IsTest()) {
     __ bind(&done);
@@ -1145,12 +1282,6 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
 }
 
 
-void FullCodeGenerator::VisitSlot(Slot* expr) {
-  // Slots do not appear directly in the AST.
-  UNREACHABLE();
-}
-
-
 void FullCodeGenerator::VisitLiteral(Literal* expr) {
   Comment cmnt(masm_, "[ Literal");
   context()->Plug(expr->handle());
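
EmitStackCheckTable() above lays the table out as a 32-bit entry count followed by (ast id, pc-and-state) pairs, aligned to kIntSize. A reader for that layout, assuming the caller already has an aligned pointer to the table start (illustrative, not a real Code accessor):

    #include <stdint.h>

    struct StackCheckEntrySketch {
      uint32_t ast_id;
      uint32_t pc_and_state;
    };

    // Return the pc-and-state word recorded for 'ast_id', or 0 if absent.
    uint32_t FindStackCheck(const uint32_t* table, uint32_t ast_id) {
      uint32_t length = table[0];  // Number of entries, not bytes.
      const StackCheckEntrySketch* entries =
          reinterpret_cast<const StackCheckEntrySketch*>(table + 1);
      for (uint32_t i = 0; i < length; i++) {
        if (entries[i].ast_id == ast_id) return entries[i].pc_and_state;
      }
      return 0;
    }
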
index 257f20c..8d9fe2d 100644 (file)
@@ -31,6 +31,8 @@
 #include "v8.h"
 
 #include "ast.h"
+#include "code-stubs.h"
+#include "codegen.h"
 #include "compiler.h"
 
 namespace v8 {
@@ -66,17 +68,39 @@ class BreakableStatementChecker: public AstVisitor {
 
 class FullCodeGenerator: public AstVisitor {
  public:
+  enum State {
+    NO_REGISTERS,
+    TOS_REG
+  };
+
   explicit FullCodeGenerator(MacroAssembler* masm)
       : masm_(masm),
         info_(NULL),
         nesting_stack_(NULL),
         loop_depth_(0),
-        context_(NULL) {
+        context_(NULL),
+        bailout_entries_(0),
+        stack_checks_(2),  // There's always at least one.
+        forward_bailout_stack_(NULL),
+        forward_bailout_pending_(NULL) {
   }
 
   static bool MakeCode(CompilationInfo* info);
 
   void Generate(CompilationInfo* info);
+  void PopulateDeoptimizationData(Handle<Code> code);
+
+  class StateField : public BitField<State, 0, 8> { };
+  class PcField    : public BitField<unsigned, 8, 32 - 8> { };
+
+  static const char* State2String(State state) {
+    switch (state) {
+      case NO_REGISTERS: return "NO_REGISTERS";
+      case TOS_REG: return "TOS_REG";
+    }
+    UNREACHABLE();
+    return NULL;
+  }
 
  private:
   class Breakable;
@@ -229,6 +253,24 @@ class FullCodeGenerator: public AstVisitor {
     DISALLOW_COPY_AND_ASSIGN(ForIn);
   };
 
+  // The forward bailout stack keeps track of the expressions that can
+  // bail out to just before control flow is split in a child node. The
+  // stack elements are linked together through the parent link while
+  // visiting expressions in test contexts, after a bailout has been
+  // forwarded to a child.
+  class ForwardBailoutStack BASE_EMBEDDED {
+   public:
+    ForwardBailoutStack(Expression* expr, ForwardBailoutStack* parent)
+        : expr_(expr), parent_(parent) { }
+
+    Expression* expr() const { return expr_; }
+    ForwardBailoutStack* parent() const { return parent_; }
+
+   private:
+    Expression* const expr_;
+    ForwardBailoutStack* const parent_;
+  };
+
   enum ConstantOperand {
     kNoConstants,
     kLeftConstant,
@@ -274,19 +316,23 @@ class FullCodeGenerator: public AstVisitor {
   // register.
   MemOperand EmitSlotSearch(Slot* slot, Register scratch);
 
+  // Forward the bailout responsibility for the given expression to
+  // the next child visited (which must be in a test context).
+  void ForwardBailoutToChild(Expression* expr);
+
   void VisitForEffect(Expression* expr) {
     EffectContext context(this);
-    Visit(expr);
+    HandleInNonTestContext(expr, NO_REGISTERS);
   }
 
   void VisitForAccumulatorValue(Expression* expr) {
     AccumulatorValueContext context(this);
-    Visit(expr);
+    HandleInNonTestContext(expr, TOS_REG);
   }
 
   void VisitForStackValue(Expression* expr) {
     StackValueContext context(this);
-    Visit(expr);
+    HandleInNonTestContext(expr, NO_REGISTERS);
   }
 
   void VisitForControl(Expression* expr,
@@ -294,9 +340,15 @@ class FullCodeGenerator: public AstVisitor {
                        Label* if_false,
                        Label* fall_through) {
     TestContext context(this, if_true, if_false, fall_through);
-    Visit(expr);
+    VisitInTestContext(expr);
+    // Forwarding bailouts to children is a one-shot operation. It
+    // should have been processed by this point.
+    ASSERT(forward_bailout_pending_ == NULL);
   }
 
+  void HandleInNonTestContext(Expression* expr, State state);
+  void VisitInTestContext(Expression* expr);
+
   void VisitDeclarations(ZoneList<Declaration*>* declarations);
   void DeclareGlobals(Handle<FixedArray> pairs);
 
@@ -310,12 +362,39 @@ class FullCodeGenerator: public AstVisitor {
                          Label* if_false,
                          Label* fall_through);
 
+  // Bailout support.
+  void PrepareForBailout(AstNode* node, State state);
+  void PrepareForBailoutForId(int id, State state);
+
+  // Record a call's return site offset, used to rebuild the frame if the
+  // called function was inlined at the site.
+  void RecordJSReturnSite(Call* call);
+
+  // Prepare for bailout before a test (or compare) and branch.  If
+  // should_normalize, then the following comparison will not handle the
+  // canonical JS true value so we will insert a (dead) test against true at
+  // the actual bailout target from the optimized code. If not
+  // should_normalize, the true and false labels are ignored.
+  void PrepareForBailoutBeforeSplit(State state,
+                                    bool should_normalize,
+                                    Label* if_true,
+                                    Label* if_false);
+
   // Platform-specific code for a variable, constant, or function
   // declaration.  Functions have an initial value.
   void EmitDeclaration(Variable* variable,
                        Variable::Mode mode,
                        FunctionLiteral* function);
 
+  // Platform-specific code for checking the stack limit at the back edge of
+  // a loop.
+  void EmitStackCheck(IterationStatement* stmt);
+  // Record the OSR AST id corresponding to a stack check in the code.
+  void RecordStackCheck(int osr_ast_id);
+  // Emit a table of stack check ids and pcs into the code stream.  Return
+  // the offset of the start of the table.
+  unsigned EmitStackCheckTable();
+
   // Platform-specific return sequence
   void EmitReturnSequence();
 
@@ -471,14 +550,13 @@ class FullCodeGenerator: public AstVisitor {
 
   void VisitForTypeofValue(Expression* expr);
 
-  MacroAssembler* masm_;
-  CompilationInfo* info_;
+  struct BailoutEntry {
+    unsigned id;
+    unsigned pc_and_state;
+  };
 
-  Label return_label_;
-  NestedStatement* nesting_stack_;
-  int loop_depth_;
 
-  class ExpressionContext {
+  class ExpressionContext BASE_EMBEDDED {
    public:
     explicit ExpressionContext(FullCodeGenerator* codegen)
         : masm_(codegen->masm()), old_(codegen->context()), codegen_(codegen) {
@@ -504,7 +582,8 @@ class FullCodeGenerator: public AstVisitor {
 
     // Emit code to convert pure control flow to a pair of unbound labels into
     // the result expected according to this expression context.  The
-    // implementation may decide to bind either of the labels.
+    // implementation will bind both labels unless it's a TestContext, which
+    // won't bind them at this point.
     virtual void Plug(Label* materialize_true,
                       Label* materialize_false) const = 0;
 
@@ -526,12 +605,14 @@ class FullCodeGenerator: public AstVisitor {
                              Label** if_false,
                              Label** fall_through) const = 0;
 
+    virtual void HandleExpression(Expression* expr) const = 0;
+
    // Returns true if we are evaluating only for side effects (i.e. if the result
-    // will be discarded.
+    // will be discarded).
     virtual bool IsEffect() const { return false; }
 
     // Returns true if we are branching on the value rather than materializing
-    // it.
+    // it.  Only used for asserts.
     virtual bool IsTest() const { return false; }
 
    protected:
@@ -565,6 +646,7 @@ class FullCodeGenerator: public AstVisitor {
                              Label** if_true,
                              Label** if_false,
                              Label** fall_through) const;
+    virtual void HandleExpression(Expression* expr) const;
   };
 
   class StackValueContext : public ExpressionContext {
@@ -588,6 +670,7 @@ class FullCodeGenerator: public AstVisitor {
                              Label** if_true,
                              Label** if_false,
                              Label** fall_through) const;
+    virtual void HandleExpression(Expression* expr) const;
   };
 
   class TestContext : public ExpressionContext {
@@ -626,6 +709,7 @@ class FullCodeGenerator: public AstVisitor {
                              Label** if_true,
                              Label** if_false,
                              Label** fall_through) const;
+    virtual void HandleExpression(Expression* expr) const;
     virtual bool IsTest() const { return true; }
 
    private:
@@ -655,10 +739,20 @@ class FullCodeGenerator: public AstVisitor {
                              Label** if_true,
                              Label** if_false,
                              Label** fall_through) const;
+    virtual void HandleExpression(Expression* expr) const;
     virtual bool IsEffect() const { return true; }
   };
 
+  MacroAssembler* masm_;
+  CompilationInfo* info_;
+  Label return_label_;
+  NestedStatement* nesting_stack_;
+  int loop_depth_;
   const ExpressionContext* context_;
+  ZoneList<BailoutEntry> bailout_entries_;
+  ZoneList<BailoutEntry> stack_checks_;
+  ForwardBailoutStack* forward_bailout_stack_;
+  ForwardBailoutStack* forward_bailout_pending_;
 
   friend class NestedStatement;
 
index 5339840..18cdc5a 100644 (file)
@@ -30,6 +30,8 @@
 #include "api.h"
 #include "global-handles.h"
 
+#include "vm-state-inl.h"
+
 namespace v8 {
 namespace internal {
 
index 88c3e78..b56b835 100644 (file)
@@ -147,13 +147,16 @@ typedef byte* Address;
 #ifdef _MSC_VER
 #define V8_UINT64_C(x)  (x ## UI64)
 #define V8_INT64_C(x)   (x ## I64)
+#define V8_INTPTR_C(x)  (x ## I64)
 #define V8_PTR_PREFIX "ll"
 #else  // _MSC_VER
 #define V8_UINT64_C(x)  (x ## UL)
 #define V8_INT64_C(x)   (x ## L)
+#define V8_INTPTR_C(x)  (x ## L)
 #define V8_PTR_PREFIX "l"
 #endif  // _MSC_VER
 #else  // V8_HOST_ARCH_64_BIT
+#define V8_INTPTR_C(x)  (x)
 #define V8_PTR_PREFIX ""
 #endif  // V8_HOST_ARCH_64_BIT
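
The new V8_INTPTR_C macro above completes the V8_UINT64_C/V8_INT64_C family: it gives pointer-sized literals the suffix each host needs. A usage sketch using the macro defined above (the constant itself is made up):

    // A pointer-sized mask literal that compiles on 32- and 64-bit hosts.
    const intptr_t kPointerAlignmentMaskSketch = ~V8_INTPTR_C(0x3);
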
 
@@ -223,6 +226,7 @@ const int kBinary32MinExponent  = 0x01;
 const int kBinary32MantissaBits = 23;
 const int kBinary32ExponentShift = 23;
 
+
 // The expression OFFSET_OF(type, field) computes the byte-offset
 // of the specified field relative to the containing type. This
 // corresponds to 'offsetof' (in stddef.h), except that it doesn't
diff --git a/src/graph-codegen.cc.rej b/src/graph-codegen.cc.rej
new file mode 100644 (file)
index 0000000..f68facb
--- /dev/null
@@ -0,0 +1,214 @@
+--- src/graph-codegen.cc       (revision 757)
++++ src/graph-codegen.cc       (working copy)
+@@ -45,6 +45,7 @@
+       bailout_literals_(8),
+       arguments_stack_height_(0),
+       safepoint_pc_offsets_(32),
++      safepoint_bailout_ids_(32),
+       safepoint_span_indexes_(32),
+       current_block_(NULL),
+       next_block_(NULL),
+@@ -257,6 +258,7 @@
+   }
+ }
++
+ SpanList* SpanList::Insert(Span* span) {
+   return new SpanList(span, this);
+ }
+@@ -994,90 +996,9 @@
+     BAILOUT("attempted bailout when deoptimization is impossible");
+   }
+-  // Compute the output frame height.
+-  int height = environment_.ExpressionStackHeight();
+-
+-  // Build the translation.  The size is the part above the frame pointer.
+-  int translation_size = height + LocalCount();
+-  Translation translation(&translations_, translation_size);
+-
+-  // Total output frame size: Expression stack + locals + fixed elements +
+-  // parameters and receiver.
+-  int output_frame_size = translation_size + 4 + ParameterCount() + 1;
+-
+-  // The parameters are at the bottom of the frame.  They have negative
+-  // span indices that increase (go toward zero) as the parameter index
+-  // goes up.  They have positive destination indices that decrease as the
+-  // parameter index goes up.
+-  //
+-  // Output frame index of the slot above the last parameter.  First '1' is
+-  // receiver, second '1' is to convert to a zero-based index.
+-  int parameter_base = output_frame_size - (ParameterCount() + 1) - 1;
+-  EnvironmentIterator parameters(&environment_,
+-                                 EnvironmentIterator::PARAMETERS);
+-  while (parameters.HasNext()) {
+-    Span* span = parameters.Next()->span();
+-    ASSERT(span->HasFixedSpillSlot());
+-    int dest_index = parameter_base - span->index();
+-    if (span->IsAllocated()) {
+-      translation.MoveStackReg(dest_index, span->reg());
+-    } else if (span->IsSpilled()) {
+-      // Nothing to do. Parameter already in its fixed slot.
+-    } else {
+-      UNREACHABLE();
+-    }
+-  }
+-
+-  // Setup the locals.  Locals have positive span indices that increase as
+-  // the local index goes up.  They have positive output frame indices that
+-  // decrease as the local index goes up.
+-  int output_index = translation_size - 1;  // For local 0.
+-  EnvironmentIterator locals(&environment_, EnvironmentIterator::LOCALS);
+-  while (locals.HasNext()) {
+-    Span* span = locals.Next()->span();
+-    if (span->IsAllocated()) {
+-      translation.MoveStackReg(output_index, span->reg());
+-    } else if (span->IsSpilled()) {
+-      // TODO(kmillikin): spilled spans should be already in place in the
+-      // output frame.  Eliminate this move.
+-      translation.MoveStackStack(output_index, span->index());
+-    } else {
+-      UNREACHABLE();
+-    }
+-    --output_index;
+-  }
+-
+-  // Setup the rest of the expression stack.
+-  for (int i = 0; i < height; i++) {
+-    Expression* expr = environment_.ExpressionStackAt(i);
+-    Span* span = expr->span();
+-    if (span->IsAllocated()) {
+-      translation.MoveStackReg(i, span->reg());
+-    } else if (span->IsSpilled()) {
+-      translation.MoveStackStack(i, span->index());
+-    } else if (span->IsArgument()) {
+-      int index = arguments_stack_height_ - (span->ArgumentIndex() + 1);
+-      translation.MoveStackArgument(i, index);
+-    } else if (expr->AsLiteral() != NULL) {
+-      int index = DefineBailoutLiteral(expr->AsLiteral()->handle());
+-      translation.MoveStackLiteral(i, index);
+-    } else {
+-      UNREACHABLE();
+-    }
+-  }
+-
+-  // Emit the bailout information.
+-  int id = bailouts_.length();
+-  Bailout bailout = {
+-    node->id(),
+-    translation.index(),
+-    arguments_stack_height_,
+-  };
+-  bailouts_.Add(bailout);
++  unsigned id = RecordBailout(node);
+   Address entry = Deoptimizer::GetDeoptimizationEntry(id);
+-  if (entry == NULL) {
+-    BAILOUT("bailout was not prepared");
+-  }
++  if (entry == NULL) BAILOUT("too many bailouts");
+   __ j(cc, entry, RelocInfo::RUNTIME_ENTRY, not_taken);
+ }
+@@ -1920,9 +1841,102 @@
+   }
+   safepoint_pc_offsets_.Add(masm_->pc_offset());
+   safepoint_span_indexes_.Add(indexes);
++
++  // Record a bailout at every safe point.
++  unsigned id = RecordBailout(current_instruction());
++  safepoint_bailout_ids_.Add(id);
++  if (Deoptimizer::GetDeoptimizationEntry(id) == NULL) {
++    BAILOUT("too many bailouts");
++  }
+ }
++
++
++unsigned GraphCodeGenerator::RecordBailout(AstNode* node) {
++  // Compute the output frame height.
++  int height = environment_.ExpressionStackHeight();
++
++  // Build the translation.  The size is the part above the frame pointer.
++  int translation_size = height + LocalCount();
++  Translation translation(&translations_, translation_size);
++
++  // Total output frame size: Expression stack + locals + fixed elements +
++  // parameters and receiver.
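++  // (The '4' is presumably the fixed part of the frame: return address,
++  // saved frame pointer, context, and function.)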
++  int output_frame_size = translation_size + 4 + ParameterCount() + 1;
++
++  // The parameters are at the bottom of the frame.  They have negative
++  // span indices that increase (go toward zero) as the parameter index
++  // goes up.  They have positive destination indices that decrease as the
++  // parameter index goes up.
++  //
++  // Output frame index of the slot above the last parameter.  First '1' is
++  // receiver, second '1' is to convert to a zero-based index.
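++  // E.g. a span index of -1 maps to output slot parameter_base + 1, and
++  // -2 to parameter_base + 2.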
++  int parameter_base = output_frame_size - (ParameterCount() + 1) - 1;
++  EnvironmentIterator parameters(&environment_,
++                                 EnvironmentIterator::PARAMETERS);
++  while (parameters.HasNext()) {
++    Span* span = parameters.Next()->span();
++    ASSERT(span->HasFixedSpillSlot());
++    int dest_index = parameter_base - span->index();
++    if (span->IsAllocated()) {
++      translation.MoveStackReg(dest_index, span->reg());
++    } else if (span->IsSpilled()) {
++      // Nothing to do. Parameter already in its fixed slot.
++    } else {
++      UNREACHABLE();
++    }
++  }
++
++  // Set up the locals.  Locals have positive span indices that increase as
++  // the local index goes up.  They have positive output frame indices that
++  // decrease as the local index goes up.
++  int output_index = translation_size - 1;  // For local 0.
++  EnvironmentIterator locals(&environment_, EnvironmentIterator::LOCALS);
++  while (locals.HasNext()) {
++    Span* span = locals.Next()->span();
++    if (span->IsAllocated()) {
++      translation.MoveStackReg(output_index, span->reg());
++    } else if (span->IsSpilled()) {
++      // TODO(kmillikin): spilled spans should be already in place in the
++      // output frame.  Eliminate this move.
++      translation.MoveStackStack(output_index, span->index());
++    } else {
++      UNREACHABLE();
++    }
++    --output_index;
++  }
++
++  // Set up the rest of the expression stack.
++  for (int i = 0; i < height; i++) {
++    Expression* expr = environment_.ExpressionStackAt(i);
++    Span* span = expr->span();
++    if (span->IsAllocated()) {
++      translation.MoveStackReg(i, span->reg());
++    } else if (span->IsSpilled()) {
++      translation.MoveStackStack(i, span->index());
++    } else if (span->IsArgument()) {
++      int index = arguments_stack_height_ - (span->ArgumentIndex() + 1);
++      translation.MoveStackArgument(i, index);
++    } else if (expr->AsLiteral() != NULL) {
++      int index = DefineBailoutLiteral(expr->AsLiteral()->handle());
++      translation.MoveStackLiteral(i, index);
++    } else {
++      UNREACHABLE();
++    }
++  }
++
++  // Emit the bailout information.
++  unsigned id = bailouts_.length();
++  Bailout bailout = {
++    node->id(),
++    translation.index(),
++    arguments_stack_height_,
++  };
++  bailouts_.Add(bailout);
++  return id;
++}
++
++
+ unsigned GraphCodeGenerator::EmitSafepointTable(unsigned prologue_size) {
+   // Make sure the safepoint table is properly aligned. Pad with nops.
+   static const unsigned kTableAlignment = kIntSize;
diff --git a/src/graph-codegen.h.rej b/src/graph-codegen.h.rej
new file mode 100644 (file)
index 0000000..fcf72b3
--- /dev/null
@@ -0,0 +1,18 @@
+--- src/graph-codegen.h        (revision 757)
++++ src/graph-codegen.h        (working copy)
+@@ -254,6 +254,7 @@
+   // Support for recording safepoint information.
+   void RecordSafepoint();
++  unsigned RecordBailout(AstNode* node);
+   // Emits the safepoint table and returns the pc offset where the
+   // table starts after the instructions.
+@@ -286,6 +287,7 @@
+   // Safe point data structures.
+   ZoneList<unsigned> safepoint_pc_offsets_;
++  ZoneList<unsigned> safepoint_bailout_ids_;
+   ZoneList<ZoneList<int>*> safepoint_span_indexes_;
+   // Current and next basic block.
index 4430926..68c61b5 100644 (file)
@@ -39,6 +39,7 @@
 #include "runtime.h"
 #include "string-search.h"
 #include "stub-cache.h"
+#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -797,7 +798,7 @@ bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
 static bool CompileLazyHelper(CompilationInfo* info,
                               ClearExceptionFlag flag) {
   // Compile the source information to a code object.
-  ASSERT(!info->shared_info()->is_compiled());
+  ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
   bool result = Compiler::CompileLazy(info);
   ASSERT(result != Top::has_pending_exception());
   if (!result && flag == CLEAR_EXCEPTION) Top::clear_pending_exception();
@@ -814,36 +815,47 @@ bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
 
 bool CompileLazy(Handle<JSFunction> function,
                  ClearExceptionFlag flag) {
+  bool result = true;
   if (function->shared()->is_compiled()) {
-    function->set_code(function->shared()->code());
-    PROFILE(FunctionCreateEvent(*function));
+    function->ReplaceCode(function->shared()->code());
     function->shared()->set_code_age(0);
-    return true;
   } else {
     CompilationInfo info(function);
-    bool result = CompileLazyHelper(&info, flag);
+    result = CompileLazyHelper(&info, flag);
     ASSERT(!result || function->is_compiled());
+  }
+  if (result && function->is_compiled()) {
     PROFILE(FunctionCreateEvent(*function));
-    return result;
   }
+  return result;
 }
 
 
 bool CompileLazyInLoop(Handle<JSFunction> function,
                        ClearExceptionFlag flag) {
+  bool result = true;
   if (function->shared()->is_compiled()) {
-    function->set_code(function->shared()->code());
-    PROFILE(FunctionCreateEvent(*function));
+    function->ReplaceCode(function->shared()->code());
     function->shared()->set_code_age(0);
-    return true;
   } else {
     CompilationInfo info(function);
     info.MarkAsInLoop();
-    bool result = CompileLazyHelper(&info, flag);
+    result = CompileLazyHelper(&info, flag);
     ASSERT(!result || function->is_compiled());
+  }
+  if (result && function->is_compiled()) {
     PROFILE(FunctionCreateEvent(*function));
-    return result;
   }
+  return result;
+}
+
+
+bool CompileOptimized(Handle<JSFunction> function, int osr_ast_id) {
+  CompilationInfo info(function);
+  info.SetOptimizing(osr_ast_id);
+  bool result = CompileLazyHelper(&info, KEEP_EXCEPTION);
+  if (result) PROFILE(FunctionCreateEvent(*function));
+  return result;
 }
 
 
index 2e18ab3..8fd25dc 100644 (file)
@@ -342,6 +342,8 @@ bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
 
 bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);
 
+bool CompileOptimized(Handle<JSFunction> function, int osr_ast_id);
+
 class NoHandleAllocation BASE_EMBEDDED {
  public:
 #ifndef DEBUG
index ba50c0f..8f6fb98 100644 (file)
@@ -409,8 +409,8 @@ void Heap::SetLastScriptId(Object* last_script_id) {
       v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0", true);\
     }                                                                     \
     if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY;                \
-    Heap::CollectGarbage(Failure::cast(__maybe_object__)->                \
-                             allocation_space());                         \
+    Heap::CollectGarbage(                                                 \
+        Failure::cast(__maybe_object__)->allocation_space());             \
     __maybe_object__ = FUNCTION_CALL;                                     \
     if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE;            \
     if (__maybe_object__->IsOutOfMemory()) {                              \
index 26859d7..0497ad5 100644 (file)
 #include "mark-compact.h"
 #include "natives.h"
 #include "objects-visiting.h"
+#include "runtime-profiler.h"
 #include "scanner-base.h"
 #include "scopeinfo.h"
 #include "snapshot.h"
 #include "v8threads.h"
+#include "vm-state-inl.h"
 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
 #include "regexp-macro-assembler.h"
 #include "arm/regexp-macro-assembler-arm.h"
@@ -839,6 +841,8 @@ void Heap::MarkCompactPrologue(bool is_compacting) {
   ContextSlotCache::Clear();
   DescriptorLookupCache::Clear();
 
+  RuntimeProfiler::MarkCompactPrologue(is_compacting);
+
   CompilationCache::MarkCompactPrologue();
 
   CompletelyClearInstanceofCache();
@@ -1049,6 +1053,14 @@ void Heap::Scavenge() {
   // Scavenge object reachable from the global contexts list directly.
   scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
 
+  // Scavenge objects reachable from the runtime-profiler sampler
+  // window directly.
+  Object** sampler_window_address = RuntimeProfiler::SamplerWindowAddress();
+  int sampler_window_size = RuntimeProfiler::SamplerWindowSize();
+  scavenge_visitor.VisitPointers(
+      sampler_window_address,
+      sampler_window_address + sampler_window_size);
+
   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
 
   UpdateNewSpaceReferencesInExternalStringTable(
@@ -1116,6 +1128,40 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
 }
 
 
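+// Prune a weak list of functions chained through their next_function_link
+// fields, keeping only the elements the retainer retains, and return the
+// new head. E.g. if only B is retained from the list A -> B -> C, the
+// resulting list is just B.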
+static Object* ProcessFunctionWeakReferences(Object* function,
+                                             WeakObjectRetainer* retainer) {
+  Object* head = Heap::undefined_value();
+  JSFunction* tail = NULL;
+  Object* candidate = function;
+  while (!candidate->IsUndefined()) {
+    // Check whether to keep the candidate in the list.
+    JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
+    Object* retain = retainer->RetainAs(candidate);
+    if (retain != NULL) {
+      if (head->IsUndefined()) {
+        // First element in the list.
+        head = candidate_function;
+      } else {
+        // Subsequent elements in the list.
+        ASSERT(tail != NULL);
+        tail->set_next_function_link(candidate_function);
+      }
+      // Retained function is new tail.
+      tail = candidate_function;
+    }
+    // Move to next element in the list.
+    candidate = candidate_function->next_function_link();
+  }
+
+  // Terminate the list if it contains at least one element.
+  if (tail != NULL) {
+    tail->set_next_function_link(Heap::undefined_value());
+  }
+
+  return head;
+}
+
+
 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
   Object* head = undefined_value();
   Context* tail = NULL;
@@ -1137,6 +1183,15 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
       }
       // Retained context is new tail.
       tail = candidate_context;
+
+      // Process the weak list of optimized functions for the context.
+      Object* function_list_head =
+          ProcessFunctionWeakReferences(
+              candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
+              retainer);
+      candidate_context->set_unchecked(Context::OPTIMIZED_FUNCTIONS_LIST,
+                                       function_list_head,
+                                       UPDATE_WRITE_BARRIER);
     }
     // Move to next element in the list.
     candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
@@ -1651,6 +1706,11 @@ bool Heap::CreateInitialMaps() {
   }
   set_byte_array_map(Map::cast(obj));
 
+  { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_empty_byte_array(ByteArray::cast(obj));
+
   { MaybeObject* maybe_obj =
         AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize);
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2245,9 +2305,11 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
   share->set_debug_info(undefined_value());
   share->set_inferred_name(empty_string());
   share->set_compiler_hints(0);
+  share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
   share->set_initial_map(undefined_value());
   share->set_this_property_assignments_count(0);
   share->set_this_property_assignments(undefined_value());
+  share->set_opt_count(0);
   share->set_num_literals(0);
   share->set_end_position(0);
   share->set_function_token_position(0);
@@ -2666,6 +2728,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
   code->set_instruction_size(desc.instr_size);
   code->set_relocation_info(ByteArray::cast(reloc_info));
   code->set_flags(flags);
+  code->set_deoptimization_data(empty_fixed_array());
   // Allow self references to created code object by patching the handle to
   // point to the newly allocated Code object.
   if (!self_reference.is_null()) {
@@ -2794,6 +2857,7 @@ MaybeObject* Heap::InitializeFunction(JSFunction* function,
   function->set_prototype_or_initial_map(prototype);
   function->set_context(undefined_value());
   function->set_literals(empty_fixed_array());
+  function->set_next_function_link(undefined_value());
   return function;
 }
 
index 93caf3b..e4dcb4a 100644 (file)
@@ -62,6 +62,7 @@ namespace internal {
   V(Object, termination_exception, TerminationException)                       \
   V(Map, hash_table_map, HashTableMap)                                         \
   V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
+  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
   V(Map, string_map, StringMap)                                                \
   V(Map, ascii_string_map, AsciiStringMap)                                     \
   V(Map, symbol_map, SymbolMap)                                                \
@@ -173,6 +174,8 @@ namespace internal {
   V(value_of_symbol, "valueOf")                                          \
   V(InitializeVarGlobal_symbol, "InitializeVarGlobal")                   \
   V(InitializeConstGlobal_symbol, "InitializeConstGlobal")               \
+  V(KeyedLoadSpecialized_symbol, "KeyedLoadSpecialized")                 \
+  V(KeyedStoreSpecialized_symbol, "KeyedStoreSpecialized")               \
   V(stack_overflow_symbol, "kStackOverflowBoilerplate")                  \
   V(illegal_access_symbol, "illegal access")                             \
   V(out_of_memory_symbol, "out-of-memory")                               \
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
new file mode 100644 (file)
index 0000000..670dad8
--- /dev/null
@@ -0,0 +1,1482 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "factory.h"
+#include "hydrogen.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/lithium-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/lithium-arm.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                                         \
+  LInstruction* H##type::CompileToLithium(LChunkBuilder* builder) {  \
+    return builder->Do##type(this);                                  \
+  }
+HYDROGEN_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+
+const char* Representation::Mnemonic() const {
+  switch (kind_) {
+    case kNone: return "v";
+    case kTagged: return "t";
+    case kDouble: return "d";
+    case kInteger32: return "i";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
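+// Debug-only sanity checks: each helper performs the 32-bit operation and
+// ASSERTs that it matches the same operation carried out in 64 bits, i.e.
+// that no overflow occurred.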
+static int32_t AddAssertNoOverflow(int32_t a, int32_t b) {
+  ASSERT(static_cast<int64_t>(a + b) == (static_cast<int64_t>(a) +
+                                         static_cast<int64_t>(b)));
+  return a + b;
+}
+
+
+static int32_t SubAssertNoOverflow(int32_t a, int32_t b) {
+  ASSERT(static_cast<int64_t>(a - b) == (static_cast<int64_t>(a) -
+                                         static_cast<int64_t>(b)));
+  return a - b;
+}
+
+
+static int32_t MulAssertNoOverflow(int32_t a, int32_t b) {
+  ASSERT(static_cast<int64_t>(a * b) == (static_cast<int64_t>(a) *
+                                         static_cast<int64_t>(b)));
+  return a * b;
+}
+
+
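+// Saturating helpers for range arithmetic: on overflow they clamp to
+// kMaxInt or kMinInt instead of wrapping, e.g. AddWithoutOverflow(kMaxInt, 1)
+// yields kMaxInt.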
+static int32_t AddWithoutOverflow(int32_t a, int32_t b) {
+  if (b > 0) {
+    if (a <= kMaxInt - b) return AddAssertNoOverflow(a, b);
+    return kMaxInt;
+  } else {
+    if (a >= kMinInt - b) return AddAssertNoOverflow(a, b);
+    return kMinInt;
+  }
+}
+
+
+static int32_t SubWithoutOverflow(int32_t a, int32_t b) {
+  if (b < 0) {
+    if (a <= kMaxInt + b) return SubAssertNoOverflow(a, b);
+    return kMaxInt;
+  } else {
+    if (a >= kMinInt + b) return SubAssertNoOverflow(a, b);
+    return kMinInt;
+  }
+}
+
+
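+// Saturating multiply: when the product is not representable this sets
+// *overflow and clamps, e.g. (1 << 20) * (1 << 20) saturates to kMaxInt.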
+static int32_t MulWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
+  if (b == 0 || a == 0) return 0;
+  if (a == 1) return b;
+  if (b == 1) return a;
+
+  int sign = 1;
+  if ((a < 0 && b > 0) || (a > 0 && b < 0)) sign = -1;
+  if (a < 0) a = -a;
+  if (b < 0) b = -b;
+
+  if (kMaxInt / b > a && a != kMinInt && b != kMinInt) {
+    return MulAssertNoOverflow(a, b) * sign;
+  }
+
+  *overflow = true;
+  if (sign == 1) {
+    return kMaxInt;
+  } else {
+    return kMinInt;
+  }
+}
+
+
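+// A conservative bit mask covering all values in the range: a singleton
+// range [n, n] yields n itself, [3, 5] yields 7 (0b111), and a
+// non-singleton range with a negative lower bound yields 0xffffffff.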
+int32_t Range::Mask() const {
+  if (lower_ == upper_) return lower_;
+  if (lower_ >= 0) {
+    int32_t res = 1;
+    while (res < upper_) {
+      res = (res << 1) | 1;
+    }
+    return res;
+  }
+  return 0xffffffff;
+}
+
+
+void Range::Add(int32_t value) {
+  if (value == 0) return;
+  lower_ = AddWithoutOverflow(lower_, value);
+  upper_ = AddWithoutOverflow(upper_, value);
+  Verify();
+}
+
+
+// Returns whether the add may overflow.
+bool Range::AddAndCheckOverflow(Range* other) {
+  int old_lower = lower_;
+  int old_upper = upper_;
+  lower_ = AddWithoutOverflow(lower_, other->lower());
+  upper_ = AddWithoutOverflow(upper_, other->upper());
+  bool r = (old_lower + other->lower() != lower_ ||
+            old_upper + other->upper() != upper_);
+  KeepOrder();
+  Verify();
+  return r;
+}
+
+
+// Returns whether the sub may overflow.
+bool Range::SubAndCheckOverflow(Range* other) {
+  int old_lower = lower_;
+  int old_upper = upper_;
+  lower_ = SubWithoutOverflow(lower_, other->lower());
+  upper_ = SubWithoutOverflow(upper_, other->upper());
+  bool r = (old_lower - other->lower() != lower_ ||
+            old_upper - other->upper() != upper_);
+  KeepOrder();
+  Verify();
+  return r;
+}
+
+
+void Range::KeepOrder() {
+  if (lower_ > upper_) {
+    int32_t tmp = lower_;
+    lower_ = upper_;
+    upper_ = tmp;
+  }
+}
+
+
+void Range::Verify() const {
+  ASSERT(lower_ <= upper_);
+}
+
+
+// Returns whether the mul may overflow.
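+// The extreme values of a product over two intervals occur at one of the
+// four corner combinations, so combining those four products is sufficient.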
+bool Range::MulAndCheckOverflow(Range* other) {
+  bool may_overflow = false;
+  int v1 = MulWithoutOverflow(lower_, other->lower(), &may_overflow);
+  int v2 = MulWithoutOverflow(lower_, other->upper(), &may_overflow);
+  int v3 = MulWithoutOverflow(upper_, other->lower(), &may_overflow);
+  int v4 = MulWithoutOverflow(upper_, other->upper(), &may_overflow);
+  lower_ = Min(Min(v1, v2), Min(v3, v4));
+  upper_ = Max(Max(v1, v2), Max(v3, v4));
+  Verify();
+  return may_overflow;
+}
+
+
+const char* HType::ToString() {
+  switch (type_) {
+    case kTagged: return "tagged";
+    case kTaggedPrimitive: return "primitive";
+    case kTaggedNumber: return "number";
+    case kSmi: return "smi";
+    case kHeapNumber: return "heap-number";
+    case kString: return "string";
+    case kBoolean: return "boolean";
+    case kNonPrimitive: return "non-primitive";
+    case kJSArray: return "array";
+    case kJSObject: return "object";
+    case kUninitialized: return "uninitialized";
+  }
+  UNREACHABLE();
+  return "Unreachable code";
+}
+
+
+const char* HType::ToShortString() {
+  switch (type_) {
+    case kTagged: return "t";
+    case kTaggedPrimitive: return "p";
+    case kTaggedNumber: return "n";
+    case kSmi: return "m";
+    case kHeapNumber: return "h";
+    case kString: return "s";
+    case kBoolean: return "b";
+    case kNonPrimitive: return "r";
+    case kJSArray: return "a";
+    case kJSObject: return "o";
+    case kUninitialized: return "z";
+  }
+  UNREACHABLE();
+  return "Unreachable code";
+}
+
+
+HType HType::TypeFromValue(Handle<Object> value) {
+  HType result = HType::Tagged();
+  if (value->IsSmi()) {
+    result = HType::Smi();
+  } else if (value->IsHeapNumber()) {
+    result = HType::HeapNumber();
+  } else if (value->IsString()) {
+    result = HType::String();
+  } else if (value->IsBoolean()) {
+    result = HType::Boolean();
+  } else if (value->IsJSObject()) {
+    result = HType::JSObject();
+  } else if (value->IsJSArray()) {
+    result = HType::JSArray();
+  }
+  return result;
+}
+
+
+int HValue::LookupOperandIndex(int occurrence_index, HValue* op) const {
+  for (int i = 0; i < OperandCount(); ++i) {
+    if (OperandAt(i) == op) {
+      if (occurrence_index == 0) return i;
+      --occurrence_index;
+    }
+  }
+  return -1;
+}
+
+
+bool HValue::IsDefinedAfter(HBasicBlock* other) const {
+  return block()->block_id() > other->block_id();
+}
+
+
+bool HValue::UsesMultipleTimes(HValue* op) const {
+  bool seen = false;
+  for (int i = 0; i < OperandCount(); ++i) {
+    if (OperandAt(i) == op) {
+      if (seen) return true;
+      seen = true;
+    }
+  }
+  return false;
+}
+
+
+bool HValue::Equals(HValue* other) const {
+  if (other->opcode() != opcode()) return false;
+  if (!other->representation().Equals(representation())) return false;
+  if (!other->type_.Equals(type_)) return false;
+  if (OperandCount() != other->OperandCount()) return false;
+  for (int i = 0; i < OperandCount(); ++i) {
+    if (OperandAt(i)->id() != other->OperandAt(i)->id()) return false;
+  }
+  bool result = DataEquals(other);
+  ASSERT(!result || Hashcode() == other->Hashcode());
+  return result;
+}
+
+
+intptr_t HValue::Hashcode() const {
+  intptr_t result = opcode();
+  int count = OperandCount();
+  for (int i = 0; i < count; ++i) {
+    result = result * 19 + OperandAt(i)->id() + (result >> 7);
+  }
+  return result;
+}
+
+
+void HValue::SetOperandAt(int index, HValue* value) {
+  ASSERT(value == NULL || !value->representation().IsNone());
+  RegisterUse(index, value);
+  InternalSetOperandAt(index, value);
+}
+
+
+void HValue::ReplaceAndDelete(HValue* other) {
+  ReplaceValue(other);
+  Delete();
+}
+
+
+void HValue::ReplaceValue(HValue* other) {
+  ZoneList<HValue*> start_uses(2);
+  for (int i = 0; i < uses_.length(); ++i) {
+    HValue* use = uses_.at(i);
+    if (!use->block()->IsStartBlock()) {
+      InternalReplaceAtUse(use, other);
+      other->uses_.Add(use);
+    } else {
+      start_uses.Add(use);
+    }
+  }
+  uses_.Clear();
+  uses_.AddAll(start_uses);
+}
+
+
+void HValue::ClearOperands() {
+  for (int i = 0; i < OperandCount(); ++i) {
+    SetOperandAt(i, NULL);
+  }
+}
+
+
+void HValue::Delete() {
+  ASSERT(HasNoUses());
+  ClearOperands();
+  DeleteFromGraph();
+}
+
+
+void HValue::ReplaceAtUse(HValue* use, HValue* other) {
+  for (int i = 0; i < use->OperandCount(); ++i) {
+    if (use->OperandAt(i) == this) {
+      use->SetOperandAt(i, other);
+    }
+  }
+}
+
+
+void HValue::ReplaceFirstAtUse(HValue* use, HValue* other, Representation r) {
+  for (int i = 0; i < use->OperandCount(); ++i) {
+    if (use->RequiredInputRepresentation(i).Equals(r) &&
+        use->OperandAt(i) == this) {
+      use->SetOperandAt(i, other);
+      return;
+    }
+  }
+}
+
+
+void HValue::InternalReplaceAtUse(HValue* use, HValue* other) {
+  for (int i = 0; i < use->OperandCount(); ++i) {
+    if (use->OperandAt(i) == this) {
+      // Call internal method that does not update use lists. The caller is
+      // responsible for doing so.
+      use->InternalSetOperandAt(i, other);
+    }
+  }
+}
+
+
+void HValue::SetBlock(HBasicBlock* block) {
+  ASSERT(block_ == NULL || block == NULL);
+  block_ = block;
+  if (id_ == kNoNumber && block != NULL) {
+    id_ = block->graph()->GetNextValueID(this);
+  }
+}
+
+
+void HValue::PrintTypeTo(HType type, StringStream* stream) {
+  stream->Add(type.ToShortString());
+}
+
+
+void HValue::PrintNameTo(StringStream* stream) {
+  stream->Add("%s%d", representation_.Mnemonic(), id());
+}
+
+
+bool HValue::UpdateInferredType() {
+  HType type = CalculateInferredType();
+  bool result = (!type.Equals(type_));
+  type_ = type;
+  return result;
+}
+
+
+void HValue::RegisterUse(int index, HValue* new_value) {
+  HValue* old_value = OperandAt(index);
+  if (old_value == new_value) return;
+  if (old_value != NULL) {
+    ASSERT(old_value->uses_.Contains(this));
+    old_value->uses_.RemoveElement(this);
+  }
+  if (new_value != NULL) {
+    new_value->uses_.Add(this);
+  }
+}
+
+
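+// Ranges form a stack per value, presumably linked through Range::next():
+// AddNewRange pushes a refined range and RemoveLastAddedRange pops it.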
+void HValue::AddNewRange(Range* r) {
+  if (!HasRange()) ComputeInitialRange();
+  if (!HasRange()) range_ = new Range();
+  ASSERT(HasRange());
+  r->StackUpon(range_);
+  range_ = r;
+}
+
+
+void HValue::RemoveLastAddedRange() {
+  ASSERT(HasRange());
+  ASSERT(range_->next() != NULL);
+  range_ = range_->next();
+}
+
+
+void HValue::ComputeInitialRange() {
+  ASSERT(!HasRange());
+  range_ = InferRange();
+  ASSERT(HasRange());
+}
+
+
+void HInstruction::PrintTo(StringStream* stream) const {
+  stream->Add("%s", Mnemonic());
+  if (HasSideEffects()) stream->Add("*");
+  stream->Add(" ");
+  PrintDataTo(stream);
+
+  if (range() != NULL) {
+    stream->Add(" range[%d,%d,m0=%d]",
+                range()->lower(),
+                range()->upper(),
+                static_cast<int>(range()->CanBeMinusZero()));
+  }
+
+  int changes_flags = (flags() & HValue::ChangesFlagsMask());
+  if (changes_flags != 0) {
+    stream->Add(" changes[0x%x]", changes_flags);
+  }
+
+  if (representation().IsTagged() && !type().Equals(HType::Tagged())) {
+    stream->Add(" type[%s]", type().ToString());
+  }
+}
+
+
+void HInstruction::Unlink() {
+  ASSERT(IsLinked());
+  ASSERT(!IsControlInstruction());  // Must never move control instructions.
+  clear_block();
+  if (previous_ != NULL) previous_->next_ = next_;
+  if (next_ != NULL) next_->previous_ = previous_;
+}
+
+
+void HInstruction::InsertBefore(HInstruction* next) {
+  ASSERT(!IsLinked());
+  ASSERT(!next->IsBlockEntry());
+  ASSERT(!IsControlInstruction());
+  ASSERT(!next->block()->IsStartBlock());
+  ASSERT(next->previous_ != NULL);
+  HInstruction* prev = next->previous();
+  prev->next_ = this;
+  next->previous_ = this;
+  next_ = next;
+  previous_ = prev;
+  SetBlock(next->block());
+}
+
+
+void HInstruction::InsertAfter(HInstruction* previous) {
+  ASSERT(!IsLinked());
+  ASSERT(!previous->IsControlInstruction());
+  ASSERT(!IsControlInstruction() || previous->next_ == NULL);
+  HBasicBlock* block = previous->block();
+  // Never insert anything except constants into the start block after it
+  // is finished.
+  if (block->IsStartBlock() && block->IsFinished() && !IsConstant()) {
+    ASSERT(block->end()->SecondSuccessor() == NULL);
+    InsertAfter(block->end()->FirstSuccessor()->first());
+    return;
+  }
+
+  // If we're inserting after an instruction with side-effects that is
+  // followed by a simulate instruction, we need to insert after the
+  // simulate instruction instead.
+  HInstruction* next = previous->next_;
+  if (previous->HasSideEffects() && next != NULL) {
+    ASSERT(next->IsSimulate());
+    previous = next;
+    next = previous->next_;
+  }
+
+  previous_ = previous;
+  next_ = next;
+  SetBlock(block);
+  previous->next_ = this;
+  if (next != NULL) next->previous_ = this;
+}
+
+
+#ifdef DEBUG
+void HInstruction::Verify() const {
+  // Verify that input operands are defined before use.
+  HBasicBlock* cur_block = block();
+  for (int i = 0; i < OperandCount(); ++i) {
+    HValue* other_operand = OperandAt(i);
+    HBasicBlock* other_block = other_operand->block();
+    if (cur_block == other_block) {
+      if (!other_operand->IsPhi()) {
+        HInstruction* cur = cur_block->first();
+        while (cur != NULL) {
+          ASSERT(cur != this);  // We should reach other_operand before!
+          if (cur == other_operand) break;
+          cur = cur->next();
+        }
+        // Must reach other operand in the same block!
+        ASSERT(cur == other_operand);
+      }
+    } else {
+      ASSERT(other_block->Dominates(cur_block));
+    }
+  }
+
+  // Verify that instructions that may have side-effects are followed
+  // by a simulate instruction.
+  if (HasSideEffects() && !IsOsrEntry()) {
+    ASSERT(next()->IsSimulate());
+  }
+}
+#endif
+
+
+HCall::HCall(int count) : arguments_(Zone::NewArray<HValue*>(count), count) {
+  for (int i = 0; i < count; ++i) arguments_[i] = NULL;
+  set_representation(Representation::Tagged());
+  SetFlagMask(AllSideEffects());
+}
+
+
+void HCall::PrintDataTo(StringStream* stream) const {
+  stream->Add("(");
+  for (int i = 0; i < arguments_.length(); ++i) {
+    if (i != 0) stream->Add(", ");
+    arguments_.at(i)->PrintNameTo(stream);
+  }
+  stream->Add(")");
+}
+
+
+void HClassOfTest::PrintDataTo(StringStream* stream) const {
+  stream->Add("class_of_test(");
+  value()->PrintTo(stream);
+  stream->Add(", \"%o\")", *class_name());
+}
+
+
+void HAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
+  arguments()->PrintNameTo(stream);
+  stream->Add("[");
+  index()->PrintNameTo(stream);
+  stream->Add("], length ");
+  length()->PrintNameTo(stream);
+}
+
+
+void HCall::SetArgumentAt(int index, HPushArgument* push_argument) {
+  push_argument->set_argument_index(index);
+  SetOperandAt(index, push_argument);
+}
+
+
+void HCallConstantFunction::PrintDataTo(StringStream* stream) const {
+  if (IsApplyFunction()) {
+    stream->Add("SPECIAL function: apply");
+  } else {
+    stream->Add("%s", *(function()->shared()->DebugName()->ToCString()));
+  }
+  HCall::PrintDataTo(stream);
+}
+
+
+void HBranch::PrintDataTo(StringStream* stream) const {
+  int first_id = FirstSuccessor()->block_id();
+  int second_id = SecondSuccessor()->block_id();
+  stream->Add("on ");
+  value()->PrintNameTo(stream);
+  stream->Add(" (B%d, B%d)", first_id, second_id);
+}
+
+
+void HGoto::PrintDataTo(StringStream* stream) const {
+  stream->Add("B%d", FirstSuccessor()->block_id());
+}
+
+
+void HReturn::PrintDataTo(StringStream* stream) const {
+  value()->PrintNameTo(stream);
+}
+
+
+void HThrow::PrintDataTo(StringStream* stream) const {
+  value()->PrintNameTo(stream);
+}
+
+
+const char* HUnaryMathOperation::OpName() const {
+  switch (op()) {
+    case kMathFloor: return "floor";
+    case kMathRound: return "round";
+    case kMathCeil: return "ceil";
+    case kMathAbs: return "abs";
+    case kMathLog: return "log";
+    case kMathSin: return "sin";
+    case kMathCos: return "cos";
+    case kMathTan: return "tan";
+    case kMathASin: return "asin";
+    case kMathACos: return "acos";
+    case kMathATan: return "atan";
+    case kMathExp: return "exp";
+    case kMathSqrt: return "sqrt";
+    default: break;
+  }
+  return "(unknown operation)";
+}
+
+
+void HUnaryMathOperation::PrintDataTo(StringStream* stream) const {
+  const char* name = OpName();
+  stream->Add("%s ", name);
+  value()->PrintNameTo(stream);
+}
+
+
+void HUnaryOperation::PrintDataTo(StringStream* stream) const {
+  value()->PrintNameTo(stream);
+}
+
+
+void HHasInstanceType::PrintDataTo(StringStream* stream) const {
+  value()->PrintNameTo(stream);
+  switch (from_) {
+    case FIRST_JS_OBJECT_TYPE:
+      if (to_ == LAST_TYPE) stream->Add(" spec_object");
+      break;
+    case JS_REGEXP_TYPE:
+      if (to_ == JS_REGEXP_TYPE) stream->Add(" reg_exp");
+      break;
+    case JS_ARRAY_TYPE:
+      if (to_ == JS_ARRAY_TYPE) stream->Add(" array");
+      break;
+    case JS_FUNCTION_TYPE:
+      if (to_ == JS_FUNCTION_TYPE) stream->Add(" function");
+      break;
+    default:
+      break;
+  }
+}
+
+
+void HTypeofIs::PrintDataTo(StringStream* stream) const {
+  value()->PrintNameTo(stream);
+  stream->Add(" == ");
+  stream->Add(type_literal_->ToAsciiVector());
+}
+
+
+void HPushArgument::PrintDataTo(StringStream* stream) const {
+  HUnaryOperation::PrintDataTo(stream);
+  if (argument_index() != -1) {
+    stream->Add(" [%d]", argument_index_);
+  }
+}
+
+
+void HChange::PrintDataTo(StringStream* stream) const {
+  HUnaryOperation::PrintDataTo(stream);
+  stream->Add(" %s to %s", from_.Mnemonic(), to_.Mnemonic());
+
+  if (CanTruncateToInt32()) stream->Add(" truncating-int32");
+  if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
+}
+
+
+HCheckInstanceType* HCheckInstanceType::NewIsJSObjectOrJSFunction(
+    HValue* value)  {
+  STATIC_ASSERT((LAST_JS_OBJECT_TYPE + 1) == JS_FUNCTION_TYPE);
+  return new HCheckInstanceType(value, FIRST_JS_OBJECT_TYPE, JS_FUNCTION_TYPE);
+}
+
+
+void HCheckMap::PrintDataTo(StringStream* stream) const {
+  value()->PrintNameTo(stream);
+  stream->Add(" %p", *map());
+}
+
+
+void HCheckFunction::PrintDataTo(StringStream* stream) const {
+  value()->PrintNameTo(stream);
+  stream->Add(" %p", *target());
+}
+
+
+void HCallKeyed::PrintDataTo(StringStream* stream) const {
+  stream->Add("[");
+  key()->PrintNameTo(stream);
+  stream->Add("](");
+  for (int i = 1; i < arguments_.length(); ++i) {
+    if (i != 1) stream->Add(", ");
+    arguments_.at(i)->PrintNameTo(stream);
+  }
+  stream->Add(")");
+}
+
+
+void HCallNamed::PrintDataTo(StringStream* stream) const {
+  SmartPointer<char> name_string = name()->ToCString();
+  stream->Add("%s ", *name_string);
+  HCall::PrintDataTo(stream);
+}
+
+
+void HCallGlobal::PrintDataTo(StringStream* stream) const {
+  SmartPointer<char> name_string = name()->ToCString();
+  stream->Add("%s ", *name_string);
+  HCall::PrintDataTo(stream);
+}
+
+
+void HCallRuntime::PrintDataTo(StringStream* stream) const {
+  SmartPointer<char> name_string = name()->ToCString();
+  stream->Add("%s ", *name_string);
+  HCall::PrintDataTo(stream);
+}
+
+
+void HCallStub::PrintDataTo(StringStream* stream) const {
+  stream->Add("%s(%d)",
+              CodeStub::MajorName(major_key_, false),
+              argument_count_);
+}
+
+
+Range* HValue::InferRange() {
+  if (representation().IsTagged()) {
+    // Tagged values are always in int32 range when converted to integer,
+    // but they can contain -0.
+    Range* result = new Range();
+    result->set_can_be_minus_zero(true);
+    return result;
+  } else if (representation().IsNone()) {
+    return NULL;
+  } else {
+    return new Range();
+  }
+}
+
+
+Range* HConstant::InferRange() {
+  if (has_int32_value_) {
+    Range* result = new Range(int32_value_, int32_value_);
+    result->set_can_be_minus_zero(false);
+    return result;
+  }
+  return HInstruction::InferRange();
+}
+
+
+Range* HPhi::InferRange() {
+  if (representation().IsInteger32()) {
+    if (block()->IsLoopHeader()) {
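+      // Without a fixed-point iteration the value of a loop phi is
+      // unbounded, so assume the full int32 range.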
+      Range* range = new Range(kMinInt, kMaxInt);
+      return range;
+    } else {
+      Range* range = OperandAt(0)->range()->Copy();
+      for (int i = 1; i < OperandCount(); ++i) {
+        range->Union(OperandAt(i)->range());
+      }
+      return range;
+    }
+  } else {
+    return HValue::InferRange();
+  }
+}
+
+
+Range* HAdd::InferRange() {
+  if (representation().IsInteger32()) {
+    Range* a = left()->range();
+    Range* b = right()->range();
+    Range* res = a->Copy();
+    if (!res->AddAndCheckOverflow(b)) {
+      ClearFlag(kCanOverflow);
+    }
+    bool m0 = a->CanBeMinusZero() && b->CanBeMinusZero();
+    res->set_can_be_minus_zero(m0);
+    return res;
+  } else {
+    return HArithmeticBinaryOperation::InferRange();
+  }
+}
+
+
+Range* HSub::InferRange() {
+  if (representation().IsInteger32()) {
+    Range* a = left()->range();
+    Range* b = right()->range();
+    Range* res = a->Copy();
+    if (!res->SubAndCheckOverflow(b)) {
+      ClearFlag(kCanOverflow);
+    }
+    res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeZero());
+    return res;
+  } else {
+    return HArithmeticBinaryOperation::InferRange();
+  }
+}
+
+
+Range* HMul::InferRange() {
+  if (representation().IsInteger32()) {
+    Range* a = left()->range();
+    Range* b = right()->range();
+    Range* res = a->Copy();
+    if (!res->MulAndCheckOverflow(b)) {
+      ClearFlag(kCanOverflow);
+    }
+    bool m0 = (a->CanBeZero() && b->CanBeNegative()) ||
+        (a->CanBeNegative() && b->CanBeZero());
+    res->set_can_be_minus_zero(m0);
+    return res;
+  } else {
+    return HArithmeticBinaryOperation::InferRange();
+  }
+}
+
+
+Range* HDiv::InferRange() {
+  if (representation().IsInteger32()) {
+    Range* result = new Range();
+    if (left()->range()->CanBeMinusZero()) {
+      result->set_can_be_minus_zero(true);
+    }
+
+    if (left()->range()->CanBeZero() && right()->range()->CanBeNegative()) {
+      result->set_can_be_minus_zero(true);
+    }
+
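+    // The only int32 division that overflows is kMinInt / -1, whose
+    // result (2^31) is not representable.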
+    if (right()->range()->Includes(-1) && left()->range()->Includes(kMinInt)) {
+      SetFlag(HValue::kCanOverflow);
+    }
+
+    if (!right()->range()->CanBeZero()) {
+      ClearFlag(HValue::kCanBeDivByZero);
+    }
+    return result;
+  } else {
+    return HArithmeticBinaryOperation::InferRange();
+  }
+}
+
+
+Range* HMod::InferRange() {
+  if (representation().IsInteger32()) {
+    Range* a = left()->range();
+    Range* result = new Range();
+    if (a->CanBeMinusZero() || a->CanBeNegative()) {
+      result->set_can_be_minus_zero(true);
+    }
+    if (!right()->range()->CanBeZero()) {
+      ClearFlag(HValue::kCanBeDivByZero);
+    }
+    return result;
+  } else {
+    return HArithmeticBinaryOperation::InferRange();
+  }
+}
+
+
+void HPhi::PrintTo(StringStream* stream) const {
+  stream->Add("[");
+  for (int i = 0; i < OperandCount(); ++i) {
+    HValue* value = OperandAt(i);
+    stream->Add(" ");
+    value->PrintNameTo(stream);
+    stream->Add(" ");
+  }
+  stream->Add(" uses%d_%di_%dd_%dt]",
+              uses()->length(),
+              int32_non_phi_uses() + int32_indirect_uses(),
+              double_non_phi_uses() + double_indirect_uses(),
+              tagged_non_phi_uses() + tagged_indirect_uses());
+}
+
+
+void HPhi::AddInput(HValue* value) {
+  inputs_.Add(NULL);
+  SetOperandAt(OperandCount() - 1, value);
+  // Mark phis that may have 'arguments' directly or indirectly as an operand.
+  if (!CheckFlag(kIsArguments) && value->CheckFlag(kIsArguments)) {
+    SetFlag(kIsArguments);
+  }
+}
+
+
+bool HPhi::HasReceiverOperand() {
+  for (int i = 0; i < OperandCount(); i++) {
+    if (OperandAt(i)->IsParameter() &&
+        HParameter::cast(OperandAt(i))->index() == 0) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+HValue* HPhi::GetRedundantReplacement() const {
+  HValue* candidate = NULL;
+  int count = OperandCount();
+  int position = 0;
+  while (position < count && candidate == NULL) {
+    HValue* current = OperandAt(position++);
+    if (current != this) candidate = current;
+  }
+  while (position < count) {
+    HValue* current = OperandAt(position++);
+    if (current != this && current != candidate) return NULL;
+  }
+  ASSERT(candidate != this);
+  return candidate;
+}
+
+
+void HPhi::DeleteFromGraph() {
+  ASSERT(block() != NULL);
+  block()->RemovePhi(this);
+  ASSERT(block() == NULL);
+}
+
+
+void HPhi::InitRealUses(int phi_id) {
+  // Initialize real uses.
+  phi_id_ = phi_id;
+  for (int j = 0; j < uses()->length(); j++) {
+    HValue* use = uses()->at(j);
+    if (!use->IsPhi()) {
+      int index = use->LookupOperandIndex(0, this);
+      Representation req_rep = use->RequiredInputRepresentation(index);
+      non_phi_uses_[req_rep.kind()]++;
+    }
+  }
+}
+
+
+void HPhi::AddNonPhiUsesFrom(HPhi* other) {
+  for (int i = 0; i < Representation::kNumRepresentations; i++) {
+    indirect_uses_[i] += other->non_phi_uses_[i];
+  }
+}
+
+
+void HPhi::AddIndirectUsesTo(int* dest) {
+  for (int i = 0; i < Representation::kNumRepresentations; i++) {
+    dest[i] += indirect_uses_[i];
+  }
+}
+
+
+void HSimulate::PrintDataTo(StringStream* stream) const {
+  stream->Add("id=%d ", ast_id());
+  if (pop_count_ > 0) stream->Add("pop %d", pop_count_);
+  if (values_.length() > 0) {
+    if (pop_count_ > 0) stream->Add(" /");
+    for (int i = 0; i < values_.length(); ++i) {
+      if (!HasAssignedIndexAt(i)) {
+        stream->Add(" push ");
+      } else {
+        stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
+      }
+      values_[i]->PrintNameTo(stream);
+    }
+  }
+}
+
+
+void HEnterInlined::PrintDataTo(StringStream* stream) const {
+  SmartPointer<char> name = function()->debug_name()->ToCString();
+  stream->Add("%s, id=%d", *name, function()->id());
+}
+
+
+HConstant::HConstant(Handle<Object> handle, Representation r)
+    : handle_(handle),
+      constant_type_(HType::TypeFromValue(handle)),
+      has_int32_value_(false),
+      int32_value_(0),
+      has_double_value_(false),
+      double_value_(0) {
+  set_representation(r);
+  SetFlag(kUseGVN);
+  if (handle_->IsNumber()) {
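+    // n has an exact int32 representation if converting to int32 and back
+    // compares equal. Note that -0 also passes this test: -0.0 == 0.0.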
+    double n = handle_->Number();
+    has_int32_value_ = static_cast<double>(static_cast<int32_t>(n)) == n;
+    if (has_int32_value_) int32_value_ = static_cast<int32_t>(n);
+    double_value_ = n;
+    has_double_value_ = true;
+  }
+}
+
+
+HConstant* HConstant::CopyToRepresentation(Representation r) const {
+  if (r.IsInteger32() && !has_int32_value_) return NULL;
+  if (r.IsDouble() && !has_double_value_) return NULL;
+  return new HConstant(handle_, r);
+}
+
+
+HConstant* HConstant::CopyToTruncatedInt32() const {
+  if (!has_double_value_) return NULL;
+  int32_t truncated = NumberToInt32(*handle_);
+  return new HConstant(Factory::NewNumberFromInt(truncated),
+                       Representation::Integer32());
+}
+
+
+void HConstant::PrintDataTo(StringStream* stream) const {
+  handle()->ShortPrint(stream);
+}
+
+
+bool HArrayLiteral::IsCopyOnWrite() const {
+  return constant_elements()->map() == Heap::fixed_cow_array_map();
+}
+
+
+void HBinaryOperation::PrintDataTo(StringStream* stream) const {
+  left()->PrintNameTo(stream);
+  stream->Add(" ");
+  right()->PrintNameTo(stream);
+  if (CheckFlag(kCanOverflow)) stream->Add(" !");
+  if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
+}
+
+
+Range* HBitAnd::InferRange() {
+  Range* a = left()->range();
+  Range* b = right()->range();
+  int32_t a_mask = 0xffffffff;
+  int32_t b_mask = 0xffffffff;
+  if (a != NULL) a_mask = a->Mask();
+  if (b != NULL) b_mask = b->Mask();
+  int32_t result_mask = a_mask & b_mask;
+  if (result_mask >= 0) {
+    return new Range(0, result_mask);
+  } else {
+    return HBinaryOperation::InferRange();
+  }
+}
+
+
+Range* HBitOr::InferRange() {
+  Range* a = left()->range();
+  Range* b = right()->range();
+  int32_t a_mask = 0xffffffff;
+  int32_t b_mask = 0xffffffff;
+  if (a != NULL) a_mask = a->Mask();
+  if (b != NULL) b_mask = b->Mask();
+  int32_t result_mask = a_mask | b_mask;
+  if (result_mask >= 0) {
+    return new Range(0, result_mask);
+  } else {
+    return HBinaryOperation::InferRange();
+  }
+}
+
+
+Range* HSar::InferRange() {
+  if (right()->IsConstant()) {
+    HConstant* c = HConstant::cast(right());
+    if (c->HasInteger32Value()) {
+      int32_t val = c->Integer32Value();
+      Range* result = NULL;
+      Range* left_range = left()->range();
+      if (left_range == NULL) {
+        result = new Range();
+      } else {
+        result = left_range->Copy();
+      }
+      result->Sar(val);
+      return result;
+    }
+  }
+
+  return HBinaryOperation::InferRange();
+}
+
+
+Range* HShl::InferRange() {
+  if (right()->IsConstant()) {
+    HConstant* c = HConstant::cast(right());
+    if (c->HasInteger32Value()) {
+      int32_t val = c->Integer32Value();
+      Range* result = NULL;
+      Range* left_range = left()->range();
+      if (left_range == NULL) {
+        result = new Range();
+      } else {
+        result = left_range->Copy();
+      }
+      result->Shl(val);
+      return result;
+    }
+  }
+
+  return HBinaryOperation::InferRange();
+}
+
+
+void HCompare::PrintDataTo(StringStream* stream) const {
+  stream->Add(Token::Name(token()));
+  stream->Add(" ");
+  HBinaryOperation::PrintDataTo(stream);
+}
+
+
+void HCompare::SetInputRepresentation(Representation r) {
+  input_representation_ = r;
+  if (r.IsTagged()) {
+    SetFlagMask(AllSideEffects());
+    ClearFlag(kUseGVN);
+  } else {
+    ClearFlagMask(AllSideEffects());
+    SetFlag(kUseGVN);
+  }
+}
+
+
+void HParameter::PrintDataTo(StringStream* stream) const {
+  stream->Add("%u", index());
+}
+
+
+void HLoadNamedField::PrintDataTo(StringStream* stream) const {
+  object()->PrintNameTo(stream);
+  stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
+}
+
+
+void HLoadKeyed::PrintDataTo(StringStream* stream) const {
+  object()->PrintNameTo(stream);
+  stream->Add("[");
+  key()->PrintNameTo(stream);
+  stream->Add("]");
+}
+
+
+void HStoreNamed::PrintDataTo(StringStream* stream) const {
+  object()->PrintNameTo(stream);
+  stream->Add(".");
+  ASSERT(name()->IsString());
+  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(" = ");
+  value()->PrintNameTo(stream);
+}
+
+
+void HStoreNamedField::PrintDataTo(StringStream* stream) const {
+  HStoreNamed::PrintDataTo(stream);
+  if (!transition().is_null()) {
+    stream->Add(" (transition map %p)", *transition());
+  }
+}
+
+
+void HStoreKeyed::PrintDataTo(StringStream* stream) const {
+  object()->PrintNameTo(stream);
+  stream->Add("[");
+  key()->PrintNameTo(stream);
+  stream->Add("] = ");
+  value()->PrintNameTo(stream);
+}
+
+
+void HLoadGlobal::PrintDataTo(StringStream* stream) const {
+  stream->Add("[%p]", *cell());
+  if (check_hole_value()) stream->Add(" (deleteable/read-only)");
+}
+
+
+void HStoreGlobal::PrintDataTo(StringStream* stream) const {
+  stream->Add("[%p] = ", *cell());
+  value()->PrintNameTo(stream);
+}
+
+
+// Implementation of type inference and type conversions. Calculates
+// the inferred type of this instruction based on the input operands.
+
+HType HValue::CalculateInferredType() const {
+  return type_;
+}
+
+
+HType HCheckMap::CalculateInferredType() const {
+  return value()->type();
+}
+
+
+HType HCheckFunction::CalculateInferredType() const {
+  return value()->type();
+}
+
+
+HType HCheckNonSmi::CalculateInferredType() const {
+  // TODO(kasperl): Is there any way to signal that this isn't a smi?
+  return HType::Tagged();
+}
+
+
+HType HCheckSmi::CalculateInferredType() const {
+  return HType::Smi();
+}
+
+
+HType HPhi::CalculateInferredType() const {
+  HType result = HType::Uninitialized();
+  for (int i = 0; i < OperandCount(); ++i) {
+    HType current = OperandAt(i)->type();
+    result = result.Combine(current);
+  }
+  return result;
+}
+
+
+HType HConstant::CalculateInferredType() const {
+  return constant_type_;
+}
+
+
+HType HCompare::CalculateInferredType() const {
+  return HType::Boolean();
+}
+
+
+HType HCompareJSObjectEq::CalculateInferredType() const {
+  return HType::Boolean();
+}
+
+
+HType HUnaryPredicate::CalculateInferredType() const {
+  return HType::Boolean();
+}
+
+
+HType HArithmeticBinaryOperation::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
+HType HAdd::CalculateInferredType() const {
+  return HType::Tagged();
+}
+
+
+HType HBitAnd::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
+HType HBitXor::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
+HType HBitOr::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
+HType HBitNot::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
+HType HUnaryMathOperation::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
+HType HShl::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
+HType HShr::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
+HType HSar::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
+HValue* HUnaryMathOperation::EnsureAndPropagateNotMinusZero(
+    BitVector* visited) {
+  visited->Add(id());
+  if (representation().IsInteger32() &&
+      !value()->representation().IsInteger32()) {
+    if (value()->range() == NULL || value()->range()->CanBeMinusZero()) {
+      SetFlag(kBailoutOnMinusZero);
+    }
+  }
+  if (RequiredInputRepresentation(0).IsInteger32() &&
+      representation().IsInteger32()) {
+    return value();
+  }
+  return NULL;
+}
+
+
+HValue* HChange::EnsureAndPropagateNotMinusZero(BitVector* visited) {
+  visited->Add(id());
+  if (from().IsInteger32()) return NULL;
+  if (CanTruncateToInt32()) return NULL;
+  if (value()->range() == NULL || value()->range()->CanBeMinusZero()) {
+    SetFlag(kBailoutOnMinusZero);
+  }
+  ASSERT(!from().IsInteger32() || !to().IsInteger32());
+  return NULL;
+}
+
+
+HValue* HMod::EnsureAndPropagateNotMinusZero(BitVector* visited) {
+  visited->Add(id());
+  if (range() == NULL || range()->CanBeMinusZero()) {
+    SetFlag(kBailoutOnMinusZero);
+    return left();
+  }
+  return NULL;
+}
+
+
+HValue* HDiv::EnsureAndPropagateNotMinusZero(BitVector* visited) {
+  visited->Add(id());
+  if (range() == NULL || range()->CanBeMinusZero()) {
+    SetFlag(kBailoutOnMinusZero);
+  }
+  return NULL;
+}
+
+
+HValue* HMul::EnsureAndPropagateNotMinusZero(BitVector* visited) {
+  visited->Add(id());
+  if (range() == NULL || range()->CanBeMinusZero()) {
+    SetFlag(kBailoutOnMinusZero);
+  }
+  return NULL;
+}
+
+
+HValue* HSub::EnsureAndPropagateNotMinusZero(BitVector* visited) {
+  visited->Add(id());
+  // Propagate to the left argument. If the left argument cannot be -0, then
+  // the result of the sub operation cannot be either.
+  if (range() == NULL || range()->CanBeMinusZero()) {
+    return left();
+  }
+  return NULL;
+}
+
+
+HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
+  visited->Add(id());
+  // Propagate to the left argument. If the left argument cannot be -0, then
+  // the result of the add operation cannot be either.
+  if (range() == NULL || range()->CanBeMinusZero()) {
+    return left();
+  }
+  return NULL;
+}
+
+
+// Node-specific verification code is only included in debug mode.
+#ifdef DEBUG
+
+void HPhi::Verify() const {
+  ASSERT(OperandCount() == block()->predecessors()->length());
+  for (int i = 0; i < OperandCount(); ++i) {
+    HValue* value = OperandAt(i);
+    HBasicBlock* defining_block = value->block();
+    HBasicBlock* predecessor_block = block()->predecessors()->at(i);
+    ASSERT(defining_block == predecessor_block ||
+           defining_block->Dominates(predecessor_block));
+  }
+}
+
+
+void HSimulate::Verify() const {
+  HInstruction::Verify();
+  ASSERT(HasAstId());
+}
+
+
+void HBoundsCheck::Verify() const {
+  HInstruction::Verify();
+  ASSERT(HasNoUses());
+}
+
+
+void HCheckSmi::Verify() const {
+  HInstruction::Verify();
+  ASSERT(HasNoUses());
+}
+
+
+void HCheckNonSmi::Verify() const {
+  HInstruction::Verify();
+  ASSERT(HasNoUses());
+}
+
+
+void HCheckInstanceType::Verify() const {
+  HInstruction::Verify();
+  ASSERT(HasNoUses());
+}
+
+
+void HCheckMap::Verify() const {
+  HInstruction::Verify();
+  ASSERT(HasNoUses());
+}
+
+
+void HCheckFunction::Verify() const {
+  HInstruction::Verify();
+  ASSERT(HasNoUses());
+}
+
+
+void HCheckPrototypeMaps::Verify() const {
+  HInstruction::Verify();
+  ASSERT(HasNoUses());
+}
+
+#endif
+
+} }  // namespace v8::internal
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
new file mode 100644 (file)
index 0000000..ff1ab1a
--- /dev/null
@@ -0,0 +1,2885 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_INSTRUCTIONS_H_
+#define V8_HYDROGEN_INSTRUCTIONS_H_
+
+#include "v8.h"
+#include "code-stubs.h"
+#include "string-stream.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class HBasicBlock;
+class HEnvironment;
+class HInstruction;
+class HLoopInformation;
+class HValue;
+class LInstruction;
+class LChunkBuilder;
+
+
+// Type hierarchy:
+//
+// HValue
+//   HInstruction
+//     HAccessArgumentsAt
+//     HApplyArguments
+//     HArgumentsElements
+//     HArgumentsLength
+//     HArgumentsObject
+//     HBinaryOperation
+//       HArithmeticBinaryOperation
+//         HAdd
+//         HDiv
+//         HMod
+//         HMul
+//         HSub
+//       HBitwiseBinaryOperation
+//         HBitAnd
+//         HBitOr
+//         HBitXor
+//         HSar
+//         HShl
+//         HShr
+//       HBoundsCheck
+//       HCompare
+//       HCompareJSObjectEq
+//       HInstanceOf
+//       HLoadKeyed
+//         HLoadKeyedFastElement
+//         HLoadKeyedGeneric
+//       HLoadNamedGeneric
+//       HStoreNamed
+//         HStoreNamedField
+//         HStoreNamedGeneric
+//     HBlockEntry
+//     HCall
+//       HCallConstantFunction
+//       HCallFunction
+//       HCallGlobal
+//       HCallKeyed
+//       HCallKnownGlobal
+//       HCallNamed
+//       HCallNew
+//       HCallRuntime
+//     HCallStub
+//     HConstant
+//     HControlInstruction
+//       HDeoptimize
+//       HGoto
+//       HUnaryControlInstruction
+//         HBranch
+//         HCompareMapAndBranch
+//         HReturn
+//         HThrow
+//     HEnterInlined
+//     HFunctionLiteral
+//     HGlobalObject
+//     HGlobalReceiver
+//     HLeaveInlined
+//     HLoadGlobal
+//     HMaterializedLiteral
+//       HArrayLiteral
+//       HObjectLiteral
+//       HRegExpLiteral
+//     HOsrEntry
+//     HParameter
+//     HSimulate
+//     HStackCheck
+//     HStoreKeyed
+//       HStoreKeyedFastElement
+//       HStoreKeyedGeneric
+//     HUnaryOperation
+//       HArrayLength
+//       HBitNot
+//       HChange
+//       HCheckFunction
+//       HCheckInstanceType
+//       HCheckMap
+//       HCheckNonSmi
+//       HCheckPrototypeMaps
+//       HCheckSmi
+//       HDeleteProperty
+//       HLoadElements
+//       HLoadNamedField
+//       HPushArgument
+//       HStoreGlobal
+//       HTypeof
+//       HUnaryMathOperation
+//       HUnaryPredicate
+//         HClassOfTest
+//         HHasCachedArrayIndex
+//         HHasInstanceType
+//         HIsNull
+//         HIsSmi
+//         HTypeofIs
+//       HValueOf
+//     HUnknownOSRValue
+//   HPhi
+
+#define HYDROGEN_ALL_INSTRUCTION_LIST(V)       \
+  V(ArithmeticBinaryOperation)                 \
+  V(BinaryOperation)                           \
+  V(BitwiseBinaryOperation)                    \
+  V(Call)                                      \
+  V(ControlInstruction)                        \
+  V(Instruction)                               \
+  V(LoadKeyed)                                 \
+  V(MaterializedLiteral)                       \
+  V(Phi)                                       \
+  V(StoreKeyed)                                \
+  V(StoreNamed)                                \
+  V(UnaryControlInstruction)                   \
+  V(UnaryOperation)                            \
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(V)
+
+
+#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V)  \
+  V(AccessArgumentsAt)                         \
+  V(Add)                                       \
+  V(ApplyArguments)                            \
+  V(ArgumentsElements)                         \
+  V(ArgumentsLength)                           \
+  V(ArgumentsObject)                           \
+  V(ArrayLength)                               \
+  V(ArrayLiteral)                              \
+  V(BitAnd)                                    \
+  V(BitNot)                                    \
+  V(BitOr)                                     \
+  V(BitXor)                                    \
+  V(BlockEntry)                                \
+  V(BoundsCheck)                               \
+  V(Branch)                                    \
+  V(CallConstantFunction)                      \
+  V(CallFunction)                              \
+  V(CallGlobal)                                \
+  V(CallKeyed)                                 \
+  V(CallKnownGlobal)                           \
+  V(CallNamed)                                 \
+  V(CallNew)                                   \
+  V(CallRuntime)                               \
+  V(CallStub)                                  \
+  V(Change)                                    \
+  V(CheckFunction)                             \
+  V(CheckInstanceType)                         \
+  V(CheckMap)                                  \
+  V(CheckNonSmi)                               \
+  V(CheckPrototypeMaps)                        \
+  V(CheckSmi)                                  \
+  V(Compare)                                   \
+  V(CompareJSObjectEq)                         \
+  V(CompareMapAndBranch)                       \
+  V(Constant)                                  \
+  V(DeleteProperty)                            \
+  V(Deoptimize)                                \
+  V(Div)                                       \
+  V(EnterInlined)                              \
+  V(FunctionLiteral)                           \
+  V(GlobalObject)                              \
+  V(GlobalReceiver)                            \
+  V(Goto)                                      \
+  V(InstanceOf)                                \
+  V(IsNull)                                    \
+  V(IsSmi)                                     \
+  V(HasInstanceType)                           \
+  V(HasCachedArrayIndex)                       \
+  V(ClassOfTest)                               \
+  V(LeaveInlined)                              \
+  V(LoadElements)                              \
+  V(LoadGlobal)                                \
+  V(LoadKeyedFastElement)                      \
+  V(LoadKeyedGeneric)                          \
+  V(LoadNamedField)                            \
+  V(LoadNamedGeneric)                          \
+  V(Mod)                                       \
+  V(Mul)                                       \
+  V(ObjectLiteral)                             \
+  V(OsrEntry)                                  \
+  V(Parameter)                                 \
+  V(PushArgument)                              \
+  V(RegExpLiteral)                             \
+  V(Return)                                    \
+  V(Sar)                                       \
+  V(Shl)                                       \
+  V(Shr)                                       \
+  V(Simulate)                                  \
+  V(StackCheck)                                \
+  V(StoreGlobal)                               \
+  V(StoreKeyedFastElement)                     \
+  V(StoreKeyedGeneric)                         \
+  V(StoreNamedField)                           \
+  V(StoreNamedGeneric)                         \
+  V(Sub)                                       \
+  V(Throw)                                     \
+  V(Typeof)                                    \
+  V(TypeofIs)                                  \
+  V(UnaryMathOperation)                        \
+  V(UnknownOSRValue)                           \
+  V(ValueOf)
+
+#define GVN_FLAG_LIST(V)                       \
+  V(Calls)                                     \
+  V(InobjectFields)                            \
+  V(BackingStoreFields)                        \
+  V(ArrayElements)                             \
+  V(GlobalVars)                                \
+  V(Maps)                                      \
+  V(ArrayLengths)                              \
+  V(OsrEntries)
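+
+// Each entry V(type) expands to a kChanges##type/kDependsOn##type flag pair
+// that global value numbering uses to track side effects (see HValue::Flag).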
+
+#define DECLARE_INSTRUCTION(type)                   \
+  virtual bool Is##type() const { return true; }    \
+  static H##type* cast(HValue* value) {             \
+    ASSERT(value->Is##type());                      \
+    return reinterpret_cast<H##type*>(value);       \
+  }                                                 \
+  Opcode opcode() const { return HValue::k##type; }
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
+  virtual LInstruction* CompileToLithium(LChunkBuilder* builder); \
+  virtual const char* Mnemonic() const { return mnemonic; }       \
+  DECLARE_INSTRUCTION(type)
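+// For example, DECLARE_CONCRETE_INSTRUCTION(Add, "add") gives HAdd an
+// IsAdd() tester, a checked HAdd::cast(), the kAdd opcode, the "add"
+// mnemonic, and the CompileToLithium() declaration.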
+
+
+template<int kSize>
+class HOperandVector : public EmbeddedVector<HValue*, kSize> {
+ public:
+  HOperandVector() : EmbeddedVector<HValue*, kSize>(NULL) { }
+};
+
+
+class Range: public ZoneObject {
+ public:
+  Range() : lower_(kMinInt),
+            upper_(kMaxInt),
+            next_(NULL),
+            can_be_minus_zero_(false) { }
+
+  Range(int32_t lower, int32_t upper)
+      : lower_(lower), upper_(upper), next_(NULL), can_be_minus_zero_(false) { }
+
+  bool IsInSmiRange() const {
+    return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue;
+  }
+  void KeepOrder();
+  void Verify() const;
+  int32_t upper() const { return upper_; }
+  int32_t lower() const { return lower_; }
+  Range* next() const { return next_; }
+  Range* CopyClearLower() const { return new Range(kMinInt, upper_); }
+  Range* CopyClearUpper() const { return new Range(lower_, kMaxInt); }
+  void ClearLower() { lower_ = kMinInt; }
+  void ClearUpper() { upper_ = kMaxInt; }
+  Range* Copy() const { return new Range(lower_, upper_); }
+  bool IsMostGeneric() const { return lower_ == kMinInt && upper_ == kMaxInt; }
+  int32_t Mask() const;
+  void set_can_be_minus_zero(bool b) { can_be_minus_zero_ = b; }
+  bool CanBeMinusZero() const { return CanBeZero() && can_be_minus_zero_; }
+  bool CanBeZero() const { return upper_ >= 0 && lower_ <= 0; }
+  bool CanBeNegative() const { return lower_ < 0; }
+  bool Includes(int value) const {
+    return lower_ <= value && upper_ >= value;
+  }
+
+  void Sar(int32_t value) {
+    int32_t bits = value & 0x1F;
+    lower_ = lower_ >> bits;
+    upper_ = upper_ >> bits;
+    set_can_be_minus_zero(false);
+  }
+
+  void Shl(int32_t value) {
+    int32_t bits = value & 0x1F;
+    int old_lower = lower_;
+    int old_upper = upper_;
+    lower_ = lower_ << bits;
+    upper_ = upper_ << bits;
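+    // If shifting back does not recover the old bounds, bits were lost and
+    // the result is widened to the most general range.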
+    if (old_lower != lower_ >> bits || old_upper != upper_ >> bits) {
+      upper_ = kMaxInt;
+      lower_ = kMinInt;
+    }
+    set_can_be_minus_zero(false);
+  }
+
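+  // Stack this range upon another: narrow it to the intersection and keep a
+  // link to the old range so the refinement can later be undone (see
+  // HValue::RemoveLastAddedRange).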
+  void StackUpon(Range* other) {
+    Intersect(other);
+    next_ = other;
+  }
+
+  void Intersect(Range* other) {
+    upper_ = Min(upper_, other->upper_);
+    lower_ = Max(lower_, other->lower_);
+    bool b = CanBeMinusZero() && other->CanBeMinusZero();
+    set_can_be_minus_zero(b);
+  }
+
+  void Union(Range* other) {
+    upper_ = Max(upper_, other->upper_);
+    lower_ = Min(lower_, other->lower_);
+    bool b = CanBeMinusZero() || other->CanBeMinusZero();
+    set_can_be_minus_zero(b);
+  }
+
+  void Add(int32_t value);
+  bool AddAndCheckOverflow(Range* other);
+  bool SubAndCheckOverflow(Range* other);
+  bool MulAndCheckOverflow(Range* other);
+
+ private:
+  int32_t lower_;
+  int32_t upper_;
+  Range* next_;
+  bool can_be_minus_zero_;
+};
+
+
+class Representation {
+ public:
+  enum Kind {
+    kNone,
+    kTagged,
+    kDouble,
+    kInteger32,
+    kNumRepresentations
+  };
+
+  Representation() : kind_(kNone) { }
+
+  static Representation None() { return Representation(kNone); }
+  static Representation Tagged() { return Representation(kTagged); }
+  static Representation Integer32() { return Representation(kInteger32); }
+  static Representation Double() { return Representation(kDouble); }
+
+  bool Equals(const Representation& other) const {
+    return kind_ == other.kind_;
+  }
+
+  Kind kind() const { return kind_; }
+  bool IsNone() const { return kind_ == kNone; }
+  bool IsTagged() const { return kind_ == kTagged; }
+  bool IsInteger32() const { return kind_ == kInteger32; }
+  bool IsDouble() const { return kind_ == kDouble; }
+  bool IsSpecialization() const {
+    return kind_ == kInteger32 || kind_ == kDouble;
+  }
+  const char* Mnemonic() const;
+
+ private:
+  explicit Representation(Kind k) : kind_(k) { }
+
+  Kind kind_;
+};
+
+
+class HType {
+ public:
+  HType() : type_(kUninitialized) { }
+
+  static HType Tagged() { return HType(kTagged); }
+  static HType TaggedPrimitive() { return HType(kTaggedPrimitive); }
+  static HType TaggedNumber() { return HType(kTaggedNumber); }
+  static HType Smi() { return HType(kSmi); }
+  static HType HeapNumber() { return HType(kHeapNumber); }
+  static HType String() { return HType(kString); }
+  static HType Boolean() { return HType(kBoolean); }
+  static HType NonPrimitive() { return HType(kNonPrimitive); }
+  static HType JSArray() { return HType(kJSArray); }
+  static HType JSObject() { return HType(kJSObject); }
+  static HType Uninitialized() { return HType(kUninitialized); }
+
+  // Return the weakest (least precise) common type.
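+  // For example, Smi().Combine(HeapNumber()) is TaggedNumber()
+  // (0x1d & 0x2d == 0xd) and String().Combine(Boolean()) is
+  // TaggedPrimitive() (0x45 & 0x85 == 0x5).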
+  HType Combine(HType other) {
+    return HType(static_cast<Type>(type_ & other.type_));
+  }
+
+  bool Equals(const HType& other) {
+    return type_ == other.type_;
+  }
+
+  bool IsSubtypeOf(const HType& other) {
+    return Combine(other).Equals(other);
+  }
+
+  bool IsTagged() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kTagged) == kTagged);
+  }
+
+  bool IsTaggedPrimitive() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kTaggedPrimitive) == kTaggedPrimitive);
+  }
+
+  bool IsTaggedNumber() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kTaggedNumber) == kTaggedNumber);
+  }
+
+  bool IsSmi() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kSmi) == kSmi);
+  }
+
+  bool IsHeapNumber() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kHeapNumber) == kHeapNumber);
+  }
+
+  bool IsString() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kString) == kString);
+  }
+
+  bool IsBoolean() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kBoolean) == kBoolean);
+  }
+
+  bool IsNonPrimitive() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kNonPrimitive) == kNonPrimitive);
+  }
+
+  bool IsJSArray() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kJSArray) == kJSArray);
+  }
+
+  bool IsJSObject() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kJSObject) == kJSObject);
+  }
+
+  bool IsUninitialized() {
+    return type_ == kUninitialized;
+  }
+
+  static HType TypeFromValue(Handle<Object> value);
+
+  const char* ToString();
+  const char* ToShortString();
+
+ private:
+  enum Type {
+    kTagged = 0x1,           // 0000 0000 0000 0001
+    kTaggedPrimitive = 0x5,  // 0000 0000 0000 0101
+    kTaggedNumber = 0xd,     // 0000 0000 0000 1101
+    kSmi = 0x1d,             // 0000 0000 0001 1101
+    kHeapNumber = 0x2d,      // 0000 0000 0010 1101
+    kString = 0x45,          // 0000 0000 0100 0101
+    kBoolean = 0x85,         // 0000 0000 1000 0101
+    kNonPrimitive = 0x101,   // 0000 0001 0000 0001
+    kJSObject = 0x301,       // 0000 0011 0000 0001
+    kJSArray = 0x701,        // 0000 0111 0000 0001
+    kUninitialized = 0x1fff  // 0001 1111 1111 1111
+  };
+
+  explicit HType(Type t) : type_(t) { }
+
+  Type type_;
+};
+
+
+class HValue: public ZoneObject {
+ public:
+  static const int kNoNumber = -1;
+
+  // There must be one corresponding kDepends flag for every kChanges flag,
+  // and the order of the kChanges flags must be exactly the same as that of
+  // the kDepends flags.
+  enum Flag {
+    // Declare global value numbering flags.
+  #define DECLARE_DO(type) kChanges##type, kDependsOn##type,
+    GVN_FLAG_LIST(DECLARE_DO)
+  #undef DECLARE_DO
+    kFlexibleRepresentation,
+    kUseGVN,
+    kCanOverflow,
+    kBailoutOnMinusZero,
+    kCanBeDivByZero,
+    kIsArguments,
+    kTruncatingToInt32,
+    kLastFlag = kTruncatingToInt32
+  };
+
+  STATIC_ASSERT(kLastFlag < kBitsPerInt);
+
+  static const int kChangesToDependsFlagsLeftShift = 1;
+
+  static int ChangesFlagsMask() {
+    int result = 0;
+    // Create changes mask.
+#define DECLARE_DO(type) result |= (1 << kChanges##type);
+  GVN_FLAG_LIST(DECLARE_DO)
+#undef DECLARE_DO
+    return result;
+  }
+
+  static int DependsFlagsMask() {
+    return ConvertChangesToDependsFlags(ChangesFlagsMask());
+  }
+
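+  // Each kChanges##type flag is declared immediately before its
+  // kDependsOn##type counterpart, so shifting a changes mask left by one
+  // bit yields the corresponding depends mask.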
+  static int ConvertChangesToDependsFlags(int flags) {
+    return flags << kChangesToDependsFlagsLeftShift;
+  }
+
+  // A flag mask to mark an instruction as having arbitrary side effects.
+  static int AllSideEffects() {
+    return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
+  }
+
+  static HValue* cast(HValue* value) { return value; }
+
+  enum Opcode {
+    // Declare a unique enum value for each hydrogen instruction.
+  #define DECLARE_DO(type) k##type,
+    HYDROGEN_ALL_INSTRUCTION_LIST(DECLARE_DO)
+  #undef DECLARE_DO
+    kMaxInstructionClass
+  };
+
+  HValue() : block_(NULL),
+             id_(kNoNumber),
+             uses_(2),
+             type_(HType::Tagged()),
+             range_(NULL),
+             flags_(0) {}
+  virtual ~HValue() {}
+
+  HBasicBlock* block() const { return block_; }
+  void SetBlock(HBasicBlock* block);
+
+  int id() const { return id_; }
+  void set_id(int id) { id_ = id; }
+
+  const ZoneList<HValue*>* uses() const { return &uses_; }
+
+  virtual bool EmitAtUses() const { return false; }
+  Representation representation() const { return representation_; }
+  void ChangeRepresentation(Representation r) {
+    // Representation was already set and is allowed to be changed.
+    ASSERT(!representation_.IsNone());
+    ASSERT(!r.IsNone());
+    ASSERT(CheckFlag(kFlexibleRepresentation));
+    RepresentationChanged(r);
+    representation_ = r;
+  }
+
+  HType type() const { return type_; }
+  void set_type(HType type) {
+    ASSERT(uses_.length() == 0);
+    type_ = type;
+  }
+
+  // An operation needs to override this function iff:
+  //   1) it can produce an int32 output, and
+  //   2) the true value of its output can potentially be minus zero.
+  // The implementation must set a flag so that it bails out in the case where
+  // it would otherwise output what should be a minus zero as an int32 zero.
+  // If the operation also exists in a form that takes int32 and outputs int32
+  // then the operation should return its input value so that we can propagate
+  // back.  Operations that need to propagate back to more than one input,
+  // such as phi, always return NULL and expect the caller to take care of
+  // things.
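+  // As an example, in JavaScript -1 * 0 evaluates to -0, which an int32
+  // cannot represent, so an int32 multiply must deoptimize whenever its
+  // result may be a minus zero.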
+  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited) {
+    visited->Add(id());
+    return NULL;
+  }
+
+  bool HasSideEffects() const {
+    return (flags_ & AllSideEffects()) != 0;
+  }
+  bool IsDefinedAfter(HBasicBlock* other) const;
+
+  // Operands.
+  virtual int OperandCount() const { return 0; }
+  virtual HValue* OperandAt(int index) const {
+    UNREACHABLE();
+    return NULL;
+  }
+  void SetOperandAt(int index, HValue* value);
+
+  int LookupOperandIndex(int occurrence_index, HValue* op) const;
+  bool UsesMultipleTimes(HValue* op) const;
+
+  void ReplaceAndDelete(HValue* other);
+  void ReplaceValue(HValue* other);
+  void ReplaceAtUse(HValue* use, HValue* other);
+  void ReplaceFirstAtUse(HValue* use, HValue* other, Representation r);
+  bool HasNoUses() const { return uses_.is_empty(); }
+  void ClearOperands();
+  void Delete();
+
+  int flags() const { return flags_; }
+  void SetFlagMask(int mask) { flags_ |= mask; }
+  void SetFlag(Flag f) { SetFlagMask(1 << f); }
+  void ClearFlagMask(int mask) { flags_ &= ~mask; }
+  void ClearFlag(Flag f) { ClearFlagMask(1 << f); }
+  bool CheckFlag(Flag f) const { return CheckFlagMask(1 << f); }
+  bool CheckFlagMask(int mask) const { return (flags_ & mask) != 0; }
+
+  Range* range() const { return range_; }
+  bool HasRange() const { return range_ != NULL; }
+  void AddNewRange(Range* r);
+  void RemoveLastAddedRange();
+  void ComputeInitialRange();
+
+  // Representation helpers.
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::None();
+  }
+  virtual Representation InferredRepresentation() const {
+    return representation();
+  }
+
+  // This gives the instruction an opportunity to replace itself with an
+  // instruction that does the same in some better way.  To replace an
+  // instruction with a new one, first add the new instruction to the graph,
+  // then return it.  Return NULL to have the instruction deleted.
+  virtual HValue* Canonicalize() { return this; }
+
+  // Declare virtual type testers.
+#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
+  HYDROGEN_ALL_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+  bool Equals(HValue* other) const;
+  virtual intptr_t Hashcode() const;
+
+  // Printing support.
+  virtual void PrintTo(StringStream* stream) const = 0;
+  void PrintNameTo(StringStream* stream);
+  static void PrintTypeTo(HType type, StringStream* stream);
+
+  virtual const char* Mnemonic() const = 0;
+  virtual Opcode opcode() const = 0;
+
+  // Updates the inferred type of this instruction and returns true if
+  // it has changed.
+  bool UpdateInferredType();
+
+  virtual HType CalculateInferredType() const;
+
+  // Helper for type conversions used by normal and phi instructions.
+  void InsertInputConversion(HInstruction* previous, int index, HType type);
+
+#ifdef DEBUG
+  virtual void Verify() const = 0;
+#endif
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+  virtual void RepresentationChanged(Representation to) { }
+  virtual Range* InferRange();
+  virtual void DeleteFromGraph() = 0;
+  virtual void InternalSetOperandAt(int index, HValue* value) { UNREACHABLE(); }
+  void clear_block() {
+    ASSERT(block_ != NULL);
+    block_ = NULL;
+  }
+
+  void set_representation(Representation r) {
+    // Representation is set-once.
+    ASSERT(representation_.IsNone() && !r.IsNone());
+    representation_ = r;
+  }
+
+ private:
+  void InternalReplaceAtUse(HValue* use, HValue* other);
+  void RegisterUse(int index, HValue* new_value);
+
+  HBasicBlock* block_;
+
+  // The id of this instruction in the hydrogen graph, assigned when first
+  // added to the graph. Reflects creation order.
+  int id_;
+
+  Representation representation_;
+  ZoneList<HValue*> uses_;
+  HType type_;
+  Range* range_;
+  int flags_;
+
+  DISALLOW_COPY_AND_ASSIGN(HValue);
+};
+
+
+class HInstruction: public HValue {
+ public:
+  HInstruction* next() const { return next_; }
+  HInstruction* previous() const { return previous_; }
+
+  void PrintTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream) const {}
+
+  bool IsLinked() const { return block() != NULL; }
+  void Unlink();
+  void InsertBefore(HInstruction* next);
+  void InsertAfter(HInstruction* previous);
+
+  int position() const { return position_; }
+  bool has_position() const { return position_ != RelocInfo::kNoPosition; }
+  void set_position(int position) { position_ = position; }
+
+  virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+  DECLARE_INSTRUCTION(Instruction)
+
+ protected:
+  HInstruction()
+      : next_(NULL),
+        previous_(NULL),
+        position_(RelocInfo::kNoPosition) {
+    SetFlag(kDependsOnOsrEntries);
+  }
+
+  virtual void DeleteFromGraph() { Unlink(); }
+
+ private:
+  void InitializeAsFirst(HBasicBlock* block) {
+    ASSERT(!IsLinked());
+    SetBlock(block);
+  }
+
+  HInstruction* next_;
+  HInstruction* previous_;
+  int position_;
+
+  friend class HBasicBlock;
+};
+
+
+class HBlockEntry: public HInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(BlockEntry, "block_entry")
+};
+
+
+class HControlInstruction: public HInstruction {
+ public:
+  virtual HBasicBlock* FirstSuccessor() const { return NULL; }
+  virtual HBasicBlock* SecondSuccessor() const { return NULL; }
+
+  DECLARE_INSTRUCTION(ControlInstruction)
+};
+
+
+class HDeoptimize: public HControlInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+};
+
+
+class HGoto: public HControlInstruction {
+ public:
+  explicit HGoto(HBasicBlock* destination)
+      : destination_(destination),
+        include_stack_check_(false) {}
+
+  virtual HBasicBlock* FirstSuccessor() const { return destination_; }
+  void set_include_stack_check(bool include_stack_check) {
+    include_stack_check_ = include_stack_check;
+  }
+  bool include_stack_check() const { return include_stack_check_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+
+ private:
+  HBasicBlock* destination_;
+  bool include_stack_check_;
+};
+
+
+class HUnaryControlInstruction: public HControlInstruction {
+ public:
+  explicit HUnaryControlInstruction(HValue* value) {
+    SetOperandAt(0, value);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  HValue* value() const { return OperandAt(0); }
+  virtual int OperandCount() const { return 1; }
+  virtual HValue* OperandAt(int index) const { return operands_[index]; }
+
+  DECLARE_INSTRUCTION(UnaryControlInstruction)
+
+ protected:
+  virtual void InternalSetOperandAt(int index, HValue* value) {
+    operands_[index] = value;
+  }
+
+ private:
+  HOperandVector<1> operands_;
+};
+
+
+class HBranch: public HUnaryControlInstruction {
+ public:
+  HBranch(HBasicBlock* true_destination,
+          HBasicBlock* false_destination,
+          HValue* boolean_value)
+      : HUnaryControlInstruction(boolean_value),
+        true_destination_(true_destination),
+        false_destination_(false_destination) {
+    ASSERT(true_destination != NULL && false_destination != NULL);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::None();
+  }
+
+  virtual HBasicBlock* FirstSuccessor() const { return true_destination_; }
+  virtual HBasicBlock* SecondSuccessor() const { return false_destination_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+
+ private:
+  HBasicBlock* true_destination_;
+  HBasicBlock* false_destination_;
+};
+
+
+class HCompareMapAndBranch: public HUnaryControlInstruction {
+ public:
+  HCompareMapAndBranch(HValue* result,
+                       Handle<Map> map,
+                       HBasicBlock* true_destination,
+                       HBasicBlock* false_destination)
+      : HUnaryControlInstruction(result),
+        map_(map),
+        true_destination_(true_destination),
+        false_destination_(false_destination) {
+    ASSERT(true_destination != NULL);
+    ASSERT(false_destination != NULL);
+    ASSERT(!map.is_null());
+  }
+
+  virtual HBasicBlock* FirstSuccessor() const { return true_destination_; }
+  virtual HBasicBlock* SecondSuccessor() const { return false_destination_; }
+
+  Handle<Map> map() const { return map_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareMapAndBranch, "compare_map_and_branch")
+
+ private:
+  Handle<Map> map_;
+  HBasicBlock* true_destination_;
+  HBasicBlock* false_destination_;
+};
+
+
+class HReturn: public HUnaryControlInstruction {
+ public:
+  explicit HReturn(HValue* result) : HUnaryControlInstruction(result) { }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class HThrow: public HUnaryControlInstruction {
+ public:
+  explicit HThrow(HValue* value) : HUnaryControlInstruction(value) { }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+};
+
+
+class HUnaryOperation: public HInstruction {
+ public:
+  explicit HUnaryOperation(HValue* value) {
+    SetOperandAt(0, value);
+  }
+
+  HValue* value() const { return OperandAt(0); }
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual int OperandCount() const { return 1; }
+  virtual HValue* OperandAt(int index) const { return operands_[index]; }
+
+  DECLARE_INSTRUCTION(UnaryOperation)
+
+ protected:
+  virtual void InternalSetOperandAt(int index, HValue* value) {
+    operands_[index] = value;
+  }
+
+ private:
+  HOperandVector<1> operands_;
+};
+
+
+class HChange: public HUnaryOperation {
+ public:
+  HChange(HValue* value,
+          Representation from,
+          Representation to)
+      : HUnaryOperation(value), from_(from), to_(to) {
+    ASSERT(!from.IsNone() && !to.IsNone());
+    ASSERT(!from.Equals(to));
+    set_representation(to);
+    SetFlag(kUseGVN);
+
+    if (from.IsInteger32() && to.IsTagged() && value->range() != NULL &&
+        value->range()->IsInSmiRange()) {
+      set_type(HType::Smi());
+    }
+  }
+
+  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
+  Representation from() const { return from_; }
+  Representation to() const { return to_; }
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return from_;
+  }
+
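+  // The change may be performed as a truncation iff every use of the result
+  // only observes the low 32 bits, as the inputs of bitwise operations do.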
+  bool CanTruncateToInt32() const {
+    for (int i = 0; i < uses()->length(); ++i) {
+      if (!uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) return false;
+    }
+    return true;
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Change,
+                               CanTruncateToInt32() ? "truncate" : "change")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    if (!other->IsChange()) return false;
+    HChange* change = HChange::cast(other);
+    return value() == change->value()
+        && to().Equals(change->to())
+        && CanTruncateToInt32() == change->CanTruncateToInt32();
+  }
+
+ private:
+  Representation from_;
+  Representation to_;
+};
+
+
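+// An HSimulate captures the state of the unoptimized environment (the
+// expression stack and assigned locals) at a given AST id, so that a later
+// deoptimization can reconstruct the unoptimized frame at that point.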
+class HSimulate: public HInstruction {
+ public:
+  HSimulate(int ast_id, int pop_count, int environment_height)
+      : ast_id_(ast_id),
+        pop_count_(pop_count),
+        environment_height_(environment_height),
+        values_(2),
+        assigned_indexes_(2) {}
+  virtual ~HSimulate() {}
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  bool HasAstId() const { return ast_id_ != AstNode::kNoNumber; }
+  int ast_id() const { return ast_id_; }
+  void set_ast_id(int id) {
+    ASSERT(!HasAstId());
+    ast_id_ = id;
+  }
+
+  int environment_height() const { return environment_height_; }
+  int pop_count() const { return pop_count_; }
+  const ZoneList<HValue*>* values() const { return &values_; }
+  int GetAssignedIndexAt(int index) const {
+    ASSERT(HasAssignedIndexAt(index));
+    return assigned_indexes_[index];
+  }
+  bool HasAssignedIndexAt(int index) const {
+    return assigned_indexes_[index] != kNoIndex;
+  }
+  void AddAssignedValue(int index, HValue* value) {
+    AddValue(index, value);
+  }
+  void AddPushedValue(HValue* value) {
+    AddValue(kNoIndex, value);
+  }
+  virtual int OperandCount() const { return values_.length(); }
+  virtual HValue* OperandAt(int index) const { return values_[index]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Simulate, "simulate")
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+ protected:
+  virtual void InternalSetOperandAt(int index, HValue* value) {
+    values_[index] = value;
+  }
+
+ private:
+  static const int kNoIndex = -1;
+  void AddValue(int index, HValue* value) {
+    assigned_indexes_.Add(index);
+    // Resize the list of pushed values.
+    values_.Add(NULL);
+    // Set the operand through the base method in HValue to make sure that the
+    // use lists are correctly updated.
+    SetOperandAt(values_.length() - 1, value);
+  }
+  int ast_id_;
+  int pop_count_;
+  int environment_height_;
+  ZoneList<HValue*> values_;
+  ZoneList<int> assigned_indexes_;
+};
+
+
+class HStackCheck: public HInstruction {
+ public:
+  HStackCheck() { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack_check")
+};
+
+
+class HEnterInlined: public HInstruction {
+ public:
+  HEnterInlined(Handle<JSFunction> closure, FunctionLiteral* function)
+      : closure_(closure), function_(function) {
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<JSFunction> closure() const { return closure_; }
+  FunctionLiteral* function() const { return function_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(EnterInlined, "enter_inlined")
+
+ private:
+  Handle<JSFunction> closure_;
+  FunctionLiteral* function_;
+};
+
+
+class HLeaveInlined: public HInstruction {
+ public:
+  HLeaveInlined() {}
+
+  DECLARE_CONCRETE_INSTRUCTION(LeaveInlined, "leave_inlined")
+};
+
+
+class HPushArgument: public HUnaryOperation {
+ public:
+  explicit HPushArgument(HValue* value)
+      : HUnaryOperation(value), argument_index_(-1) {
+    set_representation(Representation::Tagged());
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  HValue* argument() const { return OperandAt(0); }
+  int argument_index() const { return argument_index_; }
+  void set_argument_index(int index) {
+    ASSERT(argument_index_ == -1 || index == argument_index_);
+    argument_index_ = index;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push_argument")
+
+ private:
+  int argument_index_;
+};
+
+
+class HGlobalObject: public HInstruction {
+ public:
+  HGlobalObject() {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnCalls);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global_object")
+};
+
+
+class HGlobalReceiver: public HInstruction {
+ public:
+  HGlobalReceiver() {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnCalls);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global_receiver")
+};
+
+
+class HCall: public HInstruction {
+ public:
+  // Construct a call with uninitialized arguments. The argument count
+  // includes the receiver.
+  explicit HCall(int count);
+
+  virtual HType CalculateInferredType() const { return HType::Tagged(); }
+
+  // TODO(3190496): This needs a cleanup. We don't want the arguments
+  // to be operands of the call instruction, as this results in bad code
+  // quality.
+  virtual int argument_count() const { return arguments_.length(); }
+  virtual int OperandCount() const { return argument_count(); }
+  virtual HValue* OperandAt(int index) const { return arguments_[index]; }
+  virtual HPushArgument* PushArgumentAt(int index) const {
+    return HPushArgument::cast(OperandAt(index));
+  }
+  virtual HValue* ArgumentAt(int index) const {
+    return PushArgumentAt(index)->argument();
+  }
+  virtual void SetArgumentAt(int index, HPushArgument* push_argument);
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_INSTRUCTION(Call)
+
+ protected:
+  virtual void InternalSetOperandAt(int index, HValue* value) {
+    arguments_[index] = value;
+  }
+
+  int argument_count_;
+  Vector<HValue*> arguments_;
+};
+
+
+class HCallConstantFunction: public HCall {
+ public:
+  HCallConstantFunction(Handle<JSFunction> function, int argument_count)
+      : HCall(argument_count), function_(function) { }
+
+  Handle<JSFunction> function() const { return function_; }
+  bool IsApplyFunction() const {
+    return function_->code() == Builtins::builtin(Builtins::FunctionApply);
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call_constant_function")
+
+ private:
+  Handle<JSFunction> function_;
+};
+
+
+class HCallKeyed: public HCall {
+ public:
+  HCallKeyed(HValue* key, int argument_count)
+      : HCall(argument_count + 1) {
+    SetOperandAt(0, key);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  // TODO(3190496): This is a hack to get an additional operand that
+  // is not an argument to work with the current setup. This _needs_ a cleanup.
+  // (see HCall)
+  virtual void PrintDataTo(StringStream* stream) const;
+  HValue* key() const { return OperandAt(0); }
+  virtual int argument_count() const { return arguments_.length() - 1; }
+  virtual int OperandCount() const { return arguments_.length(); }
+  virtual HValue* OperandAt(int index) const { return arguments_[index]; }
+  virtual HPushArgument* PushArgumentAt(int index) const {
+    return HPushArgument::cast(OperandAt(index + 1));
+  }
+  virtual void SetArgumentAt(int index, HPushArgument* push_argument) {
+    HCall::SetArgumentAt(index + 1, push_argument);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call_keyed")
+};
+
+
+class HCallNamed: public HCall {
+ public:
+  HCallNamed(Handle<String> name, int argument_count)
+      : HCall(argument_count), name_(name) { }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<String> name() const { return name_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call_named")
+
+ private:
+  Handle<String> name_;
+};
+
+
+class HCallFunction: public HCall {
+ public:
+  explicit HCallFunction(int argument_count) : HCall(argument_count) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call_function")
+};
+
+
+class HCallGlobal: public HCall {
+ public:
+  HCallGlobal(Handle<String> name, int argument_count)
+      : HCall(argument_count), name_(name) { }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<String> name() const { return name_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call_global")
+
+ private:
+  Handle<String> name_;
+};
+
+
+class HCallKnownGlobal: public HCall {
+ public:
+  HCallKnownGlobal(Handle<JSFunction> target,
+                   int argument_count)
+      : HCall(argument_count), target_(target) { }
+
+  Handle<JSFunction> target() const { return target_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call_known_global")
+
+ private:
+  Handle<JSFunction> target_;
+};
+
+
+class HCallNew: public HCall {
+ public:
+  explicit HCallNew(int argument_count) : HCall(argument_count) { }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  HValue* constructor() const { return ArgumentAt(0); }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call_new")
+};
+
+
+class HCallRuntime: public HCall {
+ public:
+  HCallRuntime(Handle<String> name,
+               Runtime::Function* c_function,
+               int argument_count)
+      : HCall(argument_count), c_function_(c_function), name_(name) { }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Runtime::Function* function() const { return c_function_; }
+  Handle<String> name() const { return name_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call_runtime")
+
+ private:
+  Runtime::Function* c_function_;
+  Handle<String> name_;
+};
+
+
+class HArrayLength: public HUnaryOperation {
+ public:
+  explicit HArrayLength(HValue* value) : HUnaryOperation(value) {
+    // The length of an array is stored as a tagged value in the array
+    // object. It is guaranteed to be a 32-bit integer, but it can be
+    // represented as either a smi or a heap number.
+    set_representation(Representation::Tagged());
+    SetFlag(kDependsOnArrayLengths);
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLength, "array_length")
+};
+
+
+class HBitNot: public HUnaryOperation {
+ public:
+  explicit HBitNot(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Integer32());
+    SetFlag(kUseGVN);
+    SetFlag(kTruncatingToInt32);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Integer32();
+  }
+  virtual HType CalculateInferredType() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(BitNot, "bit_not")
+};
+
+
+class HUnaryMathOperation: public HUnaryOperation {
+ public:
+  HUnaryMathOperation(HValue* value, MathFunctionId op)
+      : HUnaryOperation(value), op_(op) {
+    switch (op) {
+      case kMathFloor:
+      case kMathRound:
+      case kMathCeil:
+        set_representation(Representation::Integer32());
+        break;
+      case kMathAbs:
+        set_representation(Representation::Tagged());
+        SetFlag(kFlexibleRepresentation);
+        break;
+      case kMathSqrt:
+      default:
+        set_representation(Representation::Double());
+    }
+    SetFlag(kUseGVN);
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  virtual HType CalculateInferredType() const;
+
+  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    switch (op_) {
+      case kMathFloor:
+      case kMathRound:
+      case kMathCeil:
+      case kMathSqrt:
+        return Representation::Double();
+        break;
+      case kMathAbs:
+        return representation();
+        break;
+      default:
+        return Representation::None();
+    }
+  }
+
+  virtual HValue* Canonicalize() {
+    // If the input is integer32 then we replace the floor instruction
+    // with its inputs.  This happens before the representation changes are
+    // introduced.
+    if (op() == kMathFloor) {
+      if (value()->representation().IsInteger32()) return value();
+    }
+    return this;
+  }
+
+  MathFunctionId op() const { return op_; }
+  const char* OpName() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary_math_operation")
+
+ private:
+  MathFunctionId op_;
+};
+
+
+class HLoadElements: public HUnaryOperation {
+ public:
+  explicit HLoadElements(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnMaps);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+};
+
+
+class HCheckMap: public HUnaryOperation {
+ public:
+  HCheckMap(HValue* value, Handle<Map> map)
+      : HUnaryOperation(value), map_(map) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnMaps);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual HType CalculateInferredType() const;
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+  Handle<Map> map() const { return map_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check_map")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HCheckMap* b = HCheckMap::cast(other);
+    return map_.is_identical_to(b->map());
+  }
+
+ private:
+  Handle<Map> map_;
+};
+
+
+class HCheckFunction: public HUnaryOperation {
+ public:
+  HCheckFunction(HValue* value, Handle<JSFunction> function)
+      : HUnaryOperation(value), target_(function) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual HType CalculateInferredType() const;
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+  Handle<JSFunction> target() const { return target_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check_function")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HCheckFunction* b = HCheckFunction::cast(other);
+    return target_.is_identical_to(b->target());
+  }
+
+ private:
+  Handle<JSFunction> target_;
+};
+
+
+class HCheckInstanceType: public HUnaryOperation {
+ public:
+  // Check that the instance type is in the range [first, last] where
+  // both first and last are included.
+  HCheckInstanceType(HValue* value, InstanceType first, InstanceType last)
+      : HUnaryOperation(value), first_(first), last_(last) {
+    ASSERT(first <= last);
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+  static HCheckInstanceType* NewIsJSObjectOrJSFunction(HValue* value);
+
+  InstanceType first() const { return first_; }
+  InstanceType last() const { return last_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check_instance_type")
+
+ protected:
+  // TODO(ager): It could be nice to allow the omission of instance
+  // type checks if we have already performed an instance type check
+  // with a larger range.
+  virtual bool DataEquals(HValue* other) const {
+    HCheckInstanceType* b = HCheckInstanceType::cast(other);
+    return (first_ == b->first()) && (last_ == b->last());
+  }
+
+ private:
+  InstanceType first_;
+  InstanceType last_;
+};
+
+
+class HCheckNonSmi: public HUnaryOperation {
+ public:
+  explicit HCheckNonSmi(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  virtual HType CalculateInferredType() const;
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check_non_smi")
+};
+
+
+class HCheckPrototypeMaps: public HUnaryOperation {
+ public:
+  HCheckPrototypeMaps(HValue* value,
+                      Handle<JSObject> holder,
+                      Handle<Map> receiver_map)
+      : HUnaryOperation(value),
+        holder_(holder),
+        receiver_map_(receiver_map) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnMaps);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+  Handle<JSObject> holder() const { return holder_; }
+  Handle<Map> receiver_map() const { return receiver_map_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check_prototype_maps")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HCheckPrototypeMaps* b = HCheckPrototypeMaps::cast(other);
+    return holder_.is_identical_to(b->holder()) &&
+        receiver_map_.is_identical_to(b->receiver_map());
+  }
+
+ private:
+  Handle<JSObject> holder_;
+  Handle<Map> receiver_map_;
+};
+
+
+class HCheckSmi: public HUnaryOperation {
+ public:
+  explicit HCheckSmi(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  virtual HType CalculateInferredType() const;
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check_smi")
+};
+
+
+class HPhi: public HValue {
+ public:
+  explicit HPhi(int merged_index)
+      : inputs_(2),
+        merged_index_(merged_index),
+        phi_id_(-1) {
+    for (int i = 0; i < Representation::kNumRepresentations; i++) {
+      non_phi_uses_[i] = 0;
+      indirect_uses_[i] = 0;
+    }
+    ASSERT(merged_index >= 0);
+    set_representation(Representation::Tagged());
+    SetFlag(kFlexibleRepresentation);
+  }
+
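+  // Pick the most general representation seen among the inputs:
+  // Tagged beats Double beats Integer32 beats None.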
+  virtual Representation InferredRepresentation() const {
+    bool double_occurred = false;
+    bool int32_occurred = false;
+    for (int i = 0; i < OperandCount(); ++i) {
+      HValue* value = OperandAt(i);
+      if (value->representation().IsDouble()) double_occurred = true;
+      if (value->representation().IsInteger32()) int32_occurred = true;
+      if (value->representation().IsTagged()) return Representation::Tagged();
+    }
+
+    if (double_occurred) return Representation::Double();
+    if (int32_occurred) return Representation::Integer32();
+    return Representation::None();
+  }
+
+  virtual Range* InferRange();
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return representation();
+  }
+  virtual HType CalculateInferredType() const;
+  virtual int OperandCount() const { return inputs_.length(); }
+  virtual HValue* OperandAt(int index) const { return inputs_[index]; }
+  HValue* GetRedundantReplacement() const;
+  void AddInput(HValue* value);
+
+  bool HasReceiverOperand();
+
+  int merged_index() const { return merged_index_; }
+
+  virtual const char* Mnemonic() const { return "phi"; }
+
+  virtual void PrintTo(StringStream* stream) const;
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+  DECLARE_INSTRUCTION(Phi)
+
+  void InitRealUses(int id);
+  void AddNonPhiUsesFrom(HPhi* other);
+  void AddIndirectUsesTo(int* use_count);
+
+  int tagged_non_phi_uses() const {
+    return non_phi_uses_[Representation::kTagged];
+  }
+  int int32_non_phi_uses() const {
+    return non_phi_uses_[Representation::kInteger32];
+  }
+  int double_non_phi_uses() const {
+    return non_phi_uses_[Representation::kDouble];
+  }
+  int tagged_indirect_uses() const {
+    return indirect_uses_[Representation::kTagged];
+  }
+  int int32_indirect_uses() const {
+    return indirect_uses_[Representation::kInteger32];
+  }
+  int double_indirect_uses() const {
+    return indirect_uses_[Representation::kDouble];
+  }
+  int phi_id() { return phi_id_; }
+
+ protected:
+  virtual void DeleteFromGraph();
+  virtual void InternalSetOperandAt(int index, HValue* value) {
+    inputs_[index] = value;
+  }
+
+ private:
+  ZoneList<HValue*> inputs_;
+  int merged_index_;
+
+  int non_phi_uses_[Representation::kNumRepresentations];
+  int indirect_uses_[Representation::kNumRepresentations];
+  int phi_id_;
+};
+
+
+class HArgumentsObject: public HInstruction {
+ public:
+  HArgumentsObject() {
+    set_representation(Representation::Tagged());
+    SetFlag(kIsArguments);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject, "arguments-object")
+};
+
+
+class HConstant: public HInstruction {
+ public:
+  HConstant(Handle<Object> handle, Representation r);
+
+  Handle<Object> handle() const { return handle_; }
+
+  virtual bool EmitAtUses() const { return !representation().IsDouble(); }
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual HType CalculateInferredType() const;
+  bool IsInteger() const { return handle_->IsSmi(); }
+  HConstant* CopyToRepresentation(Representation r) const;
+  HConstant* CopyToTruncatedInt32() const;
+  bool HasInteger32Value() const { return has_int32_value_; }
+  int32_t Integer32Value() const {
+    ASSERT(HasInteger32Value());
+    return int32_value_;
+  }
+  bool HasDoubleValue() const { return has_double_value_; }
+  double DoubleValue() const {
+    ASSERT(HasDoubleValue());
+    return double_value_;
+  }
+  bool HasStringValue() const { return handle_->IsString(); }
+
+  virtual intptr_t Hashcode() const {
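+    // The hash is based on the object's address, which is only stable while
+    // allocation (and hence a moving GC) is disallowed.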
+    ASSERT(!Heap::allow_allocation(false));
+    return reinterpret_cast<intptr_t>(*handle());
+  }
+
+#ifdef DEBUG
+  virtual void Verify() const { }
+#endif
+
+  DECLARE_CONCRETE_INSTRUCTION(Constant, "constant")
+
+ protected:
+  virtual Range* InferRange();
+
+  virtual bool DataEquals(HValue* other) const {
+    HConstant* other_constant = HConstant::cast(other);
+    return handle().is_identical_to(other_constant->handle());
+  }
+
+ private:
+  Handle<Object> handle_;
+  HType constant_type_;
+
+  // The following two values represent the int32 and the double value of the
+  // given constant if there is a lossless conversion between the constant
+  // and the specific representation.
+  bool has_int32_value_;
+  int32_t int32_value_;
+  bool has_double_value_;
+  double double_value_;
+};
+
+
+class HBinaryOperation: public HInstruction {
+ public:
+  HBinaryOperation(HValue* left, HValue* right) {
+    ASSERT(left != NULL && right != NULL);
+    SetOperandAt(0, left);
+    SetOperandAt(1, right);
+  }
+
+  HValue* left() const { return OperandAt(0); }
+  HValue* right() const { return OperandAt(1); }
+
+  // TODO(kasperl): Move these helpers to the IA-32 Lithium
+  // instruction sequence builder.
+  HValue* LeastConstantOperand() const {
+    if (IsCommutative() && left()->IsConstant()) return right();
+    return left();
+  }
+  HValue* MostConstantOperand() const {
+    if (IsCommutative() && left()->IsConstant()) return left();
+    return right();
+  }
+
+  virtual bool IsCommutative() const { return false; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual int OperandCount() const { return operands_.length(); }
+  virtual HValue* OperandAt(int index) const { return operands_[index]; }
+
+  DECLARE_INSTRUCTION(BinaryOperation)
+
+ protected:
+  virtual void InternalSetOperandAt(int index, HValue* value) {
+    operands_[index] = value;
+  }
+
+ private:
+  HOperandVector<2> operands_;
+};
+
+
+class HApplyArguments: public HInstruction {
+ public:
+  HApplyArguments(HValue* function,
+                  HValue* receiver,
+                  HValue* length,
+                  HValue* elements) {
+    set_representation(Representation::Tagged());
+    SetOperandAt(0, function);
+    SetOperandAt(1, receiver);
+    SetOperandAt(2, length);
+    SetOperandAt(3, elements);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    // The length is untagged, all other inputs are tagged.
+    return (index == 2)
+        ? Representation::Integer32()
+        : Representation::Tagged();
+  }
+
+  HValue* function() const { return OperandAt(0); }
+  HValue* receiver() const { return OperandAt(1); }
+  HValue* length() const { return OperandAt(2); }
+  HValue* elements() const { return OperandAt(3); }
+
+  virtual int OperandCount() const { return operands_.length(); }
+  virtual HValue* OperandAt(int index) const { return operands_[index]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply_arguments")
+
+ protected:
+  virtual void InternalSetOperandAt(int index, HValue* value) {
+    operands_[index] = value;
+  }
+
+ private:
+  HOperandVector<4> operands_;
+};
+
+
+class HArgumentsElements: public HInstruction {
+ public:
+  HArgumentsElements() {
+    // The value produced by this instruction is a pointer into the stack
+    // that looks like a smi because of alignment: word-aligned addresses
+    // have a zero low bit, just like smis.
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments_elements")
+};
+
+
+class HArgumentsLength: public HUnaryOperation {
+ public:
+  explicit HArgumentsLength(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Integer32());
+    SetFlag(kUseGVN);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments_length")
+};
+
+
+class HAccessArgumentsAt: public HInstruction {
+ public:
+  HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetOperandAt(0, arguments);
+    SetOperandAt(1, length);
+    SetOperandAt(2, index);
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    // The arguments elements pointer is considered tagged.
+    return index == 0
+        ? Representation::Tagged()
+        : Representation::Integer32();
+  }
+
+  HValue* arguments() const { return operands_[0]; }
+  HValue* length() const { return operands_[1]; }
+  HValue* index() const { return operands_[2]; }
+
+  virtual int OperandCount() const { return operands_.length(); }
+  virtual HValue* OperandAt(int index) const { return operands_[index]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access_arguments_at")
+
+ protected:
+  virtual void InternalSetOperandAt(int index, HValue* value) {
+    operands_[index] = value;
+  }
+
+ private:
+  HOperandVector<3> operands_;
+};
+
+
+class HBoundsCheck: public HBinaryOperation {
+ public:
+  HBoundsCheck(HValue* index, HValue* length)
+      : HBinaryOperation(index, length) {
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Integer32();
+  }
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+  HValue* index() const { return left(); }
+  HValue* length() const { return right(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds_check")
+};
+
+
+class HBitwiseBinaryOperation: public HBinaryOperation {
+ public:
+  HBitwiseBinaryOperation(HValue* left, HValue* right)
+      : HBinaryOperation(left, right) {
+    // Default to truncating, Integer32, UseGVN.
+    set_representation(Representation::Integer32());
+    SetFlag(kTruncatingToInt32);
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Integer32();
+  }
+
+  DECLARE_INSTRUCTION(BitwiseBinaryOperation)
+};
+
+
+class HArithmeticBinaryOperation: public HBinaryOperation {
+ public:
+  HArithmeticBinaryOperation(HValue* left, HValue* right)
+      : HBinaryOperation(left, right) {
+    set_representation(Representation::Tagged());
+    SetFlag(kFlexibleRepresentation);
+    SetFlagMask(AllSideEffects());
+  }
+
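+  // Once the operation works on untagged values it can no longer call
+  // arbitrary user code, so all side-effect flags are cleared and the
+  // instruction becomes a candidate for GVN.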
+  virtual void RepresentationChanged(Representation to) {
+    if (!to.IsTagged()) {
+      ClearFlagMask(AllSideEffects());
+      SetFlag(kUseGVN);
+    }
+  }
+
+  virtual HType CalculateInferredType() const;
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return representation();
+  }
+  virtual Representation InferredRepresentation() const {
+    if (left()->representation().Equals(right()->representation())) {
+      return left()->representation();
+    }
+    return HValue::InferredRepresentation();
+  }
+
+  DECLARE_INSTRUCTION(ArithmeticBinaryOperation)
+};
+
+
+class HCompare: public HBinaryOperation {
+ public:
+  HCompare(HValue* left, HValue* right, Token::Value token)
+      : HBinaryOperation(left, right), token_(token) {
+    ASSERT(Token::IsCompareOp(token));
+    set_representation(Representation::Tagged());
+    SetFlagMask(AllSideEffects());
+  }
+
+  void SetInputRepresentation(Representation r);
+  virtual bool EmitAtUses() const { return uses()->length() <= 1; }
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return input_representation_;
+  }
+  Representation GetInputRepresentation() const {
+    return input_representation_;
+  }
+  Token::Value token() const { return token_; }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  virtual HType CalculateInferredType() const;
+
+  virtual intptr_t Hashcode() const {
+    return HValue::Hashcode() * 7 + token_;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Compare, "compare")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HCompare* comp = HCompare::cast(other);
+    return token_ == comp->token();
+  }
+
+ private:
+  Representation input_representation_;
+  Token::Value token_;
+};
+
+
+class HCompareJSObjectEq: public HBinaryOperation {
+ public:
+  HCompareJSObjectEq(HValue* left, HValue* right)
+      : HBinaryOperation(left, right) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  virtual bool EmitAtUses() const { return uses()->length() <= 1; }
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  virtual HType CalculateInferredType() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareJSObjectEq, "compare-js-object-eq")
+};
+
+
+class HUnaryPredicate: public HUnaryOperation {
+ public:
+  explicit HUnaryPredicate(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  virtual bool EmitAtUses() const { return uses()->length() <= 1; }
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  virtual HType CalculateInferredType() const;
+};
+
+
+class HIsNull: public HUnaryPredicate {
+ public:
+  HIsNull(HValue* value, bool is_strict)
+      : HUnaryPredicate(value), is_strict_(is_strict) { }
+
+  bool is_strict() const { return is_strict_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsNull, "is_null")
+
+ private:
+  bool is_strict_;
+};
+
+
+class HIsSmi: public HUnaryPredicate {
+ public:
+  explicit HIsSmi(HValue* value) : HUnaryPredicate(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is_smi")
+};
+
+
+class HHasInstanceType: public HUnaryPredicate {
+ public:
+  HHasInstanceType(HValue* value, InstanceType type)
+      : HUnaryPredicate(value), from_(type), to_(type) { }
+  HHasInstanceType(HValue* value, InstanceType from, InstanceType to)
+      : HUnaryPredicate(value), from_(from), to_(to) {
+    ASSERT(to == LAST_TYPE);  // Others not implemented yet in backend.
+  }
+
+  InstanceType from() { return from_; }
+  InstanceType to() { return to_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has_instance_type")
+
+ private:
+  InstanceType from_;
+  InstanceType to_;  // Inclusive range, not all combinations work.
+};
+
+
+class HHasCachedArrayIndex: public HUnaryPredicate {
+ public:
+  explicit HHasCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has_cached_array_index")
+};
+
+
+class HClassOfTest: public HUnaryPredicate {
+ public:
+  HClassOfTest(HValue* value, Handle<String> class_name)
+      : HUnaryPredicate(value), class_name_(class_name) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class_of_test")
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<String> class_name() const { return class_name_; }
+
+ private:
+  Handle<String> class_name_;
+};
+
+
+class HTypeofIs: public HUnaryPredicate {
+ public:
+  HTypeofIs(HValue* value, Handle<String> type_literal)
+      : HUnaryPredicate(value), type_literal_(type_literal) { }
+
+  Handle<String> type_literal() { return type_literal_; }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof_is")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HTypeofIs* b = HTypeofIs::cast(other);
+    return type_literal_.is_identical_to(b->type_literal_);
+  }
+
+ private:
+  Handle<String> type_literal_;
+};
+
+
+class HInstanceOf: public HBinaryOperation {
+ public:
+  HInstanceOf(HValue* left, HValue* right) : HBinaryOperation(left, right) {
+    set_representation(Representation::Tagged());
+    SetFlagMask(AllSideEffects());
+  }
+
+  virtual bool EmitAtUses() const { return uses()->length() <= 1; }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance_of")
+};
+
+
+class HAdd: public HArithmeticBinaryOperation {
+ public:
+  HAdd(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+    SetFlag(kCanOverflow);
+  }
+
+  // Add is only commutative when two integer values are added; it is not
+  // commutative for tagged values, because it might be a string concatenation.
+  virtual bool IsCommutative() const {
+    return !representation().IsTagged();
+  }
+
+  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
+  virtual HType CalculateInferredType() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Add, "add")
+
+ protected:
+  virtual Range* InferRange();
+};
+
+
+class HSub: public HArithmeticBinaryOperation {
+ public:
+  HSub(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+    SetFlag(kCanOverflow);
+  }
+
+  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
+  DECLARE_CONCRETE_INSTRUCTION(Sub, "sub")
+
+ protected:
+  virtual Range* InferRange();
+};
+
+
+class HMul: public HArithmeticBinaryOperation {
+ public:
+  HMul(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+    SetFlag(kCanOverflow);
+  }
+
+  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
+  // Only commutative if it is certain that two objects are not being
+  // multiplied.
+  virtual bool IsCommutative() const {
+    return !representation().IsTagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Mul, "mul")
+
+ protected:
+  virtual Range* InferRange();
+};
+
+
+class HMod: public HArithmeticBinaryOperation {
+ public:
+  HMod(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+    SetFlag(kCanBeDivByZero);
+  }
+
+  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
+  DECLARE_CONCRETE_INSTRUCTION(Mod, "mod")
+
+ protected:
+  virtual Range* InferRange();
+};
+
+
+class HDiv: public HArithmeticBinaryOperation {
+ public:
+  HDiv(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+    SetFlag(kCanBeDivByZero);
+    SetFlag(kCanOverflow);
+  }
+
+  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
+  DECLARE_CONCRETE_INSTRUCTION(Div, "div")
+
+ protected:
+  virtual Range* InferRange();
+};
+
+
+class HBitAnd: public HBitwiseBinaryOperation {
+ public:
+  HBitAnd(HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(left, right) { }
+
+  virtual bool IsCommutative() const { return true; }
+  virtual HType CalculateInferredType() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(BitAnd, "bit_and")
+
+ protected:
+  virtual Range* InferRange();
+};
+
+
+class HBitXor: public HBitwiseBinaryOperation {
+ public:
+  HBitXor(HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(left, right) { }
+
+  virtual bool IsCommutative() const { return true; }
+  virtual HType CalculateInferredType() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(BitXor, "bit_xor")
+};
+
+
+class HBitOr: public HBitwiseBinaryOperation {
+ public:
+  HBitOr(HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(left, right) { }
+
+  virtual bool IsCommutative() const { return true; }
+  virtual HType CalculateInferredType() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(BitOr, "bit_or")
+
+ protected:
+  virtual Range* InferRange();
+};
+
+
+class HShl: public HBitwiseBinaryOperation {
+ public:
+  HShl(HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(left, right) { }
+
+  virtual Range* InferRange();
+  virtual HType CalculateInferredType() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Shl, "shl")
+};
+
+
+class HShr: public HBitwiseBinaryOperation {
+ public:
+  HShr(HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(left, right) { }
+
+  virtual HType CalculateInferredType() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Shr, "shr")
+};
+
+
+class HSar: public HBitwiseBinaryOperation {
+ public:
+  HSar(HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(left, right) { }
+
+  virtual Range* InferRange();
+  virtual HType CalculateInferredType() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Sar, "sar")
+};
+
+
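+// Marks an on-stack replacement (OSR) entry point at the given AST id.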
+class HOsrEntry: public HInstruction {
+ public:
+  explicit HOsrEntry(int ast_id) : ast_id_(ast_id) {
+    SetFlag(kChangesOsrEntries);
+  }
+
+  int ast_id() const { return ast_id_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr_entry")
+
+ private:
+  int ast_id_;
+};
+
+
+class HParameter: public HInstruction {
+ public:
+  explicit HParameter(unsigned index) : index_(index) {
+    set_representation(Representation::Tagged());
+  }
+
+  unsigned index() const { return index_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+
+ private:
+  unsigned index_;
+};
+
+
+class HCallStub: public HInstruction {
+ public:
+  HCallStub(CodeStub::Major major_key, int argument_count)
+      : major_key_(major_key),
+        argument_count_(argument_count),
+        transcendental_type_(TranscendentalCache::kNumberOfCaches) {
+    set_representation(Representation::Tagged());
+    SetFlagMask(AllSideEffects());
+  }
+
+  CodeStub::Major major_key() { return major_key_; }
+  int argument_count() { return argument_count_; }
+
+  void set_transcendental_type(TranscendentalCache::Type transcendental_type) {
+    transcendental_type_ = transcendental_type;
+  }
+  TranscendentalCache::Type transcendental_type() {
+    return transcendental_type_;
+  }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call_stub")
+
+ private:
+  CodeStub::Major major_key_;
+  int argument_count_;
+  TranscendentalCache::Type transcendental_type_;
+};
+
+
+class HUnknownOSRValue: public HInstruction {
+ public:
+  HUnknownOSRValue() { set_representation(Representation::Tagged()); }
+
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown_osr_value")
+};
+
+
+class HLoadGlobal: public HInstruction {
+ public:
+  HLoadGlobal(Handle<JSGlobalPropertyCell> cell, bool check_hole_value)
+      : cell_(cell), check_hole_value_(check_hole_value) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnGlobalVars);
+  }
+
+  Handle<JSGlobalPropertyCell> cell() const { return cell_; }
+  bool check_hole_value() const { return check_hole_value_; }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  virtual void PrintDataTo(StringStream* stream) const;
+
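+  // Hashing uses the raw cell pointer, which is only stable while allocation
+  // (and therefore garbage collection) is disallowed.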
+  virtual intptr_t Hashcode() const {
+    ASSERT(!Heap::allow_allocation(false));
+    return reinterpret_cast<intptr_t>(*cell_);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load_global")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HLoadGlobal* b = HLoadGlobal::cast(other);
+    return cell_.is_identical_to(b->cell());
+  }
+
+ private:
+  Handle<JSGlobalPropertyCell> cell_;
+  bool check_hole_value_;
+};
+
+
+class HStoreGlobal: public HUnaryOperation {
+ public:
+  HStoreGlobal(HValue* value, Handle<JSGlobalPropertyCell> cell)
+      : HUnaryOperation(value), cell_(cell) {
+    SetFlag(kChangesGlobalVars);
+  }
+
+  Handle<JSGlobalPropertyCell> cell() const { return cell_; }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store_global")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HStoreGlobal* b = HStoreGlobal::cast(other);
+    return cell_.is_identical_to(b->cell());
+  }
+
+ private:
+  Handle<JSGlobalPropertyCell> cell_;
+};
+
+
+class HLoadNamedField: public HUnaryOperation {
+ public:
+  HLoadNamedField(HValue* object, bool is_in_object, int offset)
+      : HUnaryOperation(object),
+        is_in_object_(is_in_object),
+        offset_(offset) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    if (is_in_object) {
+      SetFlag(kDependsOnInobjectFields);
+    } else {
+      SetFlag(kDependsOnBackingStoreFields);
+    }
+  }
+
+  HValue* object() const { return OperandAt(0); }
+  bool is_in_object() const { return is_in_object_; }
+  int offset() const { return offset_; }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load_named_field")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HLoadNamedField* b = HLoadNamedField::cast(other);
+    return is_in_object_ == b->is_in_object_ && offset_ == b->offset_;
+  }
+
+ private:
+  bool is_in_object_;
+  int offset_;
+};
+
+
+class HLoadNamedGeneric: public HUnaryOperation {
+ public:
+  HLoadNamedGeneric(HValue* object, Handle<Object> name)
+      : HUnaryOperation(object), name_(name) {
+    set_representation(Representation::Tagged());
+    SetFlagMask(AllSideEffects());
+  }
+
+  HValue* object() const { return OperandAt(0); }
+  Handle<Object> name() const { return name_; }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load_named_generic")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HLoadNamedGeneric* b = HLoadNamedGeneric::cast(other);
+    return name_.is_identical_to(b->name_);
+  }
+
+ private:
+  Handle<Object> name_;
+};
+
+
+class HLoadKeyed: public HBinaryOperation {
+ public:
+  HLoadKeyed(HValue* obj, HValue* key) : HBinaryOperation(obj, key) {
+    set_representation(Representation::Tagged());
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  HValue* object() const { return OperandAt(0); }
+  HValue* key() const { return OperandAt(1); }
+
+  DECLARE_INSTRUCTION(LoadKeyed)
+};
+
+
+class HLoadKeyedFastElement: public HLoadKeyed {
+ public:
+  HLoadKeyedFastElement(HValue* obj, HValue* key) : HLoadKeyed(obj, key) {
+    SetFlag(kDependsOnArrayElements);
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    // The key is supposed to be Integer32.
+    return (index == 1) ? Representation::Integer32()
+        : Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement,
+                               "load_keyed_fast_element")
+};
+
+
+class HLoadKeyedGeneric: public HLoadKeyed {
+ public:
+  HLoadKeyedGeneric(HValue* obj, HValue* key) : HLoadKeyed(obj, key) {
+    SetFlagMask(AllSideEffects());
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load_keyed_generic")
+};
+
+
+class HStoreNamed: public HBinaryOperation {
+ public:
+  HStoreNamed(HValue* obj, Handle<Object> name, HValue* val)
+      : HBinaryOperation(obj, val), name_(name) {
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  HValue* object() const { return OperandAt(0); }
+  Handle<Object> name() const { return name_; }
+  HValue* value() const { return OperandAt(1); }
+  void set_value(HValue* value) { SetOperandAt(1, value); }
+
+  DECLARE_INSTRUCTION(StoreNamed)
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HStoreNamed* b = HStoreNamed::cast(other);
+    return name_.is_identical_to(b->name_);
+  }
+
+ private:
+  Handle<Object> name_;
+};
+
+
+class HStoreNamedField: public HStoreNamed {
+ public:
+  HStoreNamedField(HValue* obj,
+                   Handle<Object> name,
+                   HValue* val,
+                   bool in_object,
+                   int offset)
+      : HStoreNamed(obj, name, val),
+        is_in_object_(in_object),
+        offset_(offset) {
+    if (is_in_object_) {
+      SetFlag(kChangesInobjectFields);
+    } else {
+      SetFlag(kChangesBackingStoreFields);
+    }
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store_named_field")
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  bool is_in_object() const { return is_in_object_; }
+  int offset() const { return offset_; }
+  Handle<Map> transition() const { return transition_; }
+  void set_transition(Handle<Map> map) { transition_ = map; }
+
+ private:
+  bool is_in_object_;
+  int offset_;
+  Handle<Map> transition_;
+};
+
+
+class HStoreNamedGeneric: public HStoreNamed {
+ public:
+  HStoreNamedGeneric(HValue* obj, Handle<Object> name, HValue* val)
+      : HStoreNamed(obj, name, val) {
+    SetFlagMask(AllSideEffects());
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store_named_generic")
+};
+
+
+class HStoreKeyed: public HInstruction {
+ public:
+  HStoreKeyed(HValue* obj, HValue* key, HValue* val) {
+    SetOperandAt(0, obj);
+    SetOperandAt(1, key);
+    SetOperandAt(2, val);
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual int OperandCount() const { return operands_.length(); }
+  virtual HValue* OperandAt(int index) const { return operands_[index]; }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  HValue* object() const { return OperandAt(0); }
+  HValue* key() const { return OperandAt(1); }
+  HValue* value() const { return OperandAt(2); }
+
+  DECLARE_INSTRUCTION(StoreKeyed)
+
+ protected:
+  virtual void InternalSetOperandAt(int index, HValue* value) {
+    operands_[index] = value;
+  }
+
+ private:
+  HOperandVector<3> operands_;
+};
+
+
+class HStoreKeyedFastElement: public HStoreKeyed {
+ public:
+  HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val)
+      : HStoreKeyed(obj, key, val) {
+    SetFlag(kChangesArrayElements);
+  }
+
+  bool NeedsWriteBarrier() const {
+    return !value()->type().IsSmi();
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    // The key is supposed to be Integer32.
+    return (index == 1) ? Representation::Integer32()
+        : Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+                               "store_keyed_fast_element")
+};
+
+
+class HStoreKeyedGeneric: public HStoreKeyed {
+ public:
+  HStoreKeyedGeneric(HValue* obj, HValue* key, HValue* val)
+      : HStoreKeyed(obj, key, val) {
+    SetFlagMask(AllSideEffects());
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic")
+};
+
+
+class HMaterializedLiteral: public HInstruction {
+ public:
+  HMaterializedLiteral(int index, int depth)
+      : literal_index_(index), depth_(depth) {
+    set_representation(Representation::Tagged());
+  }
+
+  int literal_index() const { return literal_index_; }
+  int depth() const { return depth_; }
+
+  DECLARE_INSTRUCTION(MaterializedLiteral)
+
+ private:
+  int literal_index_;
+  int depth_;
+};
+
+
+class HArrayLiteral: public HMaterializedLiteral {
+ public:
+  HArrayLiteral(Handle<FixedArray> constant_elements,
+                int length,
+                int literal_index,
+                int depth)
+      : HMaterializedLiteral(literal_index, depth),
+        length_(length),
+        constant_elements_(constant_elements) {}
+
+  Handle<FixedArray> constant_elements() const { return constant_elements_; }
+  int length() const { return length_; }
+
+  bool IsCopyOnWrite() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array_literal")
+
+ private:
+  int length_;
+  Handle<FixedArray> constant_elements_;
+};
+
+
+class HObjectLiteral: public HMaterializedLiteral {
+ public:
+  HObjectLiteral(Handle<FixedArray> constant_properties,
+                 bool fast_elements,
+                 int literal_index,
+                 int depth)
+      : HMaterializedLiteral(literal_index, depth),
+        constant_properties_(constant_properties),
+        fast_elements_(fast_elements) {}
+
+  Handle<FixedArray> constant_properties() const {
+    return constant_properties_;
+  }
+  bool fast_elements() const { return fast_elements_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object_literal")
+
+ private:
+  Handle<FixedArray> constant_properties_;
+  bool fast_elements_;
+};
+
+
+class HRegExpLiteral: public HMaterializedLiteral {
+ public:
+  HRegExpLiteral(Handle<String> pattern,
+                 Handle<String> flags,
+                 int literal_index)
+      : HMaterializedLiteral(literal_index, 0),
+        pattern_(pattern),
+        flags_(flags) { }
+
+  Handle<String> pattern() { return pattern_; }
+  Handle<String> flags() { return flags_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp_literal")
+
+ private:
+  Handle<String> pattern_;
+  Handle<String> flags_;
+};
+
+
+class HFunctionLiteral: public HInstruction {
+ public:
+  HFunctionLiteral(Handle<SharedFunctionInfo> shared, bool pretenure)
+      : shared_info_(shared), pretenure_(pretenure) {
+    set_representation(Representation::Tagged());
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function_literal")
+
+  Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
+  bool pretenure() const { return pretenure_; }
+
+ private:
+  Handle<SharedFunctionInfo> shared_info_;
+  bool pretenure_;
+};
+
+
+class HTypeof: public HUnaryOperation {
+ public:
+  explicit HTypeof(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Tagged());
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class HValueOf: public HUnaryOperation {
+ public:
+  explicit HValueOf(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Tagged());
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value_of")
+};
+
+
+class HDeleteProperty: public HBinaryOperation {
+ public:
+  HDeleteProperty(HValue* obj, HValue* key)
+      : HBinaryOperation(obj, key) {
+    set_representation(Representation::Tagged());
+    SetFlagMask(AllSideEffects());
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete_property")
+
+  HValue* object() const { return left(); }
+  HValue* key() const { return right(); }
+};
+
+#undef DECLARE_INSTRUCTION
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+} }  // namespace v8::internal
+
+#endif  // V8_HYDROGEN_INSTRUCTIONS_H_
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
new file mode 100644 (file)
index 0000000..d25917b
--- /dev/null
@@ -0,0 +1,5540 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen.h"
+
+#include "codegen.h"
+#include "data-flow.h"
+#include "full-codegen.h"
+#include "hashmap.h"
+#include "lithium-allocator.h"
+#include "parser.h"
+#include "scopes.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/lithium-codegen-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/lithium-codegen-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/lithium-codegen-arm.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+
+HBasicBlock::HBasicBlock(HGraph* graph)
+    : block_id_(graph->GetNextBlockID()),
+      graph_(graph),
+      phis_(4),
+      first_(NULL),
+      last_(NULL),
+      end_(NULL),
+      loop_information_(NULL),
+      predecessors_(2),
+      dominator_(NULL),
+      dominated_blocks_(4),
+      last_environment_(NULL),
+      argument_count_(-1),
+      first_instruction_index_(-1),
+      last_instruction_index_(-1),
+      deleted_phis_(4),
+      is_inline_return_target_(false),
+      inverted_(false),
+      deopt_predecessor_(NULL) {
+}
+
+
+void HBasicBlock::AttachLoopInformation() {
+  ASSERT(!IsLoopHeader());
+  loop_information_ = new HLoopInformation(this);
+}
+
+
+void HBasicBlock::DetachLoopInformation() {
+  ASSERT(IsLoopHeader());
+  loop_information_ = NULL;
+}
+
+
+void HBasicBlock::AddPhi(HPhi* phi) {
+  ASSERT(!IsStartBlock());
+  phis_.Add(phi);
+  phi->SetBlock(this);
+}
+
+
+void HBasicBlock::RemovePhi(HPhi* phi) {
+  ASSERT(phi->block() == this);
+  ASSERT(phis_.Contains(phi));
+  ASSERT(phi->HasNoUses());
+  phi->ClearOperands();
+  phis_.RemoveElement(phi);
+  phi->SetBlock(NULL);
+}
+
+
+void HBasicBlock::AddInstruction(HInstruction* instr) {
+  ASSERT(!IsStartBlock() || !IsFinished());
+  ASSERT(!instr->IsLinked());
+  ASSERT(!IsFinished());
+  if (first_ == NULL) {
+    HBlockEntry* entry = new HBlockEntry();
+    entry->InitializeAsFirst(this);
+    first_ = entry;
+  }
+  instr->InsertAfter(GetLastInstruction());
+}
+
+
+HInstruction* HBasicBlock::GetLastInstruction() {
+  if (end_ != NULL) return end_->previous();
+  if (first_ == NULL) return NULL;
+  if (last_ == NULL) last_ = first_;
+  while (last_->next() != NULL) last_ = last_->next();
+  return last_;
+}
+
+
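+// Creates a simulate that records the environment changes (pushed values and
+// variable assignments) accumulated since the last simulate; this gives the
+// deoptimizer enough information to rebuild the unoptimized frame at this id.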
+HSimulate* HBasicBlock::CreateSimulate(int id) {
+  ASSERT(HasEnvironment());
+  HEnvironment* environment = last_environment();
+  ASSERT(id == AstNode::kNoNumber ||
+         environment->closure()->shared()->VerifyBailoutId(id));
+
+  int push_count = environment->push_count();
+  int pop_count = environment->pop_count();
+
+  int length = environment->values()->length();
+  HSimulate* instr = new HSimulate(id, pop_count, length);
+  for (int i = push_count - 1; i >= 0; --i) {
+    instr->AddPushedValue(environment->ExpressionStackAt(i));
+  }
+  for (int i = 0; i < environment->assigned_variables()->length(); ++i) {
+    int index = environment->assigned_variables()->at(i);
+    instr->AddAssignedValue(index, environment->Lookup(index));
+  }
+  environment->ClearHistory();
+  return instr;
+}
+
+
+void HBasicBlock::Finish(HControlInstruction* end) {
+  ASSERT(!IsFinished());
+  AddInstruction(end);
+  end_ = end;
+  if (end->FirstSuccessor() != NULL) {
+    end->FirstSuccessor()->RegisterPredecessor(this);
+    if (end->SecondSuccessor() != NULL) {
+      end->SecondSuccessor()->RegisterPredecessor(this);
+    }
+  }
+}
+
+
+void HBasicBlock::Goto(HBasicBlock* block, bool include_stack_check) {
+  AddSimulate(AstNode::kNoNumber);
+  HGoto* instr = new HGoto(block);
+  instr->set_include_stack_check(include_stack_check);
+  Finish(instr);
+}
+
+
+void HBasicBlock::SetInitialEnvironment(HEnvironment* env) {
+  ASSERT(!HasEnvironment());
+  ASSERT(first() == NULL);
+  UpdateEnvironment(env);
+}
+
+
+void HBasicBlock::SetJoinId(int id) {
+  int length = predecessors_.length();
+  ASSERT(length > 0);
+  for (int i = 0; i < length; i++) {
+    HBasicBlock* predecessor = predecessors_[i];
+    ASSERT(predecessor->end()->IsGoto());
+    HSimulate* simulate = HSimulate::cast(predecessor->GetLastInstruction());
+    // We only need to verify the ID once.
+    ASSERT(i != 0 ||
+           predecessor->last_environment()->closure()->shared()
+               ->VerifyBailoutId(id));
+    simulate->set_ast_id(id);
+  }
+}
+
+
+bool HBasicBlock::Dominates(HBasicBlock* other) const {
+  HBasicBlock* current = other->dominator();
+  while (current != NULL) {
+    if (current == this) return true;
+    current = current->dominator();
+  }
+  return false;
+}
+
+
+void HBasicBlock::PostProcessLoopHeader(IterationStatement* stmt) {
+  ASSERT(IsLoopHeader());
+
+  SetJoinId(stmt->EntryId());
+  if (predecessors()->length() == 1) {
+    // This is a degenerate loop.
+    DetachLoopInformation();
+    return;
+  }
+
+  // Only the first entry into the loop is from outside the loop. All other
+  // entries must be back edges.
+  for (int i = 1; i < predecessors()->length(); ++i) {
+    loop_information()->RegisterBackEdge(predecessors()->at(i));
+  }
+}
+
+
+void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
+  if (!predecessors_.is_empty()) {
+    // Only loop header blocks can have a predecessor added after
+    // instructions have been added to the block (they have phis for all
+    // values in the environment, these phis may be eliminated later).
+    ASSERT(IsLoopHeader() || first_ == NULL);
+    HEnvironment* incoming_env = pred->last_environment();
+    if (IsLoopHeader()) {
+      ASSERT(phis()->length() == incoming_env->values()->length());
+      for (int i = 0; i < phis_.length(); ++i) {
+        phis_[i]->AddInput(incoming_env->values()->at(i));
+      }
+    } else {
+      last_environment()->AddIncomingEdge(this, pred->last_environment());
+    }
+  } else if (!HasEnvironment() && !IsFinished()) {
+    ASSERT(!IsLoopHeader());
+    SetInitialEnvironment(pred->last_environment()->Copy());
+  }
+
+  predecessors_.Add(pred);
+}
+
+
+void HBasicBlock::AddDominatedBlock(HBasicBlock* block) {
+  ASSERT(!dominated_blocks_.Contains(block));
+  // Keep the list of dominated blocks sorted by block id so that if one block
+  // in the list is a predecessor of another, it comes before its successor.
+  int index = 0;
+  while (index < dominated_blocks_.length() &&
+         dominated_blocks_[index]->block_id() < block->block_id()) {
+    ++index;
+  }
+  dominated_blocks_.InsertAt(index, block);
+}
+
+
+void HBasicBlock::AssignCommonDominator(HBasicBlock* other) {
+  if (dominator_ == NULL) {
+    dominator_ = other;
+    other->AddDominatedBlock(this);
+  } else if (other->dominator() != NULL) {
+    HBasicBlock* first = dominator_;
+    HBasicBlock* second = other;
+
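+    // Intersect the two dominator chains. Block ids follow reverse postorder,
+    // so a block with a larger id cannot dominate one with a smaller id;
+    // step up the chain of whichever block has the larger id.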
+    while (first != second) {
+      if (first->block_id() > second->block_id()) {
+        first = first->dominator();
+      } else {
+        second = second->dominator();
+      }
+      ASSERT(first != NULL && second != NULL);
+    }
+
+    if (dominator_ != first) {
+      ASSERT(dominator_->dominated_blocks_.Contains(this));
+      dominator_->dominated_blocks_.RemoveElement(this);
+      dominator_ = first;
+      first->AddDominatedBlock(this);
+    }
+  }
+}
+
+
+int HBasicBlock::PredecessorIndexOf(HBasicBlock* predecessor) const {
+  for (int i = 0; i < predecessors_.length(); ++i) {
+    if (predecessors_[i] == predecessor) return i;
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+#ifdef DEBUG
+void HBasicBlock::Verify() {
+  // Check that every block is finished.
+  ASSERT(IsFinished());
+  ASSERT(block_id() >= 0);
+
+  // Verify that all blocks targeting a branch target have the same boolean
+  // value on top of their expression stack.
+  if (!cond().is_null()) {
+    ASSERT(predecessors()->length() > 0);
+    for (int i = 1; i < predecessors()->length(); i++) {
+      HBasicBlock* pred = predecessors()->at(i);
+      HValue* top = pred->last_environment()->Top();
+      ASSERT(top->IsConstant());
+      Object* a = *HConstant::cast(top)->handle();
+      Object* b = *cond();
+      ASSERT(a == b);
+    }
+  }
+}
+#endif
+
+
+void HLoopInformation::RegisterBackEdge(HBasicBlock* block) {
+  this->back_edges_.Add(block);
+  AddBlock(block);
+}
+
+
+HBasicBlock* HLoopInformation::GetLastBackEdge() const {
+  int max_id = -1;
+  HBasicBlock* result = NULL;
+  for (int i = 0; i < back_edges_.length(); ++i) {
+    HBasicBlock* cur = back_edges_[i];
+    if (cur->block_id() > max_id) {
+      max_id = cur->block_id();
+      result = cur;
+    }
+  }
+  return result;
+}
+
+
+void HLoopInformation::AddBlock(HBasicBlock* block) {
+  if (block == loop_header()) return;
+  if (block->parent_loop_header() == loop_header()) return;
+  if (block->parent_loop_header() != NULL) {
+    AddBlock(block->parent_loop_header());
+  } else {
+    block->set_parent_loop_header(loop_header());
+    blocks_.Add(block);
+    for (int i = 0; i < block->predecessors()->length(); ++i) {
+      AddBlock(block->predecessors()->at(i));
+    }
+  }
+}
+
+
+#ifdef DEBUG
+
+// Checks reachability of the blocks in this graph and stores a bit in
+// the BitVector "reachable()" for every block that can be reached
+// from the start block of the graph. If "dont_visit" is non-null, the given
+// block is treated as if it were not part of the graph. "visited_count()"
+// returns the number of reachable blocks.
+class ReachabilityAnalyzer BASE_EMBEDDED {
+ public:
+  ReachabilityAnalyzer(HBasicBlock* entry_block,
+                       int block_count,
+                       HBasicBlock* dont_visit)
+      : visited_count_(0),
+        stack_(16),
+        reachable_(block_count),
+        dont_visit_(dont_visit) {
+    PushBlock(entry_block);
+    Analyze();
+  }
+
+  int visited_count() const { return visited_count_; }
+  const BitVector* reachable() const { return &reachable_; }
+
+ private:
+  void PushBlock(HBasicBlock* block) {
+    if (block != NULL && block != dont_visit_ &&
+        !reachable_.Contains(block->block_id())) {
+      reachable_.Add(block->block_id());
+      stack_.Add(block);
+      visited_count_++;
+    }
+  }
+
+  void Analyze() {
+    while (!stack_.is_empty()) {
+      HControlInstruction* end = stack_.RemoveLast()->end();
+      PushBlock(end->FirstSuccessor());
+      PushBlock(end->SecondSuccessor());
+    }
+  }
+
+  int visited_count_;
+  ZoneList<HBasicBlock*> stack_;
+  BitVector reachable_;
+  HBasicBlock* dont_visit_;
+};
+
+
+void HGraph::Verify() const {
+  for (int i = 0; i < blocks_.length(); i++) {
+    HBasicBlock* block = blocks_.at(i);
+
+    block->Verify();
+
+    // Check that every block contains at least one node and that only the last
+    // node is a control instruction.
+    HInstruction* current = block->first();
+    ASSERT(current != NULL && current->IsBlockEntry());
+    while (current != NULL) {
+      ASSERT((current->next() == NULL) == current->IsControlInstruction());
+      ASSERT(current->block() == block);
+      current->Verify();
+      current = current->next();
+    }
+
+    // Check that successors are correctly set.
+    HBasicBlock* first = block->end()->FirstSuccessor();
+    HBasicBlock* second = block->end()->SecondSuccessor();
+    ASSERT(second == NULL || first != NULL);
+
+    // Check that the predecessor array is correct.
+    if (first != NULL) {
+      ASSERT(first->predecessors()->Contains(block));
+      if (second != NULL) {
+        ASSERT(second->predecessors()->Contains(block));
+      }
+    }
+
+    // Check that phis have correct arguments.
+    for (int j = 0; j < block->phis()->length(); j++) {
+      HPhi* phi = block->phis()->at(j);
+      phi->Verify();
+    }
+
+    // Check that all join blocks have predecessors that end with an
+    // unconditional goto and agree on their environment node id.
+    if (block->predecessors()->length() >= 2) {
+      int id = block->predecessors()->first()->last_environment()->ast_id();
+      for (int k = 0; k < block->predecessors()->length(); k++) {
+        HBasicBlock* predecessor = block->predecessors()->at(k);
+        ASSERT(predecessor->end()->IsGoto());
+        ASSERT(predecessor->last_environment()->ast_id() == id);
+      }
+    }
+  }
+
+  // Check special property of first block to have no predecessors.
+  ASSERT(blocks_.at(0)->predecessors()->is_empty());
+
+  // Check that the graph is fully connected.
+  ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
+  ASSERT(analyzer.visited_count() == blocks_.length());
+
+  // Check that entry block dominator is NULL.
+  ASSERT(entry_block_->dominator() == NULL);
+
+  // Check dominators.
+  for (int i = 0; i < blocks_.length(); ++i) {
+    HBasicBlock* block = blocks_.at(i);
+    if (block->dominator() == NULL) {
+      // Only the start block may have no dominator assigned.
+      ASSERT(i == 0);
+    } else {
+      // Assert that the block is unreachable when its dominator is skipped.
+      ReachabilityAnalyzer dominator_analyzer(entry_block_,
+                                              blocks_.length(),
+                                              block->dominator());
+      ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
+    }
+  }
+}
+
+#endif
+
+
+HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
+                               Object* value) {
+  if (!pointer->is_set()) {
+    HConstant* constant = new HConstant(Handle<Object>(value),
+                                        Representation::Tagged());
+    constant->InsertAfter(GetConstantUndefined());
+    pointer->set(constant);
+  }
+  return pointer->get();
+}
+
+
+HConstant* HGraph::GetConstant1() {
+  return GetConstant(&constant_1_, Smi::FromInt(1));
+}
+
+
+HConstant* HGraph::GetConstantMinus1() {
+  return GetConstant(&constant_minus1_, Smi::FromInt(-1));
+}
+
+
+HConstant* HGraph::GetConstantTrue() {
+  return GetConstant(&constant_true_, Heap::true_value());
+}
+
+
+HConstant* HGraph::GetConstantFalse() {
+  return GetConstant(&constant_false_, Heap::false_value());
+}
+
+
+void HSubgraph::AppendOptional(HSubgraph* graph,
+                               bool on_true_branch,
+                               HValue* boolean_value) {
+  ASSERT(HasExit() && graph->HasExit());
+  HBasicBlock* other_block = graph_->CreateBasicBlock();
+  HBasicBlock* join_block = graph_->CreateBasicBlock();
+
+  HBasicBlock* true_branch = other_block;
+  HBasicBlock* false_branch = graph->entry_block();
+  if (on_true_branch) {
+    true_branch = graph->entry_block();
+    false_branch = other_block;
+  }
+
+  exit_block_->Finish(new HBranch(true_branch, false_branch, boolean_value));
+  other_block->Goto(join_block);
+  graph->exit_block()->Goto(join_block);
+  exit_block_ = join_block;
+}
+
+
+void HSubgraph::AppendJoin(HSubgraph* then_graph,
+                           HSubgraph* else_graph,
+                           AstNode* node) {
+  if (then_graph->HasExit() && else_graph->HasExit()) {
+    // We need to merge, create new merge block.
+    HBasicBlock* join_block = graph_->CreateBasicBlock();
+    then_graph->exit_block()->Goto(join_block);
+    else_graph->exit_block()->Goto(join_block);
+    join_block->SetJoinId(node->id());
+    exit_block_ = join_block;
+  } else if (then_graph->HasExit()) {
+    exit_block_ = then_graph->exit_block_;
+  } else if (else_graph->HasExit()) {
+    exit_block_ = else_graph->exit_block_;
+  } else {
+    exit_block_ = NULL;
+  }
+}
+
+
+void HSubgraph::ResolveContinue(IterationStatement* statement) {
+  HBasicBlock* continue_block = BundleContinue(statement);
+  if (continue_block != NULL) {
+    exit_block_ = JoinBlocks(exit_block(),
+                             continue_block,
+                             statement->ContinueId());
+  }
+}
+
+
+HBasicBlock* HSubgraph::BundleBreak(BreakableStatement* statement) {
+  return BundleBreakContinue(statement, false, statement->ExitId());
+}
+
+
+HBasicBlock* HSubgraph::BundleContinue(IterationStatement* statement) {
+  return BundleBreakContinue(statement, true, statement->ContinueId());
+}
+
+
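+// Collects all unresolved break (or continue) exits targeting the given
+// statement into a single join block; returns NULL if there are none.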
+HBasicBlock* HSubgraph::BundleBreakContinue(BreakableStatement* statement,
+                                            bool is_continue,
+                                            int join_id) {
+  HBasicBlock* result = NULL;
+  const ZoneList<BreakContinueInfo*>* infos = break_continue_info();
+  for (int i = 0; i < infos->length(); ++i) {
+    BreakContinueInfo* info = infos->at(i);
+    if (info->is_continue() == is_continue &&
+        info->target() == statement &&
+        !info->IsResolved()) {
+      if (result == NULL) {
+        result = graph_->CreateBasicBlock();
+      }
+      info->block()->Goto(result);
+      info->Resolve();
+    }
+  }
+
+  if (result != NULL) result->SetJoinId(join_id);
+
+  return result;
+}
+
+
+HBasicBlock* HSubgraph::JoinBlocks(HBasicBlock* a, HBasicBlock* b, int id) {
+  if (a == NULL) return b;
+  if (b == NULL) return a;
+  HBasicBlock* target = graph_->CreateBasicBlock();
+  a->Goto(target);
+  b->Goto(target);
+  target->SetJoinId(id);
+  return target;
+}
+
+
+void HSubgraph::AppendEndless(HSubgraph* body, IterationStatement* statement) {
+  ConnectExitTo(body->entry_block());
+  body->ResolveContinue(statement);
+  body->ConnectExitTo(body->entry_block(), true);
+  exit_block_ = body->BundleBreak(statement);
+  body->entry_block()->PostProcessLoopHeader(statement);
+}
+
+
+void HSubgraph::AppendDoWhile(HSubgraph* body,
+                              IterationStatement* statement,
+                              HSubgraph* go_back,
+                              HSubgraph* exit) {
+  ConnectExitTo(body->entry_block());
+  go_back->ConnectExitTo(body->entry_block(), true);
+
+  HBasicBlock* break_block = body->BundleBreak(statement);
+  exit_block_ =
+      JoinBlocks(exit->exit_block(), break_block, statement->ExitId());
+  body->entry_block()->PostProcessLoopHeader(statement);
+}
+
+
+void HSubgraph::AppendWhile(HSubgraph* condition,
+                            HSubgraph* body,
+                            IterationStatement* statement,
+                            HSubgraph* continue_subgraph,
+                            HSubgraph* exit) {
+  ConnectExitTo(condition->entry_block());
+
+  HBasicBlock* break_block = body->BundleBreak(statement);
+  exit_block_ =
+      JoinBlocks(exit->exit_block(), break_block, statement->ExitId());
+
+  if (continue_subgraph != NULL) {
+    body->ConnectExitTo(continue_subgraph->entry_block(), true);
+    continue_subgraph->entry_block()->SetJoinId(statement->EntryId());
+    exit_block_ = JoinBlocks(exit_block_,
+                             continue_subgraph->exit_block(),
+                             statement->ExitId());
+  } else {
+    body->ConnectExitTo(condition->entry_block(), true);
+  }
+  condition->entry_block()->PostProcessLoopHeader(statement);
+}
+
+
+void HSubgraph::Append(HSubgraph* next, BreakableStatement* stmt) {
+  exit_block_->Goto(next->entry_block());
+  exit_block_ = next->exit_block_;
+
+  if (stmt != NULL) {
+    next->entry_block()->SetJoinId(stmt->EntryId());
+    HBasicBlock* break_block = next->BundleBreak(stmt);
+    exit_block_ = JoinBlocks(exit_block(), break_block, stmt->ExitId());
+  }
+}
+
+
+void HSubgraph::FinishExit(HControlInstruction* instruction) {
+  ASSERT(HasExit());
+  exit_block_->Finish(instruction);
+  exit_block_->ClearEnvironment();
+  exit_block_ = NULL;
+}
+
+
+void HSubgraph::FinishBreakContinue(BreakableStatement* target,
+                                    bool is_continue) {
+  ASSERT(!exit_block_->IsFinished());
+  BreakContinueInfo* info = new BreakContinueInfo(target, exit_block_,
+                                                  is_continue);
+  break_continue_info_.Add(info);
+  exit_block_ = NULL;
+}
+
+
+HGraph::HGraph(CompilationInfo* info)
+    : HSubgraph(this),
+      next_block_id_(0),
+      info_(info),
+      blocks_(8),
+      values_(16),
+      phi_list_(NULL) {
+  start_environment_ = new HEnvironment(NULL, info->scope(), info->closure());
+  start_environment_->set_ast_id(info->function()->id());
+}
+
+
+Handle<Code> HGraph::Compile() {
+  int values = GetMaximumValueID();
+  if (values > LAllocator::max_initial_value_ids()) {
+    if (FLAG_trace_bailout) PrintF("Function is too big\n");
+    return Handle<Code>::null();
+  }
+
+  LAllocator allocator(values, this);
+  LChunkBuilder builder(this, &allocator);
+  LChunk* chunk = builder.Build();
+  if (chunk == NULL) return Handle<Code>::null();
+
+  if (!FLAG_alloc_lithium) return Handle<Code>::null();
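+  // The remaining phases are individually gated by flags; without them the
+  // chunk is built (and possibly allocated) but no code is generated.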
+
+  allocator.Allocate(chunk);
+
+  if (!FLAG_use_lithium) return Handle<Code>::null();
+
+  MacroAssembler assembler(NULL, 0);
+  LCodeGen generator(chunk, &assembler, info());
+
+  if (FLAG_eliminate_empty_blocks) {
+    chunk->MarkEmptyBlocks();
+  }
+
+  if (generator.GenerateCode()) {
+    if (FLAG_trace_codegen) {
+      PrintF("Crankshaft Compiler - ");
+    }
+    CodeGenerator::MakeCodePrologue(info());
+    Code::Flags flags =
+        Code::ComputeFlags(Code::OPTIMIZED_FUNCTION, NOT_IN_LOOP);
+    Handle<Code> code =
+        CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
+    generator.FinishCode(code);
+    CodeGenerator::PrintCode(code, info());
+    return code;
+  }
+  return Handle<Code>::null();
+}
+
+
+HBasicBlock* HGraph::CreateBasicBlock() {
+  HBasicBlock* result = new HBasicBlock(this);
+  blocks_.Add(result);
+  return result;
+}
+
+
+void HGraph::Canonicalize() {
+  HPhase phase("Canonicalize", this);
+  if (FLAG_use_canonicalizing) {
+    for (int i = 0; i < blocks()->length(); ++i) {
+      HBasicBlock* b = blocks()->at(i);
+      for (HInstruction* insn = b->first(); insn != NULL; insn = insn->next()) {
+        HValue* value = insn->Canonicalize();
+        if (value != insn) {
+          if (value != NULL) {
+            insn->ReplaceAndDelete(value);
+          } else {
+            insn->Delete();
+          }
+        }
+      }
+    }
+  }
+}
+
+
+void HGraph::OrderBlocks() {
+  HPhase phase("Block ordering");
+  BitVector visited(blocks_.length());
+
+  ZoneList<HBasicBlock*> reverse_result(8);
+  HBasicBlock* start = blocks_[0];
+  Postorder(start, &visited, &reverse_result, NULL);
+
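+  // Reversing the postorder gives a reverse-postorder numbering in which
+  // every block appears after its dominator.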
+  blocks_.Clear();
+  int index = 0;
+  for (int i = reverse_result.length() - 1; i >= 0; --i) {
+    HBasicBlock* b = reverse_result[i];
+    blocks_.Add(b);
+    b->set_block_id(index++);
+  }
+}
+
+
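+// Visits the members of a loop as a unit, so that the blocks of a loop body
+// stay contiguous in the resulting block order.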
+void HGraph::PostorderLoopBlocks(HLoopInformation* loop,
+                                 BitVector* visited,
+                                 ZoneList<HBasicBlock*>* order,
+                                 HBasicBlock* loop_header) {
+  for (int i = 0; i < loop->blocks()->length(); ++i) {
+    HBasicBlock* b = loop->blocks()->at(i);
+    Postorder(b->end()->SecondSuccessor(), visited, order, loop_header);
+    Postorder(b->end()->FirstSuccessor(), visited, order, loop_header);
+    if (b->IsLoopHeader() && b != loop->loop_header()) {
+      PostorderLoopBlocks(b->loop_information(), visited, order, loop_header);
+    }
+  }
+}
+
+
+void HGraph::Postorder(HBasicBlock* block,
+                       BitVector* visited,
+                       ZoneList<HBasicBlock*>* order,
+                       HBasicBlock* loop_header) {
+  if (block == NULL || visited->Contains(block->block_id())) return;
+  if (block->parent_loop_header() != loop_header) return;
+  visited->Add(block->block_id());
+  if (block->IsLoopHeader()) {
+    PostorderLoopBlocks(block->loop_information(), visited, order, loop_header);
+    Postorder(block->end()->SecondSuccessor(), visited, order, block);
+    Postorder(block->end()->FirstSuccessor(), visited, order, block);
+  } else {
+    Postorder(block->end()->SecondSuccessor(), visited, order, loop_header);
+    Postorder(block->end()->FirstSuccessor(), visited, order, loop_header);
+  }
+  ASSERT(block->end()->FirstSuccessor() == NULL ||
+         order->Contains(block->end()->FirstSuccessor()) ||
+         block->end()->FirstSuccessor()->IsLoopHeader());
+  ASSERT(block->end()->SecondSuccessor() == NULL ||
+         order->Contains(block->end()->SecondSuccessor()) ||
+         block->end()->SecondSuccessor()->IsLoopHeader());
+  order->Add(block);
+}
+
+
+void HGraph::AssignDominators() {
+  HPhase phase("Assign dominators", this);
+  for (int i = 0; i < blocks_.length(); ++i) {
+    if (blocks_[i]->IsLoopHeader()) {
+      blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->first());
+    } else {
+      for (int j = 0; j < blocks_[i]->predecessors()->length(); ++j) {
+        blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->at(j));
+      }
+    }
+  }
+}
+
+
+void HGraph::EliminateRedundantPhis() {
+  HPhase phase("Phi elimination", this);
+  ZoneList<HValue*> uses_to_replace(2);
+
+  // Worklist of phis that can potentially be eliminated. Initialized
+  // with all phi nodes. When elimination of a phi node modifies
+  // another phi node, the modified phi node is added to the worklist.
+  ZoneList<HPhi*> worklist(blocks_.length());
+  for (int i = 0; i < blocks_.length(); ++i) {
+    worklist.AddAll(*blocks_[i]->phis());
+  }
+
+  while (!worklist.is_empty()) {
+    HPhi* phi = worklist.RemoveLast();
+    HBasicBlock* block = phi->block();
+
+    // Skip phi node if it was already replaced.
+    if (block == NULL) continue;
+
+    // Get replacement value if phi is redundant.
+    HValue* value = phi->GetRedundantReplacement();
+
+    if (value != NULL) {
+      // Iterate through uses finding the ones that should be
+      // replaced.
+      const ZoneList<HValue*>* uses = phi->uses();
+      for (int i = 0; i < uses->length(); ++i) {
+        HValue* use = uses->at(i);
+        if (!use->block()->IsStartBlock()) {
+          uses_to_replace.Add(use);
+        }
+      }
+      // Replace the uses and add phis modified to the work list.
+      for (int i = 0; i < uses_to_replace.length(); ++i) {
+        HValue* use = uses_to_replace[i];
+        phi->ReplaceAtUse(use, value);
+        if (use->IsPhi()) worklist.Add(HPhi::cast(use));
+      }
+      uses_to_replace.Rewind(0);
+      block->RemovePhi(phi);
+    } else if (phi->HasNoUses() &&
+               !phi->HasReceiverOperand() &&
+               FLAG_eliminate_dead_phis) {
+      // We can't eliminate phis that have the receiver as an operand,
+      // because if an error is thrown we need the correct receiver
+      // value in the environment to construct a correct stack trace.
+      block->RemovePhi(phi);
+      block->RecordDeletedPhi(phi->merged_index());
+    }
+  }
+}
+
+
+bool HGraph::CollectPhis() {
+  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
+  phi_list_ = new ZoneList<HPhi*>(blocks->length());
+  for (int i = 0; i < blocks->length(); ++i) {
+    for (int j = 0; j < blocks->at(i)->phis()->length(); j++) {
+      HPhi* phi = blocks->at(i)->phis()->at(j);
+      phi_list_->Add(phi);
+      // We don't support phi uses of arguments for now.
+      if (phi->CheckFlag(HValue::kIsArguments)) return false;
+    }
+  }
+  return true;
+}
+
+
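+// Runs a worklist fixed point: whenever a value's inferred type changes, all
+// of its uses are reconsidered.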
+void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
+  BitVector in_worklist(GetMaximumValueID());
+  for (int i = 0; i < worklist->length(); ++i) {
+    ASSERT(!in_worklist.Contains(worklist->at(i)->id()));
+    in_worklist.Add(worklist->at(i)->id());
+  }
+
+  while (!worklist->is_empty()) {
+    HValue* current = worklist->RemoveLast();
+    in_worklist.Remove(current->id());
+    if (current->UpdateInferredType()) {
+      for (int j = 0; j < current->uses()->length(); j++) {
+        HValue* use = current->uses()->at(j);
+        if (!in_worklist.Contains(use->id())) {
+          in_worklist.Add(use->id());
+          worklist->Add(use);
+        }
+      }
+    }
+  }
+}
+
+
+class HRangeAnalysis BASE_EMBEDDED {
+ public:
+  explicit HRangeAnalysis(HGraph* graph) : graph_(graph), changed_ranges_(16) {}
+
+  void Analyze();
+
+ private:
+  void TraceRange(const char* msg, ...);
+  void Analyze(HBasicBlock* block);
+  void InferControlFlowRange(HBranch* branch, HBasicBlock* dest);
+  void InferControlFlowRange(Token::Value op, HValue* value, HValue* other);
+  void InferPhiRange(HPhi* phi);
+  void InferRange(HValue* value);
+  void RollBackTo(int index);
+  void AddRange(HValue* value, Range* range);
+
+  HGraph* graph_;
+  ZoneList<HValue*> changed_ranges_;
+};
+
+
+void HRangeAnalysis::TraceRange(const char* msg, ...) {
+  if (FLAG_trace_range) {
+    va_list arguments;
+    va_start(arguments, msg);
+    OS::VPrint(msg, arguments);
+    va_end(arguments);
+  }
+}
+
+
+void HRangeAnalysis::Analyze() {
+  HPhase phase("Range analysis", graph_);
+  Analyze(graph_->blocks()->at(0));
+}
+
+
+void HRangeAnalysis::Analyze(HBasicBlock* block) {
+  TraceRange("Analyzing block B%d\n", block->block_id());
+
+  int last_changed_range = changed_ranges_.length() - 1;
+
+  // Infer range based on control flow.
+  if (block->predecessors()->length() == 1) {
+    HBasicBlock* pred = block->predecessors()->first();
+    if (pred->end()->IsBranch()) {
+      InferControlFlowRange(HBranch::cast(pred->end()), block);
+    }
+  }
+
+  // Process phi instructions.
+  for (int i = 0; i < block->phis()->length(); ++i) {
+    HPhi* phi = block->phis()->at(i);
+    InferPhiRange(phi);
+  }
+
+  // Go through all instructions of the current block.
+  HInstruction* instr = block->first();
+  while (instr != block->end()) {
+    InferRange(instr);
+    instr = instr->next();
+  }
+
+  // Continue analysis in all dominated blocks.
+  for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
+    Analyze(block->dominated_blocks()->at(i));
+  }
+
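+  // Ranges inferred while processing this block only hold inside its
+  // dominator subtree, so they are rolled back before returning.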
+  RollBackTo(last_changed_range);
+}
+
+
+void HRangeAnalysis::InferControlFlowRange(HBranch* branch, HBasicBlock* dest) {
+  ASSERT(branch->FirstSuccessor() == dest || branch->SecondSuccessor() == dest);
+  ASSERT(branch->FirstSuccessor() != dest || branch->SecondSuccessor() != dest);
+
+  if (branch->value()->IsCompare()) {
+    HCompare* compare = HCompare::cast(branch->value());
+    Token::Value op = compare->token();
+    if (branch->SecondSuccessor() == dest) {
+      op = Token::NegateCompareOp(op);
+    }
+    Token::Value inverted_op = Token::InvertCompareOp(op);
+    InferControlFlowRange(op, compare->left(), compare->right());
+    InferControlFlowRange(inverted_op, compare->right(), compare->left());
+  }
+}
+
+
+// We know that value [op] other. Use this information to update the range on
+// value.
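+// For example (illustrative): given the guard value < other with other known
+// to be in [0, 10], value's range becomes bounded above by 10 - 1 = 9.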
+void HRangeAnalysis::InferControlFlowRange(Token::Value op,
+                                           HValue* value,
+                                           HValue* other) {
+  Range* range = other->range();
+  if (range == NULL) range = new Range();
+  Range* new_range = NULL;
+
+  TraceRange("Control flow range infer %d %s %d\n",
+             value->id(),
+             Token::Name(op),
+             other->id());
+
+  if (op == Token::EQ || op == Token::EQ_STRICT) {
+    // The same range has to apply for value.
+    new_range = range->Copy();
+  } else if (op == Token::LT || op == Token::LTE) {
+    new_range = range->CopyClearLower();
+    if (op == Token::LT) {
+      new_range->Add(-1);
+    }
+  } else if (op == Token::GT || op == Token::GTE) {
+    new_range = range->CopyClearUpper();
+    if (op == Token::GT) {
+      new_range->Add(1);
+    }
+  }
+
+  if (new_range != NULL && !new_range->IsMostGeneric()) {
+    AddRange(value, new_range);
+  }
+}
+
+
+void HRangeAnalysis::InferPhiRange(HPhi* phi) {
+  // TODO(twuerthinger): Infer loop phi ranges.
+  InferRange(phi);
+}
+
+
+void HRangeAnalysis::InferRange(HValue* value) {
+  ASSERT(!value->HasRange());
+  if (!value->representation().IsNone()) {
+    value->ComputeInitialRange();
+    Range* range = value->range();
+    TraceRange("Initial inferred range of %d (%s) set to [%d,%d]\n",
+               value->id(),
+               value->Mnemonic(),
+               range->lower(),
+               range->upper());
+  }
+}
+
+
+void HRangeAnalysis::RollBackTo(int index) {
+  for (int i = index + 1; i < changed_ranges_.length(); ++i) {
+    changed_ranges_[i]->RemoveLastAddedRange();
+  }
+  changed_ranges_.Rewind(index + 1);
+}
+
+
+void HRangeAnalysis::AddRange(HValue* value, Range* range) {
+  Range* original_range = value->range();
+  value->AddNewRange(range);
+  changed_ranges_.Add(value);
+  Range* new_range = value->range();
+  TraceRange("Updated range of %d set to [%d,%d]\n",
+             value->id(),
+             new_range->lower(),
+             new_range->upper());
+  if (original_range != NULL) {
+    TraceRange("Original range was [%d,%d]\n",
+               original_range->lower(),
+               original_range->upper());
+  }
+  TraceRange("New information was [%d,%d]\n",
+             range->lower(),
+             range->upper());
+}
+
+
+void TraceGVN(const char* msg, ...) {
+  if (FLAG_trace_gvn) {
+    va_list arguments;
+    va_start(arguments, msg);
+    OS::VPrint(msg, arguments);
+    va_end(arguments);
+  }
+}
+
+
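+// The value map is an open hash table: array_ holds one entry per bucket and
+// lists_ holds the overflow chains, linked through integer indices that end
+// in kNil; free_list_head_ chains the unused lists_ slots.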
+HValueMap::HValueMap(const HValueMap* other)
+    : array_size_(other->array_size_),
+      lists_size_(other->lists_size_),
+      count_(other->count_),
+      present_flags_(other->present_flags_),
+      array_(Zone::NewArray<HValueMapListElement>(other->array_size_)),
+      lists_(Zone::NewArray<HValueMapListElement>(other->lists_size_)),
+      free_list_head_(other->free_list_head_) {
+  memcpy(array_, other->array_, array_size_ * sizeof(HValueMapListElement));
+  memcpy(lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
+}
+
+
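+// Removes all entries whose value's flags intersect the "depends" flags
+// derived from the given "changes" flags, rebuilding each collision list and
+// recomputing present_flags_ from the surviving entries.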
+void HValueMap::Kill(int flags) {
+  int depends_flags = HValue::ConvertChangesToDependsFlags(flags);
+  if ((present_flags_ & depends_flags) == 0) return;
+  present_flags_ = 0;
+  for (int i = 0; i < array_size_; ++i) {
+    HValue* value = array_[i].value;
+    if (value != NULL) {
+      // Clear list of collisions first, so we know if it becomes empty.
+      int kept = kNil;  // List of kept elements.
+      int next;
+      for (int current = array_[i].next; current != kNil; current = next) {
+        next = lists_[current].next;
+        if ((lists_[current].value->flags() & depends_flags) != 0) {
+          // Drop it.
+          count_--;
+          lists_[current].next = free_list_head_;
+          free_list_head_ = current;
+        } else {
+          // Keep it.
+          lists_[current].next = kept;
+          kept = current;
+          present_flags_ |= lists_[current].value->flags();
+        }
+      }
+      array_[i].next = kept;
+
+      // Now possibly drop directly indexed element.
+      if ((array_[i].value->flags() & depends_flags) != 0) {  // Drop it.
+        count_--;
+        int head = array_[i].next;
+        if (head == kNil) {
+          array_[i].value = NULL;
+        } else {
+          array_[i].value = lists_[head].value;
+          array_[i].next = lists_[head].next;
+          lists_[head].next = free_list_head_;
+          free_list_head_ = head;
+        }
+      } else {
+        present_flags_ |= array_[i].value->flags();  // Keep it.
+      }
+    }
+  }
+}
+
+
+HValue* HValueMap::Lookup(HValue* value) const {
+  uint32_t hash = value->Hashcode();
+  uint32_t pos = Bound(hash);
+  if (array_[pos].value != NULL) {
+    if (array_[pos].value->Equals(value)) return array_[pos].value;
+    int next = array_[pos].next;
+    while (next != kNil) {
+      if (lists_[next].value->Equals(value)) return lists_[next].value;
+      next = lists_[next].next;
+    }
+  }
+  return NULL;
+}
+
+
+void HValueMap::Resize(int new_size) {
+  ASSERT(new_size > count_);
+  // Hashing the values into the new array has no more collisions than in the
+  // old hash map, so we can use the existing lists_ array, if we are careful.
+
+  // Make sure we have at least one free element.
+  if (free_list_head_ == kNil) {
+    ResizeLists(lists_size_ << 1);
+  }
+
+  HValueMapListElement* new_array =
+      Zone::NewArray<HValueMapListElement>(new_size);
+  memset(new_array, 0, sizeof(HValueMapListElement) * new_size);
+
+  HValueMapListElement* old_array = array_;
+  int old_size = array_size_;
+
+  int old_count = count_;
+  count_ = 0;
+  // Do not modify present_flags_.  It is currently correct.
+  array_size_ = new_size;
+  array_ = new_array;
+
+  if (old_array != NULL) {
+    // Iterate over all the elements in lists, rehashing them.
+    for (int i = 0; i < old_size; ++i) {
+      if (old_array[i].value != NULL) {
+        int current = old_array[i].next;
+        while (current != kNil) {
+          Insert(lists_[current].value);
+          int next = lists_[current].next;
+          lists_[current].next = free_list_head_;
+          free_list_head_ = current;
+          current = next;
+        }
+        // Rehash the directly stored value.
+        Insert(old_array[i].value);
+      }
+    }
+  }
+  USE(old_count);
+  ASSERT(count_ == old_count);
+}
+
+
+void HValueMap::ResizeLists(int new_size) {
+  ASSERT(new_size > lists_size_);
+
+  HValueMapListElement* new_lists =
+      Zone::NewArray<HValueMapListElement>(new_size);
+  memset(new_lists, 0, sizeof(HValueMapListElement) * new_size);
+
+  HValueMapListElement* old_lists = lists_;
+  int old_size = lists_size_;
+
+  lists_size_ = new_size;
+  lists_ = new_lists;
+
+  if (old_lists != NULL) {
+    memcpy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
+  }
+  for (int i = old_size; i < lists_size_; ++i) {
+    lists_[i].next = free_list_head_;
+    free_list_head_ = i;
+  }
+}
+
+
+void HValueMap::Insert(HValue* value) {
+  ASSERT(value != NULL);
+  // Resize when the hashtable is half full.
+  if (count_ >= array_size_ >> 1) Resize(array_size_ << 1);
+  ASSERT(count_ < array_size_);
+  count_++;
+  uint32_t pos = Bound(value->Hashcode());
+  if (array_[pos].value == NULL) {
+    array_[pos].value = value;
+    array_[pos].next = kNil;
+  } else {
+    if (free_list_head_ == kNil) {
+      ResizeLists(lists_size_ << 1);
+    }
+    int new_element_pos = free_list_head_;
+    ASSERT(new_element_pos != kNil);
+    free_list_head_ = lists_[free_list_head_].next;
+    lists_[new_element_pos].value = value;
+    lists_[new_element_pos].next = array_[pos].next;
+    ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].value != NULL);
+    array_[pos].next = new_element_pos;
+  }
+}
+
+
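+// Removes the stack check from a loop's backwards branch when that branch is
+// dominated by a call, since the call already checks the stack.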
+class HStackCheckEliminator BASE_EMBEDDED {
+ public:
+  explicit HStackCheckEliminator(HGraph* graph) : graph_(graph) { }
+
+  void Process();
+
+ private:
+  void RemoveStackCheck(HBasicBlock* block);
+
+  HGraph* graph_;
+};
+
+
+void HStackCheckEliminator::Process() {
+  // For each loop header, walk the dominator tree from the backwards branch
+  // up to the header. If a call instruction is encountered, the backwards
+  // branch is dominated by a call and its stack check can be removed.
+  for (int i = 0; i < graph_->blocks()->length(); i++) {
+    HBasicBlock* block = graph_->blocks()->at(i);
+    if (block->IsLoopHeader()) {
+      HBasicBlock* backedge = block->loop_information()->GetLastBackEdge();
+      HBasicBlock* dominator = backedge;
+      bool backedge_dominated_by_call = false;
+      while (dominator != block && !backedge_dominated_by_call) {
+        HInstruction* instr = dominator->first();
+        while (instr != NULL && !backedge_dominated_by_call) {
+          if (instr->IsCall()) {
+            RemoveStackCheck(backedge);
+            backedge_dominated_by_call = true;
+          }
+          instr = instr->next();
+        }
+        dominator = dominator->dominator();
+      }
+    }
+  }
+}
+
+
+void HStackCheckEliminator::RemoveStackCheck(HBasicBlock* block) {
+  HInstruction* instr = block->first();
+  while (instr != NULL) {
+    if (instr->IsGoto()) {
+      HGoto::cast(instr)->set_include_stack_check(false);
+      return;
+    }
+    instr = instr->next();
+  }
+}
+
+
+class HGlobalValueNumberer BASE_EMBEDDED {
+ public:
+  explicit HGlobalValueNumberer(HGraph* graph)
+      : graph_(graph),
+        block_side_effects_(graph_->blocks()->length()),
+        loop_side_effects_(graph_->blocks()->length()) {
+    ASSERT(Heap::allow_allocation(false));
+    block_side_effects_.AddBlock(0, graph_->blocks()->length());
+    loop_side_effects_.AddBlock(0, graph_->blocks()->length());
+  }
+  ~HGlobalValueNumberer() {
+    ASSERT(!Heap::allow_allocation(true));
+  }
+
+  void Analyze();
+
+ private:
+  void AnalyzeBlock(HBasicBlock* block, HValueMap* map);
+  void ComputeBlockSideEffects();
+  void LoopInvariantCodeMotion();
+  void ProcessLoopBlock(HBasicBlock* block,
+                        HBasicBlock* before_loop,
+                        int loop_kills);
+  bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
+
+  HGraph* graph_;
+
+  // A map of block IDs to their side effects.
+  ZoneList<int> block_side_effects_;
+
+  // A map of loop header block IDs to their loop's side effects.
+  ZoneList<int> loop_side_effects_;
+};
+
+
+void HGlobalValueNumberer::Analyze() {
+  ComputeBlockSideEffects();
+  if (FLAG_loop_invariant_code_motion) {
+    LoopInvariantCodeMotion();
+  }
+  HValueMap* map = new HValueMap();
+  AnalyzeBlock(graph_->blocks()->at(0), map);
+}
+
+
+void HGlobalValueNumberer::ComputeBlockSideEffects() {
+  for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
+    // Compute side effects for the block.
+    HBasicBlock* block = graph_->blocks()->at(i);
+    HInstruction* instr = block->first();
+    int id = block->block_id();
+    int side_effects = 0;
+    while (instr != NULL) {
+      side_effects |= (instr->flags() & HValue::ChangesFlagsMask());
+      instr = instr->next();
+    }
+    block_side_effects_[id] |= side_effects;
+
+    // Loop headers are part of their loop.
+    if (block->IsLoopHeader()) {
+      loop_side_effects_[id] |= side_effects;
+    }
+
+    // Propagate loop side effects upwards.
+    if (block->HasParentLoopHeader()) {
+      int header_id = block->parent_loop_header()->block_id();
+      loop_side_effects_[header_id] |=
+          block->IsLoopHeader() ? loop_side_effects_[id] : side_effects;
+    }
+  }
+}
+
+
+void HGlobalValueNumberer::LoopInvariantCodeMotion() {
+  for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
+    HBasicBlock* block = graph_->blocks()->at(i);
+    if (block->IsLoopHeader()) {
+      int side_effects = loop_side_effects_[block->block_id()];
+      TraceGVN("Try loop invariant motion for block B%d effects=0x%x\n",
+               block->block_id(),
+               side_effects);
+
+      HBasicBlock* last = block->loop_information()->GetLastBackEdge();
+      for (int j = block->block_id(); j <= last->block_id(); ++j) {
+        ProcessLoopBlock(graph_->blocks()->at(j), block, side_effects);
+      }
+    }
+  }
+}
+
+
+void HGlobalValueNumberer::ProcessLoopBlock(HBasicBlock* block,
+                                            HBasicBlock* loop_header,
+                                            int loop_kills) {
+  HBasicBlock* pre_header = loop_header->predecessors()->at(0);
+  int depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
+  TraceGVN("Loop invariant motion for B%d depends_flags=0x%x\n",
+           block->block_id(),
+           depends_flags);
+  HInstruction* instr = block->first();
+  while (instr != NULL) {
+    HInstruction* next = instr->next();
+    if (instr->CheckFlag(HValue::kUseGVN) &&
+        (instr->flags() & depends_flags) == 0) {
+      TraceGVN("Checking instruction %d (%s)\n",
+               instr->id(),
+               instr->Mnemonic());
+      bool inputs_loop_invariant = true;
+      for (int i = 0; i < instr->OperandCount(); ++i) {
+        if (instr->OperandAt(i)->IsDefinedAfter(pre_header)) {
+          inputs_loop_invariant = false;
+        }
+      }
+
+      if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
+        TraceGVN("Found loop invariant instruction %d\n", instr->id());
+        // Move the instruction out of the loop.
+        instr->Unlink();
+        instr->InsertBefore(pre_header->end());
+      }
+    }
+    instr = next;
+  }
+}
+
+
+// Only move instructions that postdominate the loop header (i.e. are
+// always executed inside the loop). This is to avoid unnecessary
+// deoptimizations assuming the loop is executed at least once.
+// TODO(fschneider): Better type feedback should give us information
+// about code that was never executed.
+bool HGlobalValueNumberer::ShouldMove(HInstruction* instr,
+                                      HBasicBlock* loop_header) {
+  if (!instr->IsChange() &&
+      FLAG_aggressive_loop_invariant_motion) return true;
+  HBasicBlock* block = instr->block();
+  bool result = true;
+  if (block != loop_header) {
+    for (int i = 1; i < loop_header->predecessors()->length(); ++i) {
+      bool found = false;
+      HBasicBlock* pred = loop_header->predecessors()->at(i);
+      while (pred != loop_header) {
+        if (pred == block) found = true;
+        pred = pred->dominator();
+      }
+      if (!found) {
+        result = false;
+        break;
+      }
+    }
+  }
+  return result;
+}
+
+
+void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block, HValueMap* map) {
+  TraceGVN("Analyzing block B%d\n", block->block_id());
+
+  // If this is a loop header kill everything killed by the loop.
+  if (block->IsLoopHeader()) {
+    map->Kill(loop_side_effects_[block->block_id()]);
+  }
+
+  // Go through all instructions of the current block.
+  HInstruction* instr = block->first();
+  while (instr != NULL) {
+    HInstruction* next = instr->next();
+    int flags = (instr->flags() & HValue::ChangesFlagsMask());
+    if (flags != 0) {
+      ASSERT(!instr->CheckFlag(HValue::kUseGVN));
+      // Clear all instructions in the map that are affected by side effects.
+      map->Kill(flags);
+      TraceGVN("Instruction %d kills\n", instr->id());
+    } else if (instr->CheckFlag(HValue::kUseGVN)) {
+      HValue* other = map->Lookup(instr);
+      if (other != NULL) {
+        ASSERT(instr->Equals(other) && other->Equals(instr));
+        TraceGVN("Replacing value %d (%s) with value %d (%s)\n",
+                 instr->id(),
+                 instr->Mnemonic(),
+                 other->id(),
+                 other->Mnemonic());
+        instr->ReplaceValue(other);
+        instr->Delete();
+      } else {
+        map->Add(instr);
+      }
+    }
+    instr = next;
+  }
+
+  // Recursively continue analysis for all immediately dominated blocks.
+  int length = block->dominated_blocks()->length();
+  for (int i = 0; i < length; ++i) {
+    HBasicBlock* dominated = block->dominated_blocks()->at(i);
+    // No need to copy the map for the last child in the dominator tree.
+    HValueMap* successor_map = (i == length - 1) ? map : map->Copy();
+
+    // If the dominated block is not a successor to this block we have to
+    // kill everything killed on any path between this block and the
+    // dominated block.  Note we rely on the block ordering.
+    bool is_successor = false;
+    int predecessor_count = dominated->predecessors()->length();
+    for (int j = 0; !is_successor && j < predecessor_count; ++j) {
+      is_successor = (dominated->predecessors()->at(j) == block);
+    }
+
+    if (!is_successor) {
+      int side_effects = 0;
+      for (int j = block->block_id() + 1; j < dominated->block_id(); ++j) {
+        side_effects |= block_side_effects_[j];
+      }
+      successor_map->Kill(side_effects);
+    }
+
+    AnalyzeBlock(dominated, successor_map);
+  }
+}
+
+
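+// Fixed-point analysis that specializes the representation of flexible
+// values (to int32 or double) based first on their inputs and then on their
+// uses, re-queueing dependants whenever a representation changes.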
+class HInferRepresentation BASE_EMBEDDED {
+ public:
+  explicit HInferRepresentation(HGraph* graph)
+      : graph_(graph), worklist_(8), in_worklist_(graph->GetMaximumValueID()) {}
+
+  void Analyze();
+
+ private:
+  Representation TryChange(HValue* current);
+  void AddToWorklist(HValue* current);
+  void InferBasedOnInputs(HValue* current);
+  void AddDependantsToWorklist(HValue* current);
+  void InferBasedOnUses(HValue* current);
+
+  HGraph* graph_;
+  ZoneList<HValue*> worklist_;
+  BitVector in_worklist_;
+};
+
+
+void HInferRepresentation::AddToWorklist(HValue* current) {
+  if (current->representation().IsSpecialization()) return;
+  if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
+  if (in_worklist_.Contains(current->id())) return;
+  worklist_.Add(current);
+  in_worklist_.Add(current->id());
+}
+
+
+// This method tries to specialize the representation type of the value
+// given as a parameter. The value is asked to infer its representation type
+// based on its inputs. If the inferred type is more specialized, then this
+// becomes the new representation type of the node.
+void HInferRepresentation::InferBasedOnInputs(HValue* current) {
+  Representation r = current->representation();
+  if (r.IsSpecialization()) return;
+  ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
+  Representation inferred = current->InferredRepresentation();
+  if (inferred.IsSpecialization()) {
+    current->ChangeRepresentation(inferred);
+    AddDependantsToWorklist(current);
+  }
+}
+
+
+void HInferRepresentation::AddDependantsToWorklist(HValue* current) {
+  for (int i = 0; i < current->uses()->length(); ++i) {
+    AddToWorklist(current->uses()->at(i));
+  }
+  for (int i = 0; i < current->OperandCount(); ++i) {
+    AddToWorklist(current->OperandAt(i));
+  }
+}
+
+
+// This method calculates whether specializing the representation of the value
+// given as the parameter has a benefit in terms of fewer necessary type
+// conversions. If there is a benefit, then the representation of the value is
+// specialized.
+void HInferRepresentation::InferBasedOnUses(HValue* current) {
+  Representation r = current->representation();
+  if (r.IsSpecialization() || current->HasNoUses()) return;
+  ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
+  Representation new_rep = TryChange(current);
+  if (!new_rep.IsNone()) {
+    if (!current->representation().Equals(new_rep)) {
+      current->ChangeRepresentation(new_rep);
+      AddDependantsToWorklist(current);
+    }
+  }
+}
+
+
+Representation HInferRepresentation::TryChange(HValue* current) {
+  // Array of use counts for each representation.
+  int use_count[Representation::kNumRepresentations];
+  for (int i = 0; i < Representation::kNumRepresentations; i++) {
+    use_count[i] = 0;
+  }
+
+  for (int i = 0; i < current->uses()->length(); ++i) {
+    HValue* use = current->uses()->at(i);
+    int index = use->LookupOperandIndex(0, current);
+    Representation req_rep = use->RequiredInputRepresentation(index);
+    if (req_rep.IsNone()) continue;
+    if (use->IsPhi()) {
+      HPhi* phi = HPhi::cast(use);
+      phi->AddIndirectUsesTo(&use_count[0]);
+    }
+    use_count[req_rep.kind()]++;
+  }
+  int tagged_count = use_count[Representation::kTagged];
+  int double_count = use_count[Representation::kDouble];
+  int int32_count = use_count[Representation::kInteger32];
+  int non_tagged_count = double_count + int32_count;
+
+  // If a non-loop phi has tagged uses, don't convert it to untagged.
+  if (current->IsPhi() && !current->block()->IsLoopHeader()) {
+    if (tagged_count > 0) return Representation::None();
+  }
+
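+  // Example (illustrative): with 1 tagged, 2 int32 and 1 double use we have
+  // non_tagged_count == 3 >= tagged_count == 1, and since double_count > 0
+  // the value is specialized to a double representation.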
+  if (non_tagged_count >= tagged_count) {
+    // More untagged than tagged.
+    if (double_count > 0) {
+      // There is at least one usage that is a double => guess that the
+      // correct representation is double.
+      return Representation::Double();
+    } else if (int32_count > 0) {
+      return Representation::Integer32();
+    }
+  }
+  return Representation::None();
+}
+
+
+void HInferRepresentation::Analyze() {
+  HPhase phase("Infer representations", graph_);
+
+  // (1) Initialize bit vectors and count real uses. Each phi
+  // gets a bit-vector of length <number of phis>.
+  const ZoneList<HPhi*>* phi_list = graph_->phi_list();
+  int num_phis = phi_list->length();
+  ScopedVector<BitVector*> connected_phis(num_phis);
+  for (int i = 0; i < num_phis; i++) {
+    phi_list->at(i)->InitRealUses(i);
+    connected_phis[i] = new BitVector(num_phis);
+    connected_phis[i]->Add(i);
+  }
+
+  // (2) Do a fixed point iteration to find the set of connected phis.
+  // A phi is connected to another phi if its value is used either
+  // directly or indirectly through a transitive closure of the def-use
+  // relation.
+  bool change = true;
+  while (change) {
+    change = false;
+    for (int i = 0; i < num_phis; i++) {
+      HPhi* phi = phi_list->at(i);
+      for (int j = 0; j < phi->uses()->length(); j++) {
+        HValue* use = phi->uses()->at(j);
+        if (use->IsPhi()) {
+          int phi_use = HPhi::cast(use)->phi_id();
+          if (connected_phis[i]->UnionIsChanged(*connected_phis[phi_use])) {
+            change = true;
+          }
+        }
+      }
+    }
+  }
+
+  // (3) Sum up the non-phi use counts of all connected phis.
+  // Don't include the non-phi uses of the phi itself.
+  for (int i = 0; i < num_phis; i++) {
+    HPhi* phi = phi_list->at(i);
+    for (BitVector::Iterator it(connected_phis.at(i));
+         !it.Done();
+         it.Advance()) {
+      int index = it.Current();
+      if (index != i) {
+        HPhi* it_use = phi_list->at(it.Current());
+        phi->AddNonPhiUsesFrom(it_use);
+      }
+    }
+  }
+
+  for (int i = 0; i < graph_->blocks()->length(); ++i) {
+    HBasicBlock* block = graph_->blocks()->at(i);
+    const ZoneList<HPhi*>* phis = block->phis();
+    for (int j = 0; j < phis->length(); ++j) {
+      AddToWorklist(phis->at(j));
+    }
+
+    HInstruction* current = block->first();
+    while (current != NULL) {
+      AddToWorklist(current);
+      current = current->next();
+    }
+  }
+
+  while (!worklist_.is_empty()) {
+    HValue* current = worklist_.RemoveLast();
+    in_worklist_.Remove(current->id());
+    InferBasedOnInputs(current);
+    InferBasedOnUses(current);
+  }
+}
+
+
+void HGraph::InitializeInferredTypes() {
+  HPhase phase("Inferring types", this);
+  InitializeInferredTypes(0, this->blocks_.length() - 1);
+}
+
+
+void HGraph::InitializeInferredTypes(int from_inclusive, int to_inclusive) {
+  for (int i = from_inclusive; i <= to_inclusive; ++i) {
+    HBasicBlock* block = blocks_[i];
+
+    const ZoneList<HPhi*>* phis = block->phis();
+    for (int j = 0; j < phis->length(); j++) {
+      phis->at(j)->UpdateInferredType();
+    }
+
+    HInstruction* current = block->first();
+    while (current != NULL) {
+      current->UpdateInferredType();
+      current = current->next();
+    }
+
+    if (block->IsLoopHeader()) {
+      HBasicBlock* last_back_edge =
+          block->loop_information()->GetLastBackEdge();
+      InitializeInferredTypes(i + 1, last_back_edge->block_id());
+      // Skip all blocks already processed by the recursive call.
+      i = last_back_edge->block_id();
+      // Update phis of the loop header now after the whole loop body is
+      // guaranteed to be processed.
+      ZoneList<HValue*> worklist(block->phis()->length());
+      for (int j = 0; j < block->phis()->length(); ++j) {
+        worklist.Add(block->phis()->at(j));
+      }
+      InferTypes(&worklist);
+    }
+  }
+}
+
+
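+// Propagates the need for a minus-zero check backwards through phis and
+// through both operands of multiplications and divisions, using the visited
+// bit vector to process each value at most once.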
+void HGraph::PropagateMinusZeroChecks(HValue* value, BitVector* visited) {
+  HValue* current = value;
+  while (current != NULL) {
+    if (visited->Contains(current->id())) return;
+
+    // For phis, we must propagate the check to all of its inputs.
+    if (current->IsPhi()) {
+      visited->Add(current->id());
+      HPhi* phi = HPhi::cast(current);
+      for (int i = 0; i < phi->OperandCount(); ++i) {
+        PropagateMinusZeroChecks(phi->OperandAt(i), visited);
+      }
+      break;
+    }
+
+    // For multiplication and division, we must propagate to the left and
+    // the right side.
+    if (current->IsMul()) {
+      HMul* mul = HMul::cast(current);
+      mul->EnsureAndPropagateNotMinusZero(visited);
+      PropagateMinusZeroChecks(mul->left(), visited);
+      PropagateMinusZeroChecks(mul->right(), visited);
+    } else if (current->IsDiv()) {
+      HDiv* div = HDiv::cast(current);
+      div->EnsureAndPropagateNotMinusZero(visited);
+      PropagateMinusZeroChecks(div->left(), visited);
+      PropagateMinusZeroChecks(div->right(), visited);
+    }
+
+    current = current->EnsureAndPropagateNotMinusZero(visited);
+  }
+}
+
+
+void HGraph::InsertRepresentationChangeForUse(HValue* value,
+                                              HValue* use,
+                                              Representation to,
+                                              bool is_truncating) {
+  // Propagate flags for negative zero checks upwards from conversions
+  // int32-to-tagged and int32-to-double.
+  Representation from = value->representation();
+  if (from.IsInteger32()) {
+    ASSERT(to.IsTagged() || to.IsDouble());
+    BitVector visited(GetMaximumValueID());
+    PropagateMinusZeroChecks(value, &visited);
+  }
+
+  // Insert the representation change right before its use. For phi-uses we
+  // insert at the end of the corresponding predecessor.
+  HBasicBlock* insert_block = use->block();
+  if (use->IsPhi()) {
+    int index = 0;
+    while (use->OperandAt(index) != value) ++index;
+    insert_block = insert_block->predecessors()->at(index);
+  }
+
+  HInstruction* next = (insert_block == use->block())
+      ? HInstruction::cast(use)
+      : insert_block->end();
+
+  // For constants we try to make the representation change at compile
+  // time. When a representation change is not possible without loss of
+  // information we treat constants like normal instructions and insert the
+  // change instructions for them.
+  HInstruction* new_value = NULL;
+  if (value->IsConstant()) {
+    HConstant* constant = HConstant::cast(value);
+    // Try to create a new copy of the constant with the new representation.
+    new_value = is_truncating
+        ? constant->CopyToTruncatedInt32()
+        : constant->CopyToRepresentation(to);
+  }
+
+  if (new_value == NULL) {
+    new_value = new HChange(value, value->representation(), to);
+  }
+
+  new_value->InsertBefore(next);
+  value->ReplaceFirstAtUse(use, new_value, to);
+}
+
+
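+// Comparator for ordering conversion uses: group uses by target
+// representation kind, put truncating int32 conversions before
+// non-truncating ones, and break remaining ties by block ID.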
+int CompareConversionUses(HValue* a,
+                          HValue* b,
+                          Representation a_rep,
+                          Representation b_rep) {
+  if (a_rep.kind() > b_rep.kind()) {
+    // Make sure specializations are separated in the result array.
+    return 1;
+  }
+  // Put truncating conversions before non-truncating conversions.
+  bool a_truncate = a->CheckFlag(HValue::kTruncatingToInt32);
+  bool b_truncate = b->CheckFlag(HValue::kTruncatingToInt32);
+  if (a_truncate != b_truncate) {
+    return a_truncate ? -1 : 1;
+  }
+  // Sort by increasing block ID.
+  return a->block()->block_id() - b->block()->block_id();
+}
+
+
+void HGraph::InsertRepresentationChanges(HValue* current) {
+  Representation r = current->representation();
+  if (r.IsNone()) return;
+  if (current->uses()->length() == 0) return;
+
+  // Collect the representation changes in a sorted list.  This allows
+  // us to avoid duplicate changes without searching the list.
+  ZoneList<HValue*> to_convert(2);
+  ZoneList<Representation> to_convert_reps(2);
+  for (int i = 0; i < current->uses()->length(); ++i) {
+    HValue* use = current->uses()->at(i);
+    // The occurrence index is the index within the operand array of "use"
+    // at which "current" appears. While iterating through the use array we
+    // also have to iterate over the different occurrence indices.
+    int occurrence_index = 0;
+    if (use->UsesMultipleTimes(current)) {
+      occurrence_index = current->uses()->CountOccurrences(use, 0, i - 1);
+      if (FLAG_trace_representation) {
+        PrintF("Instruction %d is used multiple times at %d; occurrence=%d\n",
+               current->id(),
+               use->id(),
+               occurrence_index);
+      }
+    }
+    int operand_index = use->LookupOperandIndex(occurrence_index, current);
+    Representation req = use->RequiredInputRepresentation(operand_index);
+    if (req.IsNone() || req.Equals(r)) continue;
+    int index = 0;
+    while (to_convert.length() > index &&
+           CompareConversionUses(to_convert[index],
+                                 use,
+                                 to_convert_reps[index],
+                                 req) < 0) {
+      ++index;
+    }
+    if (FLAG_trace_representation) {
+      PrintF("Inserting a representation change to %s of %d for use at %d\n",
+             req.Mnemonic(),
+             current->id(),
+             use->id());
+    }
+    to_convert.InsertAt(index, use);
+    to_convert_reps.InsertAt(index, req);
+  }
+
+  for (int i = 0; i < to_convert.length(); ++i) {
+    HValue* use = to_convert[i];
+    Representation r_to = to_convert_reps[i];
+    bool is_truncating = use->CheckFlag(HValue::kTruncatingToInt32);
+    InsertRepresentationChangeForUse(current, use, r_to, is_truncating);
+  }
+
+  if (current->uses()->is_empty()) {
+    ASSERT(current->IsConstant());
+    current->Delete();
+  }
+}
+
+
+void HGraph::InsertRepresentationChanges() {
+  HPhase phase("Insert representation changes", this);
+
+  // Compute truncation flag for phis: Initially assume that all
+  // int32-phis allow truncation and iteratively remove the ones that
+  // are used in an operation that does not allow a truncating
+  // conversion.
+  // TODO(fschneider): Replace this with a worklist-based iteration.
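+  // For example (illustrative): if an int32 phi has a use that does not
+  // itself truncate to int32, the phi loses its truncating flag, which in
+  // turn may clear the flag on other phis that feed it.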
+  for (int i = 0; i < phi_list()->length(); i++) {
+    HPhi* phi = phi_list()->at(i);
+    if (phi->representation().IsInteger32()) {
+      phi->SetFlag(HValue::kTruncatingToInt32);
+    }
+  }
+  bool change = true;
+  while (change) {
+    change = false;
+    for (int i = 0; i < phi_list()->length(); i++) {
+      HPhi* phi = phi_list()->at(i);
+      if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue;
+      for (int j = 0; j < phi->uses()->length(); j++) {
+        HValue* use = phi->uses()->at(j);
+        if (!use->CheckFlag(HValue::kTruncatingToInt32)) {
+          phi->ClearFlag(HValue::kTruncatingToInt32);
+          change = true;
+          break;
+        }
+      }
+    }
+  }
+
+  for (int i = 0; i < blocks_.length(); ++i) {
+    // Process phi instructions first.
+    for (int j = 0; j < blocks_[i]->phis()->length(); j++) {
+      HPhi* phi = blocks_[i]->phis()->at(j);
+      InsertRepresentationChanges(phi);
+    }
+
+    // Process normal instructions.
+    HInstruction* current = blocks_[i]->first();
+    while (current != NULL) {
+      InsertRepresentationChanges(current);
+      current = current->next();
+    }
+  }
+}
+
+
+// Implementation of utility classes to represent an expression's context in
+// the AST.
+AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
+    : owner_(owner), kind_(kind), outer_(owner->ast_context()) {
+  owner->set_ast_context(this);  // Push.
+}
+
+
+AstContext::~AstContext() {
+  owner_->set_ast_context(outer_);  // Pop.
+}
+
+
+// HGraphBuilder infrastructure for bailing out and checking bailouts.
+#define BAILOUT(reason)                         \
+  do {                                          \
+    Bailout(reason);                            \
+    return;                                     \
+  } while (false)
+
+
+#define CHECK_BAILOUT                           \
+  do {                                          \
+    if (HasStackOverflow()) return;             \
+  } while (false)
+
+
+#define VISIT_FOR_EFFECT(expr)                  \
+  do {                                          \
+    VisitForEffect(expr);                       \
+    if (HasStackOverflow()) return;             \
+  } while (false)
+
+
+#define VISIT_FOR_VALUE(expr)                   \
+  do {                                          \
+    VisitForValue(expr);                        \
+    if (HasStackOverflow()) return;             \
+  } while (false)
+
+
+// 'thing' could be an expression, statement, or list of statements.
+#define ADD_TO_SUBGRAPH(graph, thing)       \
+  do {                                      \
+    AddToSubgraph(graph, thing);            \
+    if (HasStackOverflow()) return;         \
+  } while (false)
+
+
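+// RAII helper that makes |new_subgraph| the builder's current subgraph for
+// the lifetime of the scope and propagates break/continue information back
+// to the enclosing subgraph on exit.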
+class HGraphBuilder::SubgraphScope BASE_EMBEDDED {
+ public:
+  SubgraphScope(HGraphBuilder* builder, HSubgraph* new_subgraph)
+      : builder_(builder) {
+    old_subgraph_ = builder_->current_subgraph_;
+    subgraph_ = new_subgraph;
+    builder_->current_subgraph_ = subgraph_;
+  }
+
+  ~SubgraphScope() {
+    old_subgraph_->AddBreakContinueInfo(subgraph_);
+    builder_->current_subgraph_ = old_subgraph_;
+  }
+
+  HSubgraph* subgraph() const { return subgraph_; }
+
+ private:
+  HGraphBuilder* builder_;
+  HSubgraph* old_subgraph_;
+  HSubgraph* subgraph_;
+};
+
+
+void HGraphBuilder::Bailout(const char* reason) {
+  if (FLAG_trace_bailout) {
+    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
+    PrintF("Bailout in HGraphBuilder: @\"%s\": %s\n", *debug_name, reason);
+  }
+  SetStackOverflow();
+}
+
+
+void HGraphBuilder::VisitForEffect(Expression* expr) {
+#ifdef DEBUG
+  int original_count = environment()->total_count();
+#endif
+  BinaryOperation* binary_op = expr->AsBinaryOperation();
+
+  // We use special casing for expression types not handled properly by our
+  // usual trick of pretending they're in a value context and cleaning up
+  // later.
+  if (binary_op != NULL && binary_op->op() == Token::COMMA) {
+    VISIT_FOR_EFFECT(binary_op->left());
+    VISIT_FOR_EFFECT(binary_op->right());
+  } else {
+    { EffectContext for_effect(this);
+      Visit(expr);
+    }
+    if (HasStackOverflow() || !subgraph()->HasExit()) return;
+    // Discard return value.
+    Pop();
+    // TODO(kasperl): Try to improve the way we compute the last added
+    // instruction. The NULL check makes me uncomfortable.
+    HValue* last = subgraph()->exit_block()->GetLastInstruction();
+    // We need to ensure we emit a simulate after inlined functions in an
+    // effect context, to avoid having a bailout target the fictional
+    // environment with the return value on top.
+    if ((last != NULL && last->HasSideEffects()) ||
+        subgraph()->exit_block()->IsInlineReturnTarget()) {
+      AddSimulate(expr->id());
+    }
+  }
+
+  ASSERT(environment()->total_count() == original_count);
+}
+
+
+void HGraphBuilder::VisitForValue(Expression* expr) {
+#ifdef DEBUG
+  int original_height = environment()->values()->length();
+#endif
+  { ValueContext for_value(this);
+    Visit(expr);
+  }
+  if (HasStackOverflow() || !subgraph()->HasExit()) return;
+  // TODO(kasperl): Try to improve the way we compute the last added
+  // instruction. The NULL check makes me uncomfortable.
+  HValue* last = subgraph()->exit_block()->GetLastInstruction();
+  if (last != NULL && last->HasSideEffects()) {
+    AddSimulate(expr->id());
+  }
+  ASSERT(environment()->values()->length() == original_height + 1);
+}
+
+
+HValue* HGraphBuilder::VisitArgument(Expression* expr) {
+  VisitForValue(expr);
+  if (HasStackOverflow() || !subgraph()->HasExit()) return NULL;
+  return environment()->Top();
+}
+
+
+void HGraphBuilder::VisitArgumentList(ZoneList<Expression*>* arguments) {
+  for (int i = 0; i < arguments->length(); i++) {
+    VisitArgument(arguments->at(i));
+    if (HasStackOverflow() || !current_subgraph_->HasExit()) return;
+  }
+}
+
+
+HGraph* HGraphBuilder::CreateGraph(CompilationInfo* info) {
+  ASSERT(current_subgraph_ == NULL);
+  graph_ = new HGraph(info);
+
+  {
+    HPhase phase("Block building");
+    graph_->Initialize(CreateBasicBlock(graph_->start_environment()));
+    current_subgraph_ = graph_;
+
+    Scope* scope = info->scope();
+    SetupScope(scope);
+    VisitDeclarations(scope->declarations());
+
+    AddInstruction(new HStackCheck());
+
+    ZoneList<Statement*>* stmts = info->function()->body();
+    HSubgraph* body = CreateGotoSubgraph(environment());
+    AddToSubgraph(body, stmts);
+    if (HasStackOverflow()) return NULL;
+    current_subgraph_->Append(body, NULL);
+    body->entry_block()->SetJoinId(info->function()->id());
+
+    if (graph_->HasExit()) {
+      graph_->FinishExit(new HReturn(graph_->GetConstantUndefined()));
+    }
+  }
+
+  graph_->OrderBlocks();
+  graph_->AssignDominators();
+  graph_->EliminateRedundantPhis();
+  if (!graph_->CollectPhis()) {
+    Bailout("Phi-use of arguments object");
+    return NULL;
+  }
+
+  HInferRepresentation rep(graph_);
+  rep.Analyze();
+
+  if (FLAG_use_range) {
+    HRangeAnalysis range_analysis(graph_);
+    range_analysis.Analyze();
+  }
+
+  graph_->InitializeInferredTypes();
+  graph_->Canonicalize();
+  graph_->InsertRepresentationChanges();
+
+  // Eliminate redundant stack checks on backwards branches.
+  HStackCheckEliminator sce(graph_);
+  sce.Process();
+
+  // Perform common subexpression elimination and loop-invariant code motion.
+  if (FLAG_use_gvn) {
+    HPhase phase("Global value numbering", graph_);
+    HGlobalValueNumberer gvn(graph_);
+    gvn.Analyze();
+  }
+
+  return graph_;
+}
+
+
+void HGraphBuilder::AddToSubgraph(HSubgraph* graph, Statement* stmt) {
+  SubgraphScope scope(this, graph);
+  Visit(stmt);
+}
+
+
+void HGraphBuilder::AddToSubgraph(HSubgraph* graph, Expression* expr) {
+  SubgraphScope scope(this, graph);
+  VisitForValue(expr);
+}
+
+
+void HGraphBuilder::VisitCondition(Expression* expr,
+                                   HBasicBlock* true_block,
+                                   HBasicBlock* false_block,
+                                   bool invert_true,
+                                   bool invert_false) {
+  VisitForControl(expr, true_block, false_block, invert_true, invert_false);
+  CHECK_BAILOUT;
+#ifdef DEBUG
+  HValue* value = true_block->predecessors()->at(0)->last_environment()->Top();
+  true_block->set_cond(HConstant::cast(value)->handle());
+
+  value = false_block->predecessors()->at(0)->last_environment()->Top();
+  false_block->set_cond(HConstant::cast(value)->handle());
+#endif
+
+  true_block->SetJoinId(expr->id());
+  false_block->SetJoinId(expr->id());
+  true_block->last_environment()->Pop();
+  false_block->last_environment()->Pop();
+}
+
+
+void HGraphBuilder::AddConditionToSubgraph(HSubgraph* subgraph,
+                                           Expression* expr,
+                                           HSubgraph* true_graph,
+                                           HSubgraph* false_graph) {
+  SubgraphScope scope(this, subgraph);
+  VisitCondition(expr,
+                 true_graph->entry_block(),
+                 false_graph->entry_block(),
+                 false,
+                 false);
+}
+
+
+void HGraphBuilder::VisitForControl(Expression* expr,
+                                    HBasicBlock* true_block,
+                                    HBasicBlock* false_block,
+                                    bool invert_true,
+                                    bool invert_false) {
+  TestContext for_test(this, true_block, false_block,
+                       invert_true, invert_false);
+  BinaryOperation* binary_op = expr->AsBinaryOperation();
+  UnaryOperation* unary_op = expr->AsUnaryOperation();
+
+  if (unary_op != NULL && unary_op->op() == Token::NOT) {
+    VisitForControl(unary_op->expression(),
+                    false_block,
+                    true_block,
+                    !invert_false,
+                    !invert_true);
+  } else if (binary_op != NULL && binary_op->op() == Token::AND) {
+    // Translate left subexpression.
+    HBasicBlock* eval_right = graph()->CreateBasicBlock();
+    VisitForControl(binary_op->left(),
+                    eval_right,
+                    false_block,
+                    false,
+                    invert_false);
+    if (HasStackOverflow()) return;
+    eval_right->SetJoinId(binary_op->left()->id());
+
+    // Translate right subexpression.
+    eval_right->last_environment()->Pop();
+    subgraph()->set_exit_block(eval_right);
+    VisitForControl(binary_op->right(),
+                    true_block,
+                    false_block,
+                    invert_true,
+                    invert_false);
+  } else if (binary_op != NULL && binary_op->op() == Token::OR) {
+    // Translate left subexpression.
+    HBasicBlock* eval_right = graph()->CreateBasicBlock();
+    VisitForControl(binary_op->left(),
+                    true_block,
+                    eval_right,
+                    invert_true,
+                    false);
+    if (HasStackOverflow()) return;
+    eval_right->SetJoinId(binary_op->left()->id());
+
+    // Translate right subexpression.
+    eval_right->last_environment()->Pop();
+    subgraph()->set_exit_block(eval_right);
+    VisitForControl(binary_op->right(),
+                    true_block,
+                    false_block,
+                    invert_true,
+                    invert_false);
+  } else {
+#ifdef DEBUG
+    int original_length = environment()->values()->length();
+#endif
+    // TODO(kmillikin): Refactor to avoid. This code is duplicated from
+    // VisitForValue, except without pushing a value context on the
+    // expression context stack.
+    Visit(expr);
+    if (HasStackOverflow() || !subgraph()->HasExit()) return;
+    HValue* last = subgraph()->exit_block()->GetLastInstruction();
+    if (last != NULL && last->HasSideEffects()) {
+      AddSimulate(expr->id());
+    }
+    ASSERT(environment()->values()->length() == original_length + 1);
+    HValue* value = Pop();
+    HBasicBlock* materialize_true = graph()->CreateBasicBlock();
+    HBasicBlock* materialize_false = graph()->CreateBasicBlock();
+    CurrentBlock()->Finish(new HBranch(materialize_true,
+                                       materialize_false,
+                                       value));
+    HValue* true_value = invert_true
+        ? graph()->GetConstantFalse()
+        : graph()->GetConstantTrue();
+    materialize_true->set_inverted(invert_true);
+    true_block->set_deopt_predecessor(materialize_true);
+
+    if (true_block->IsInlineReturnTarget()) {
+      materialize_true->AddLeaveInlined(true_value, true_block);
+    } else {
+      materialize_true->last_environment()->Push(true_value);
+      materialize_true->Goto(true_block);
+    }
+    HValue* false_value = invert_false
+        ? graph()->GetConstantTrue()
+        : graph()->GetConstantFalse();
+    materialize_false->set_inverted(invert_false);
+    false_block->set_deopt_predecessor(materialize_false);
+
+    if (false_block->IsInlineReturnTarget()) {
+      materialize_false->AddLeaveInlined(false_value, false_block);
+    } else {
+      materialize_false->last_environment()->Push(false_value);
+      materialize_false->Goto(false_block);
+    }
+    subgraph()->set_exit_block(NULL);
+  }
+}
+
+
+void HGraphBuilder::AddToSubgraph(HSubgraph* graph,
+                                  ZoneList<Statement*>* stmts) {
+  SubgraphScope scope(this, graph);
+  VisitStatements(stmts);
+}
+
+
+HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
+  ASSERT(current_subgraph_->HasExit());
+  current_subgraph_->exit_block()->AddInstruction(instr);
+  return instr;
+}
+
+
+void HGraphBuilder::AddSimulate(int id) {
+  ASSERT(current_subgraph_->HasExit());
+  current_subgraph_->exit_block()->AddSimulate(id);
+}
+
+
+void HGraphBuilder::AddPhi(HPhi* instr) {
+  ASSERT(current_subgraph_->HasExit());
+  current_subgraph_->exit_block()->AddPhi(instr);
+}
+
+
+void HGraphBuilder::PushAndAdd(HInstruction* instr) {
+  Push(instr);
+  AddInstruction(instr);
+}
+
+
+void HGraphBuilder::PushAndAdd(HInstruction* instr, int position) {
+  instr->set_position(position);
+  PushAndAdd(instr);
+}
+
+
+void HGraphBuilder::PushArgumentsForStubCall(int argument_count) {
+  const int kMaxStubArguments = 4;
+  ASSERT_GE(kMaxStubArguments, argument_count);
+  // Push the arguments on the stack.
+  HValue* arguments[kMaxStubArguments];
+  for (int i = argument_count - 1; i >= 0; i--) {
+    arguments[i] = Pop();
+  }
+  for (int i = 0; i < argument_count; i++) {
+    AddInstruction(new HPushArgument(arguments[i]));
+  }
+}
+
+
+void HGraphBuilder::ProcessCall(HCall* call, int source_position) {
+  for (int i = call->argument_count() - 1; i >= 0; --i) {
+    HValue* value = Pop();
+    HPushArgument* push = new HPushArgument(value);
+    call->SetArgumentAt(i, push);
+  }
+
+  for (int i = 0; i < call->argument_count(); ++i) {
+    AddInstruction(call->PushArgumentAt(i));
+  }
+
+  PushAndAdd(call, source_position);
+}
+
+
+void HGraphBuilder::SetupScope(Scope* scope) {
+  // We don't yet handle the function name for named function expressions.
+  if (scope->function() != NULL) BAILOUT("named function expression");
+
+  // We can't handle heap-allocated locals.
+  if (scope->num_heap_slots() > 0) BAILOUT("heap allocated locals");
+
+  HConstant* undefined_constant =
+      new HConstant(Factory::undefined_value(), Representation::Tagged());
+  AddInstruction(undefined_constant);
+  graph_->set_undefined_constant(undefined_constant);
+
+  // Set the initial values of parameters including "this".  "This" has
+  // parameter index 0.
+  int count = scope->num_parameters() + 1;
+  for (int i = 0; i < count; ++i) {
+    HInstruction* parameter = AddInstruction(new HParameter(i));
+    environment()->Bind(i, parameter);
+  }
+
+  // Set the initial values of stack-allocated locals.
+  for (int i = count; i < environment()->values()->length(); ++i) {
+    environment()->Bind(i, undefined_constant);
+  }
+
+  // Handle the arguments and arguments shadow variables specially (they do
+  // not have declarations).
+  if (scope->arguments() != NULL) {
+    HArgumentsObject* object = new HArgumentsObject;
+    AddInstruction(object);
+    graph()->SetArgumentsObject(object);
+    environment()->Bind(scope->arguments(), object);
+    environment()->Bind(scope->arguments_shadow(), object);
+  }
+}
+
+
+void HGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
+  for (int i = 0; i < statements->length(); i++) {
+    Visit(statements->at(i));
+    if (HasStackOverflow() || !current_subgraph_->HasExit()) break;
+  }
+}
+
+
+HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
+  HBasicBlock* b = graph()->CreateBasicBlock();
+  b->SetInitialEnvironment(env);
+  return b;
+}
+
+
+HSubgraph* HGraphBuilder::CreateInlinedSubgraph(HEnvironment* outer,
+                                                Handle<JSFunction> target,
+                                                FunctionLiteral* function) {
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner =
+      outer->CopyForInlining(target, function, true, undefined);
+  HSubgraph* subgraph = new HSubgraph(graph());
+  subgraph->Initialize(CreateBasicBlock(inner));
+  return subgraph;
+}
+
+
+HSubgraph* HGraphBuilder::CreateGotoSubgraph(HEnvironment* env) {
+  HSubgraph* subgraph = new HSubgraph(graph());
+  HEnvironment* new_env = env->CopyWithoutHistory();
+  subgraph->Initialize(CreateBasicBlock(new_env));
+  return subgraph;
+}
+
+
+HSubgraph* HGraphBuilder::CreateEmptySubgraph() {
+  HSubgraph* subgraph = new HSubgraph(graph());
+  subgraph->Initialize(graph()->CreateBasicBlock());
+  return subgraph;
+}
+
+
+HSubgraph* HGraphBuilder::CreateBranchSubgraph(HEnvironment* env) {
+  HSubgraph* subgraph = new HSubgraph(graph());
+  HEnvironment* new_env = env->Copy();
+  subgraph->Initialize(CreateBasicBlock(new_env));
+  return subgraph;
+}
+
+
+HSubgraph* HGraphBuilder::CreateLoopHeaderSubgraph(HEnvironment* env) {
+  HSubgraph* subgraph = new HSubgraph(graph());
+  HBasicBlock* block = graph()->CreateBasicBlock();
+  HEnvironment* new_env = env->CopyAsLoopHeader(block);
+  block->SetInitialEnvironment(new_env);
+  subgraph->Initialize(block);
+  subgraph->entry_block()->AttachLoopInformation();
+  return subgraph;
+}
+
+
+void HGraphBuilder::VisitBlock(Block* stmt) {
+  if (stmt->labels() != NULL) {
+    HSubgraph* block_graph = CreateGotoSubgraph(environment());
+    ADD_TO_SUBGRAPH(block_graph, stmt->statements());
+    current_subgraph_->Append(block_graph, stmt);
+  } else {
+    VisitStatements(stmt->statements());
+  }
+}
+
+
+void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+  VisitForEffect(stmt->expression());
+}
+
+
+void HGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+}
+
+
+void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+  if (stmt->condition()->ToBooleanIsTrue()) {
+    Visit(stmt->then_statement());
+  } else if (stmt->condition()->ToBooleanIsFalse()) {
+    Visit(stmt->else_statement());
+  } else {
+    HSubgraph* then_graph = CreateEmptySubgraph();
+    HSubgraph* else_graph = CreateEmptySubgraph();
+    VisitCondition(stmt->condition(),
+                   then_graph->entry_block(),
+                   else_graph->entry_block(),
+                   false, false);
+    if (HasStackOverflow()) return;
+    ADD_TO_SUBGRAPH(then_graph, stmt->then_statement());
+    ADD_TO_SUBGRAPH(else_graph, stmt->else_statement());
+    current_subgraph_->AppendJoin(then_graph, else_graph, stmt);
+  }
+}
+
+
+void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+  current_subgraph_->FinishBreakContinue(stmt->target(), true);
+}
+
+
+void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+  current_subgraph_->FinishBreakContinue(stmt->target(), false);
+}
+
+
+void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+  AstContext* context = call_context();
+  if (context == NULL) {
+    // Not an inlined return, so an actual one.
+    VISIT_FOR_VALUE(stmt->expression());
+    HValue* result = environment()->Pop();
+    subgraph()->FinishExit(new HReturn(result));
+  } else {
+    // Return from an inlined function, visit the subexpression in the
+    // expression context of the call.
+    if (context->IsTest()) {
+      TestContext* test = TestContext::cast(context);
+      VisitForControl(stmt->expression(),
+                      test->if_true(),
+                      test->if_false(),
+                      false,
+                      false);
+    } else {
+      HValue* return_value = NULL;
+      if (context->IsEffect()) {
+        VISIT_FOR_EFFECT(stmt->expression());
+        return_value = graph()->GetConstantUndefined();
+      } else {
+        ASSERT(context->IsValue());
+        VISIT_FOR_VALUE(stmt->expression());
+        return_value = environment()->Pop();
+      }
+      subgraph()->exit_block()->AddLeaveInlined(return_value,
+                                                function_return_);
+      subgraph()->set_exit_block(NULL);
+    }
+  }
+}
+
+
+void HGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
+  BAILOUT("WithEnterStatement");
+}
+
+
+void HGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
+  BAILOUT("WithExitStatement");
+}
+
+
+HCompare* HGraphBuilder::BuildSwitchCompare(HSubgraph* subgraph,
+                                            HValue* switch_value,
+                                            CaseClause* clause) {
+  AddToSubgraph(subgraph, clause->label());
+  if (HasStackOverflow()) return NULL;
+  HValue* clause_value = subgraph->environment()->Pop();
+  HCompare* compare = new HCompare(switch_value,
+                                   clause_value,
+                                   Token::EQ_STRICT);
+  compare->SetInputRepresentation(Representation::Integer32());
+  subgraph->exit_block()->AddInstruction(compare);
+  return compare;
+}
+
+
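+// Lowers a switch over smi labels into a chain of strict-equality comparison
+// blocks followed by the clause bodies, wiring up fall-through edges and a
+// single shared exit block.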
+void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+  VISIT_FOR_VALUE(stmt->tag());
+  HValue* switch_value = Pop();
+
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  int num_clauses = clauses->length();
+  if (num_clauses == 0) return;
+  if (num_clauses > 128) BAILOUT("SwitchStatement: too many clauses");
+
+  for (int i = 0; i < num_clauses; i++) {
+    CaseClause* clause = clauses->at(i);
+    if (clause->is_default()) continue;
+    clause->RecordTypeFeedback(oracle());
+    if (!clause->IsSmiCompare()) BAILOUT("SwitchStatement: non-smi compare");
+    if (!clause->label()->IsSmiLiteral()) {
+      BAILOUT("SwitchStatement: non-literal switch label");
+    }
+  }
+
+  // The single exit block of the whole switch statement.
+  HBasicBlock* single_exit_block = graph_->CreateBasicBlock();
+
+  // Build a series of empty subgraphs for the comparisons.
+  // The default clause does not have a comparison subgraph.
+  ZoneList<HSubgraph*> compare_graphs(num_clauses);
+  for (int i = 0; i < num_clauses; i++) {
+    HSubgraph* subgraph = !clauses->at(i)->is_default()
+        ? CreateEmptySubgraph()
+        : NULL;
+    compare_graphs.Add(subgraph);
+  }
+
+  HSubgraph* prev_graph = current_subgraph_;
+  HCompare* prev_compare_inst = NULL;
+  for (int i = 0; i < num_clauses; i++) {
+    CaseClause* clause = clauses->at(i);
+    if (clause->is_default()) continue;
+
+    // Finish the previous graph by connecting it to the current.
+    HSubgraph* subgraph = compare_graphs.at(i);
+    if (prev_compare_inst == NULL) {
+      ASSERT(prev_graph == current_subgraph_);
+      prev_graph->exit_block()->Finish(new HGoto(subgraph->entry_block()));
+    } else {
+      HBasicBlock* empty = graph()->CreateBasicBlock();
+      prev_graph->exit_block()->Finish(new HBranch(empty,
+                                                   subgraph->entry_block(),
+                                                   prev_compare_inst));
+    }
+
+    // Build instructions for current subgraph.
+    prev_compare_inst = BuildSwitchCompare(subgraph, switch_value, clause);
+    if (HasStackOverflow()) return;
+
+    prev_graph = subgraph;
+  }
+
+  // Finish last comparison if there was at least one comparison.
+  // last_false_block is the (empty) false-block of the last comparison. If
+  // there are no comparisons at all (a single default clause), it is just
+  // the last block of the current subgraph.
+  HBasicBlock* last_false_block = current_subgraph_->exit_block();
+  if (prev_graph != current_subgraph_) {
+    last_false_block = graph()->CreateBasicBlock();
+    HBasicBlock* empty = graph()->CreateBasicBlock();
+    prev_graph->exit_block()->Finish(new HBranch(empty,
+                                                 last_false_block,
+                                                 prev_compare_inst));
+  }
+
+  // Build statement blocks, connect them to their comparison block and
+  // to the previous statement block, if there is a fall-through.
+  HSubgraph* previous_subgraph = NULL;
+  for (int i = 0; i < num_clauses; i++) {
+    CaseClause* clause = clauses->at(i);
+    HSubgraph* subgraph = CreateEmptySubgraph();
+
+    if (clause->is_default()) {
+      // Default clause: Connect it to the last false block.
+      last_false_block->Finish(new HGoto(subgraph->entry_block()));
+    } else {
+      // Connect with the corresponding comparison.
+      HBasicBlock* empty =
+          compare_graphs.at(i)->exit_block()->end()->FirstSuccessor();
+      empty->Finish(new HGoto(subgraph->entry_block()));
+    }
+
+    // Check for fall-through from previous statement block.
+    if (previous_subgraph != NULL && previous_subgraph->HasExit()) {
+      previous_subgraph->exit_block()->
+          Finish(new HGoto(subgraph->entry_block()));
+    }
+
+    ADD_TO_SUBGRAPH(subgraph, clause->statements());
+    HBasicBlock* break_block = subgraph->BundleBreak(stmt);
+    if (break_block != NULL) {
+      break_block->Finish(new HGoto(single_exit_block));
+    }
+
+    previous_subgraph = subgraph;
+  }
+
+  // If the last statement block has a fall-through, connect it to the
+  // single exit block.
+  if (previous_subgraph->HasExit()) {
+    previous_subgraph->exit_block()->Finish(new HGoto(single_exit_block));
+  }
+
+  // If there is no default clause finish the last comparison's false target.
+  if (!last_false_block->IsFinished()) {
+    last_false_block->Finish(new HGoto(single_exit_block));
+  }
+
+  if (single_exit_block->HasPredecessor()) {
+    current_subgraph_->set_exit_block(single_exit_block);
+  } else {
+    current_subgraph_->set_exit_block(NULL);
+  }
+}
+
+
+bool HGraph::HasOsrEntryAt(IterationStatement* statement) {
+  return statement->OsrEntryId() == info()->osr_ast_id();
+}
+
+
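+// If this statement is the on-stack replacement target, split the entry:
+// a branch on constant true separates the normal path from the OSR entry,
+// which binds an HUnknownOSRValue for every parameter and local (the
+// expression stack is empty here) before both paths join at the loop
+// predecessor.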
+void HSubgraph::PreProcessOsrEntry(IterationStatement* statement) {
+  if (!graph()->HasOsrEntryAt(statement)) return;
+
+  HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
+  HBasicBlock* osr_entry = graph()->CreateBasicBlock();
+  HValue* true_value = graph()->GetConstantTrue();
+  HBranch* branch = new HBranch(non_osr_entry, osr_entry, true_value);
+  exit_block()->Finish(branch);
+
+  HBasicBlock* loop_predecessor = graph()->CreateBasicBlock();
+  non_osr_entry->Goto(loop_predecessor);
+
+  int osr_entry_id = statement->OsrEntryId();
+  // We want the correct environment at the OsrEntry instruction.  Build
+  // it explicitly.  The expression stack should be empty.
+  int count = osr_entry->last_environment()->total_count();
+  ASSERT(count == (osr_entry->last_environment()->parameter_count() +
+                   osr_entry->last_environment()->local_count()));
+  for (int i = 0; i < count; ++i) {
+    HUnknownOSRValue* unknown = new HUnknownOSRValue;
+    osr_entry->AddInstruction(unknown);
+    osr_entry->last_environment()->Bind(i, unknown);
+  }
+
+  osr_entry->AddSimulate(osr_entry_id);
+  osr_entry->AddInstruction(new HOsrEntry(osr_entry_id));
+  osr_entry->Goto(loop_predecessor);
+  loop_predecessor->SetJoinId(statement->EntryId());
+  set_exit_block(loop_predecessor);
+}
+
+
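+// A do-while loop, e.g. `do { body } while (cond)`, builds the body as a
+// loop header subgraph.  If the body cannot fall through or the condition
+// is constant true, the loop is appended as endless; otherwise the
+// condition is evaluated after the body and branches back to the header
+// or out to the exit.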
+void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  ASSERT(subgraph()->HasExit());
+  subgraph()->PreProcessOsrEntry(stmt);
+
+  HSubgraph* body_graph = CreateLoopHeaderSubgraph(environment());
+  ADD_TO_SUBGRAPH(body_graph, stmt->body());
+  body_graph->ResolveContinue(stmt);
+
+  if (!body_graph->HasExit() || stmt->cond()->ToBooleanIsTrue()) {
+    current_subgraph_->AppendEndless(body_graph, stmt);
+  } else {
+    HSubgraph* go_back = CreateEmptySubgraph();
+    HSubgraph* exit = CreateEmptySubgraph();
+    AddConditionToSubgraph(body_graph, stmt->cond(), go_back, exit);
+    if (HasStackOverflow()) return;
+    current_subgraph_->AppendDoWhile(body_graph, stmt, go_back, exit);
+  }
+}
+
+
+bool HGraphBuilder::ShouldPeel(HSubgraph* cond, HSubgraph* body) {
+  return FLAG_use_peeling;
+}
+
+
+void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+  ASSERT(subgraph()->HasExit());
+  subgraph()->PreProcessOsrEntry(stmt);
+
+  HSubgraph* cond_graph = NULL;
+  HSubgraph* body_graph = NULL;
+  HSubgraph* exit_graph = NULL;
+
+  // If the condition is constant true, do not generate a condition subgraph.
+  if (stmt->cond()->ToBooleanIsTrue()) {
+    body_graph = CreateLoopHeaderSubgraph(environment());
+    ADD_TO_SUBGRAPH(body_graph, stmt->body());
+  } else {
+    cond_graph = CreateLoopHeaderSubgraph(environment());
+    body_graph = CreateEmptySubgraph();
+    exit_graph = CreateEmptySubgraph();
+    AddConditionToSubgraph(cond_graph, stmt->cond(), body_graph, exit_graph);
+    if (HasStackOverflow()) return;
+    ADD_TO_SUBGRAPH(body_graph, stmt->body());
+  }
+
+  body_graph->ResolveContinue(stmt);
+
+  if (cond_graph != NULL) {
+    AppendPeeledWhile(stmt, cond_graph, body_graph, exit_graph);
+  } else {
+    // TODO(fschneider): Implement peeling for endless loops as well.
+    current_subgraph_->AppendEndless(body_graph, stmt);
+  }
+}
+
+
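+// Peels one iteration off the loop when permitted: the condition and body
+// subgraphs built by the caller become the first iteration, and the whole
+// statement is built again as the remaining loop.  peeled_statement_
+// prevents peeling the same statement recursively.  Sketch:
+//
+//   while (c) { body }   =>   if (c) { body; while (c) { body } }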
+void HGraphBuilder::AppendPeeledWhile(IterationStatement* stmt,
+                                      HSubgraph* cond_graph,
+                                      HSubgraph* body_graph,
+                                      HSubgraph* exit_graph) {
+  HSubgraph* loop = NULL;
+  if (body_graph->HasExit() && stmt != peeled_statement_ &&
+      ShouldPeel(cond_graph, body_graph)) {
+    // Save the last peeled iteration statement to prevent infinite recursion.
+    IterationStatement* outer_peeled_statement = peeled_statement_;
+    peeled_statement_ = stmt;
+    loop = CreateGotoSubgraph(body_graph->environment());
+    ADD_TO_SUBGRAPH(loop, stmt);
+    peeled_statement_ = outer_peeled_statement;
+  }
+  current_subgraph_->AppendWhile(cond_graph, body_graph, stmt, loop,
+                                 exit_graph);
+}
+
+
+void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
+  // Only visit the init statement in the peeled part of the loop.
+  if (stmt->init() != NULL && peeled_statement_ != stmt) {
+    Visit(stmt->init());
+    CHECK_BAILOUT;
+  }
+  ASSERT(subgraph()->HasExit());
+  subgraph()->PreProcessOsrEntry(stmt);
+
+  HSubgraph* cond_graph = NULL;
+  HSubgraph* body_graph = NULL;
+  HSubgraph* exit_graph = NULL;
+  if (stmt->cond() != NULL) {
+    cond_graph = CreateLoopHeaderSubgraph(environment());
+    body_graph = CreateEmptySubgraph();
+    exit_graph = CreateEmptySubgraph();
+    AddConditionToSubgraph(cond_graph, stmt->cond(), body_graph, exit_graph);
+    if (HasStackOverflow()) return;
+    ADD_TO_SUBGRAPH(body_graph, stmt->body());
+  } else {
+    body_graph = CreateLoopHeaderSubgraph(environment());
+    ADD_TO_SUBGRAPH(body_graph, stmt->body());
+  }
+
+  HSubgraph* next_graph = NULL;
+  body_graph->ResolveContinue(stmt);
+
+  if (stmt->next() != NULL && body_graph->HasExit()) {
+    next_graph = CreateGotoSubgraph(body_graph->environment());
+    ADD_TO_SUBGRAPH(next_graph, stmt->next());
+    body_graph->Append(next_graph, NULL);
+    next_graph->entry_block()->SetJoinId(stmt->ContinueId());
+  }
+
+  if (cond_graph != NULL) {
+    AppendPeeledWhile(stmt, cond_graph, body_graph, exit_graph);
+  } else {
+    current_subgraph_->AppendEndless(body_graph, stmt);
+  }
+}
+
+
+void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
+  BAILOUT("ForInStatement");
+}
+
+
+void HGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  BAILOUT("TryCatchStatement");
+}
+
+
+void HGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  BAILOUT("TryFinallyStatement");
+}
+
+
+void HGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+  BAILOUT("DebuggerStatement");
+}
+
+
+void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+  Handle<SharedFunctionInfo> shared_info =
+      Compiler::BuildFunctionInfo(expr, graph_->info()->script());
+  CHECK_BAILOUT;
+  PushAndAdd(new HFunctionLiteral(shared_info, expr->pretenure()));
+}
+
+
+void HGraphBuilder::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  BAILOUT("SharedFunctionInfoLiteral");
+}
+
+
+void HGraphBuilder::VisitConditional(Conditional* expr) {
+  HSubgraph* then_graph = CreateEmptySubgraph();
+  HSubgraph* else_graph = CreateEmptySubgraph();
+  VisitCondition(expr->condition(),
+                 then_graph->entry_block(),
+                 else_graph->entry_block(),
+                 false, false);
+  if (HasStackOverflow()) return;
+  ADD_TO_SUBGRAPH(then_graph, expr->then_expression());
+  ADD_TO_SUBGRAPH(else_graph, expr->else_expression());
+  current_subgraph_->AppendJoin(then_graph, else_graph, expr);
+}
+
+
+void HGraphBuilder::LookupGlobalPropertyCell(VariableProxy* expr,
+                                             LookupResult* lookup,
+                                             bool is_store) {
+  if (expr->is_this()) {
+    BAILOUT("global this reference");
+  }
+  if (!graph()->info()->has_global_object()) {
+    BAILOUT("no global object to optimize VariableProxy");
+  }
+  Handle<GlobalObject> global(graph()->info()->global_object());
+  global->Lookup(*expr->name(), lookup);
+  if (!lookup->IsProperty()) {
+    BAILOUT("global variable cell not yet introduced");
+  }
+  if (lookup->type() != NORMAL) {
+    BAILOUT("global variable has accessors");
+  }
+  if (is_store && lookup->IsReadOnly()) {
+    BAILOUT("read-only global variable");
+  }
+}
+
+
+void HGraphBuilder::HandleGlobalVariableLoad(VariableProxy* expr) {
+  LookupResult lookup;
+  LookupGlobalPropertyCell(expr, &lookup, false);
+  CHECK_BAILOUT;
+
+  Handle<GlobalObject> global(graph()->info()->global_object());
+  // TODO(3039103): Handle global property load through an IC call when access
+  // checks are enabled.
+  if (global->IsAccessCheckNeeded()) {
+    BAILOUT("global object requires access check");
+  }
+  Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
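+  // The cell of a deletable (non-DONT_DELETE) or read-only global may
+  // contain the hole value, so the load has to check for it.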
+  bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
+  PushAndAdd(new HLoadGlobal(cell, check_hole));
+}
+
+
+void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+  Variable* variable = expr->AsVariable();
+  if (variable == NULL) {
+    BAILOUT("reference to rewritten variable");
+  } else if (variable->IsStackAllocated()) {
+    if (environment()->Lookup(variable)->CheckFlag(HValue::kIsArguments)) {
+      BAILOUT("unsupported context for arguments object");
+    }
+    Push(environment()->Lookup(variable));
+  } else if (variable->is_global()) {
+    HandleGlobalVariableLoad(expr);
+  } else {
+    BAILOUT("reference to non-stack-allocated/non-global variable");
+  }
+}
+
+
+void HGraphBuilder::VisitLiteral(Literal* expr) {
+  PushAndAdd(new HConstant(expr->handle(), Representation::Tagged()));
+}
+
+
+void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+  PushAndAdd(new HRegExpLiteral(expr->pattern(),
+                                expr->flags(),
+                                expr->literal_index()));
+}
+
+
+void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+  HObjectLiteral* literal = new HObjectLiteral(expr->constant_properties(),
+                                               expr->fast_elements(),
+                                               expr->literal_index(),
+                                               expr->depth());
+  PushAndAdd(literal);
+
+  expr->CalculateEmitStore();
+
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    if (property->IsCompileTimeValue()) continue;
+
+    Literal* key = property->key();
+    Expression* value = property->value();
+
+    switch (property->kind()) {
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+        // Fall through.
+      case ObjectLiteral::Property::COMPUTED:
+        if (key->handle()->IsSymbol()) {
+          if (property->emit_store()) {
+            VISIT_FOR_VALUE(value);
+            HValue* value = Pop();
+            Handle<String> name = Handle<String>::cast(key->handle());
+            AddInstruction(new HStoreNamedGeneric(literal, name, value));
+            AddSimulate(key->id());
+          } else {
+            VISIT_FOR_EFFECT(value);
+          }
+          break;
+        }
+        // Fall through.
+      case ObjectLiteral::Property::PROTOTYPE:
+      case ObjectLiteral::Property::SETTER:
+      case ObjectLiteral::Property::GETTER:
+        BAILOUT("Object literal with complex property");
+      default: UNREACHABLE();
+    }
+  }
+}
+
+
+void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+  ZoneList<Expression*>* subexprs = expr->values();
+  int length = subexprs->length();
+
+  HArrayLiteral* literal = new HArrayLiteral(expr->constant_elements(),
+                                             length,
+                                             expr->literal_index(),
+                                             expr->depth());
+  PushAndAdd(literal);
+  HValue* elements = AddInstruction(new HLoadElements(literal));
+
+  for (int i = 0; i < length; i++) {
+    Expression* subexpr = subexprs->at(i);
+    // If the subexpression is a literal or a simple materialized literal it
+    // is already set in the cloned array.
+    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+    VISIT_FOR_VALUE(subexpr);
+    HValue* value = Pop();
+    if (!Smi::IsValid(i)) BAILOUT("Non-smi key in array literal");
+    HValue* key = AddInstruction(new HConstant(Handle<Object>(Smi::FromInt(i)),
+                                               Representation::Integer32()));
+    AddInstruction(new HStoreKeyedFastElement(elements, key, value));
+    AddSimulate(expr->GetIdForElement(i));
+  }
+}
+
+
+void HGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+  BAILOUT("CatchExtensionObject");
+}
+
+
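+// Builds a dispatch on the receiver's map: a chain of HCompareMapAndBranch
+// blocks, one per map, each branching to the corresponding subgraph, with
+// subgraphs->last() as the fallback for an unhandled map.  Returns the
+// join block of all subgraph exits, or NULL if none of them has an exit.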
+HBasicBlock* HGraphBuilder::BuildTypeSwitch(ZoneMapList* maps,
+                                            ZoneList<HSubgraph*>* subgraphs,
+                                            HValue* receiver,
+                                            int join_id) {
+  ASSERT(subgraphs->length() == (maps->length() + 1));
+
+  // Build map compare subgraphs for all but the first map.
+  ZoneList<HSubgraph*> map_compare_subgraphs(maps->length() - 1);
+  for (int i = maps->length() - 1; i > 0; --i) {
+    HSubgraph* subgraph = CreateBranchSubgraph(environment());
+    SubgraphScope scope(this, subgraph);
+    HSubgraph* else_subgraph =
+        (i == (maps->length() - 1))
+        ? subgraphs->last()
+        : map_compare_subgraphs.last();
+    current_subgraph_->exit_block()->Finish(
+        new HCompareMapAndBranch(receiver,
+                                 maps->at(i),
+                                 subgraphs->at(i)->entry_block(),
+                                 else_subgraph->entry_block()));
+    map_compare_subgraphs.Add(subgraph);
+  }
+
+  // Generate first map check to end the current block.
+  AddInstruction(new HCheckNonSmi(receiver));
+  HSubgraph* else_subgraph =
+      (maps->length() == 1) ? subgraphs->at(1) : map_compare_subgraphs.last();
+  current_subgraph_->exit_block()->Finish(
+      new HCompareMapAndBranch(receiver,
+                               Handle<Map>(maps->first()),
+                               subgraphs->first()->entry_block(),
+                               else_subgraph->entry_block()));
+
+  // Join all the call subgraphs in a new basic block and make
+  // this basic block the current basic block.
+  HBasicBlock* join_block = graph_->CreateBasicBlock();
+  for (int i = 0; i < subgraphs->length(); ++i) {
+    if (subgraphs->at(i)->HasExit()) {
+      subgraphs->at(i)->exit_block()->Goto(join_block);
+    }
+  }
+
+  if (join_block->predecessors()->is_empty()) return NULL;
+  join_block->SetJoinId(join_id);
+  return join_block;
+}
+
+
+// Sets the lookup result and returns true if the store can be inlined.
+static bool ComputeStoredField(Handle<Map> type,
+                               Handle<String> name,
+                               LookupResult* lookup) {
+  type->LookupInDescriptors(NULL, *name, lookup);
+  if (!lookup->IsPropertyOrTransition()) return false;
+  if (lookup->type() == FIELD) return true;
+  return (lookup->type() == MAP_TRANSITION) &&
+      (type->unused_property_fields() > 0);
+}
+
+
+static int ComputeStoredFieldIndex(Handle<Map> type,
+                                   Handle<String> name,
+                                   LookupResult* lookup) {
+  ASSERT(lookup->type() == FIELD || lookup->type() == MAP_TRANSITION);
+  if (lookup->type() == FIELD) {
+    return lookup->GetLocalFieldIndexFromMap(*type);
+  } else {
+    Map* transition = lookup->GetTransitionMapFromMap(*type);
+    return transition->PropertyIndexFor(*name) - type->inobject_properties();
+  }
+}
+
+
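+// The field index from the lookup is translated into a byte offset: a
+// negative index n denotes an in-object slot at offset
+// instance_size() + n * kPointerSize (index -1 is the last in-object
+// word), while a non-negative index denotes a slot in the out-of-object
+// properties array at FixedArray::kHeaderSize + n * kPointerSize.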
+HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
+                                                  Handle<String> name,
+                                                  HValue* value,
+                                                  Handle<Map> type,
+                                                  LookupResult* lookup,
+                                                  bool smi_and_map_check) {
+  if (smi_and_map_check) {
+    AddInstruction(new HCheckNonSmi(object));
+    AddInstruction(new HCheckMap(object, type));
+  }
+
+  int index = ComputeStoredFieldIndex(type, name, lookup);
+  bool is_in_object = index < 0;
+  int offset = index * kPointerSize;
+  if (is_in_object) {
+    // Negative property indices are in-object properties, indexed
+    // from the end of the fixed part of the object.
+    offset += type->instance_size();
+  } else {
+    offset += FixedArray::kHeaderSize;
+  }
+  HStoreNamedField* instr =
+      new HStoreNamedField(object, name, value, is_in_object, offset);
+  if (lookup->type() == MAP_TRANSITION) {
+    Handle<Map> transition(lookup->GetTransitionMapFromMap(*type));
+    instr->set_transition(transition);
+  }
+  return instr;
+}
+
+
+HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
+                                                    Handle<String> name,
+                                                    HValue* value) {
+  return new HStoreNamedGeneric(object, name, value);
+}
+
+
+HInstruction* HGraphBuilder::BuildStoreNamed(HValue* object,
+                                             HValue* value,
+                                             Expression* expr) {
+  Property* prop = (expr->AsProperty() != NULL)
+      ? expr->AsProperty()
+      : expr->AsAssignment()->target()->AsProperty();
+  Literal* key = prop->key()->AsLiteral();
+  Handle<String> name = Handle<String>::cast(key->handle());
+  ASSERT(!name.is_null());
+
+  LookupResult lookup;
+  ZoneMapList* types = expr->GetReceiverTypes();
+  bool is_monomorphic = expr->IsMonomorphic() &&
+      ComputeStoredField(types->first(), name, &lookup);
+
+  return is_monomorphic
+      ? BuildStoreNamedField(object, name, value, types->first(), &lookup,
+                             true)  // Needs smi and map check.
+      : BuildStoreNamedGeneric(object, name, value);
+}
+
+
+void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
+                                                     HValue* object,
+                                                     HValue* value,
+                                                     ZoneMapList* types,
+                                                     Handle<String> name) {
+  int number_of_types = Min(types->length(), kMaxStorePolymorphism);
+  ZoneMapList maps(number_of_types);
+  ZoneList<HSubgraph*> subgraphs(number_of_types + 1);
+  bool needs_generic = (types->length() > kMaxStorePolymorphism);
+
+  // Build subgraphs for each of the specific maps.
+  //
+  // TODO(ager): We should recognize when the prototype chains for
+  // different maps are identical. In that case we can avoid
+  // repeatedly generating the same prototype map checks.
+  for (int i = 0; i < number_of_types; ++i) {
+    Handle<Map> map = types->at(i);
+    LookupResult lookup;
+    if (ComputeStoredField(map, name, &lookup)) {
+      maps.Add(map);
+      HSubgraph* subgraph = CreateBranchSubgraph(environment());
+      SubgraphScope scope(this, subgraph);
+      HInstruction* instr =
+          BuildStoreNamedField(object, name, value, map, &lookup, false);
+      Push(value);
+      instr->set_position(expr->position());
+      AddInstruction(instr);
+      subgraphs.Add(subgraph);
+    } else {
+      needs_generic = true;
+    }
+  }
+
+  // If none of the properties were named fields we generate a
+  // generic store.
+  if (maps.length() == 0) {
+    HInstruction* instr = new HStoreNamedGeneric(object, name, value);
+    Push(value);
+    instr->set_position(expr->position());
+    AddInstruction(instr);
+    return;
+  }
+
+  // Build subgraph for generic store through IC.
+  {
+    HSubgraph* subgraph = CreateBranchSubgraph(environment());
+    SubgraphScope scope(this, subgraph);
+    if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
+      subgraph->FinishExit(new HDeoptimize());
+    } else {
+      HInstruction* instr = new HStoreNamedGeneric(object, name, value);
+      Push(value);
+      instr->set_position(expr->position());
+      AddInstruction(instr);
+    }
+    subgraphs.Add(subgraph);
+  }
+
+  HBasicBlock* new_exit_block =
+      BuildTypeSwitch(&maps, &subgraphs, object, expr->id());
+  current_subgraph_->set_exit_block(new_exit_block);
+}
+
+
+void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
+  Property* prop = expr->target()->AsProperty();
+  ASSERT(prop != NULL);
+  expr->RecordTypeFeedback(oracle());
+  VISIT_FOR_VALUE(prop->obj());
+
+  HValue* value = NULL;
+  HInstruction* instr = NULL;
+
+  if (prop->key()->IsPropertyName()) {
+    // Named store.
+    VISIT_FOR_VALUE(expr->value());
+    value = Pop();
+    HValue* object = Pop();
+
+    Literal* key = prop->key()->AsLiteral();
+    Handle<String> name = Handle<String>::cast(key->handle());
+    ASSERT(!name.is_null());
+
+    ZoneMapList* types = expr->GetReceiverTypes();
+    LookupResult lookup;
+
+    if (expr->IsMonomorphic()) {
+      instr = BuildStoreNamed(object, value, expr);
+
+    } else if (types != NULL && types->length() > 1) {
+      HandlePolymorphicStoreNamedField(expr, object, value, types, name);
+      return;
+
+    } else {
+      instr = new HStoreNamedGeneric(object, name, value);
+    }
+
+  } else {
+    // Keyed store.
+    VISIT_FOR_VALUE(prop->key());
+    VISIT_FOR_VALUE(expr->value());
+    value = Pop();
+    HValue* key = Pop();
+    HValue* object = Pop();
+
+    bool is_fast_elements = expr->IsMonomorphic() &&
+        expr->GetMonomorphicReceiverType()->has_fast_elements();
+
+    instr = is_fast_elements
+        ? BuildStoreKeyedFastElement(object, key, value, expr)
+        : BuildStoreKeyedGeneric(object, key, value);
+  }
+
+  Push(value);
+  instr->set_position(expr->position());
+  AddInstruction(instr);
+}
+
+
+void HGraphBuilder::HandleGlobalVariableAssignment(VariableProxy* proxy,
+                                                   HValue* value,
+                                                   int position) {
+  LookupResult lookup;
+  LookupGlobalPropertyCell(proxy, &lookup, true);
+  CHECK_BAILOUT;
+
+  Handle<GlobalObject> global(graph()->info()->global_object());
+  Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
+  HInstruction* instr = new HStoreGlobal(value, cell);
+  instr->set_position(position);
+  AddInstruction(instr);
+}
+
+
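+// Compound assignments, e.g. `obj.x += y`, are decomposed into a load of
+// the target, the binary operation, and a store, with simulates recorded
+// after the load and after the operation so that each step can bail out
+// with a consistent environment.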
+void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
+  Expression* target = expr->target();
+  VariableProxy* proxy = target->AsVariableProxy();
+  Variable* var = proxy->AsVariable();
+  Property* prop = target->AsProperty();
+  ASSERT(var == NULL || prop == NULL);
+
+  // The FullCodeGenerator records a second position for the binary
+  // operation so that we have type feedback for it.
+  BinaryOperation* operation = expr->binary_operation();
+  operation->RecordTypeFeedback(oracle());
+
+  if (var != NULL) {
+    if (!var->is_global() && !var->IsStackAllocated()) {
+      BAILOUT("non-stack/non-global in compound assignment");
+    }
+
+    VISIT_FOR_VALUE(operation);
+
+    if (var->is_global()) {
+      HandleGlobalVariableAssignment(proxy, Top(), expr->position());
+    } else {
+      Bind(var, Top());
+    }
+  } else if (prop != NULL) {
+    prop->RecordTypeFeedback(oracle());
+
+    if (prop->key()->IsPropertyName()) {
+      // Named property.
+      VISIT_FOR_VALUE(prop->obj());
+      HValue* obj = Top();
+
+      HInstruction* load = NULL;
+      if (prop->IsMonomorphic()) {
+        Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
+        Handle<Map> map = prop->GetReceiverTypes()->first();
+        load = BuildLoadNamed(obj, prop, map, name);
+      } else {
+        load = BuildLoadNamedGeneric(obj, prop);
+      }
+      PushAndAdd(load);
+      if (load->HasSideEffects()) {
+        AddSimulate(expr->compound_bailout_id());
+      }
+
+      VISIT_FOR_VALUE(expr->value());
+      HValue* right = Pop();
+      HValue* left = Pop();
+
+      HInstruction* instr = BuildBinaryOperation(operation, left, right);
+      PushAndAdd(instr);
+      if (instr->HasSideEffects()) AddSimulate(operation->id());
+
+      HInstruction* store = BuildStoreNamed(obj, instr, prop);
+      AddInstruction(store);
+
+      // Drop the simulated receiver and value and put back the value.
+      Drop(2);
+      Push(instr);
+
+    } else {
+      // Keyed property.
+      VISIT_FOR_VALUE(prop->obj());
+      VISIT_FOR_VALUE(prop->key());
+      HValue* obj = environment()->ExpressionStackAt(1);
+      HValue* key = environment()->ExpressionStackAt(0);
+
+      bool is_fast_elements = prop->IsMonomorphic() &&
+          prop->GetMonomorphicReceiverType()->has_fast_elements();
+
+      HInstruction* load = is_fast_elements
+          ? BuildLoadKeyedFastElement(obj, key, prop)
+          : BuildLoadKeyedGeneric(obj, key);
+      PushAndAdd(load);
+      if (load->HasSideEffects()) {
+        AddSimulate(expr->compound_bailout_id());
+      }
+
+      VISIT_FOR_VALUE(expr->value());
+      HValue* right = Pop();
+      HValue* left = Pop();
+
+      HInstruction* instr = BuildBinaryOperation(operation, left, right);
+      PushAndAdd(instr);
+      if (instr->HasSideEffects()) AddSimulate(operation->id());
+
+      HInstruction* store = is_fast_elements
+          ? BuildStoreKeyedFastElement(obj, key, instr, prop)
+          : BuildStoreKeyedGeneric(obj, key, instr);
+      AddInstruction(store);
+
+      // Drop the simulated receiver, key and value and put back the value.
+      Drop(3);
+      Push(instr);
+    }
+  } else {
+    BAILOUT("invalid lhs in compound assignment");
+  }
+}
+
+
+void HGraphBuilder::VisitAssignment(Assignment* expr) {
+  VariableProxy* proxy = expr->target()->AsVariableProxy();
+  Variable* var = proxy->AsVariable();
+  Property* prop = expr->target()->AsProperty();
+  ASSERT(var == NULL || prop == NULL);
+
+  if (expr->is_compound()) {
+    HandleCompoundAssignment(expr);
+    return;
+  }
+
+  if (var != NULL) {
+    if (proxy->IsArguments()) BAILOUT("assignment to arguments");
+    if (var->is_global()) {
+      VISIT_FOR_VALUE(expr->value());
+      HandleGlobalVariableAssignment(proxy, Top(), expr->position());
+    } else {
+      // We allow reference to the arguments object only in assignments
+      // to local variables to make sure that the arguments object does
+      // not escape and is not modified.
+      VariableProxy* rhs = expr->value()->AsVariableProxy();
+      if (rhs != NULL &&
+          rhs->var()->IsStackAllocated() &&
+          environment()->Lookup(rhs->var())->CheckFlag(HValue::kIsArguments)) {
+        Push(environment()->Lookup(rhs->var()));
+      } else {
+        VISIT_FOR_VALUE(expr->value());
+      }
+
+      Bind(proxy->var(), Top());
+    }
+  } else if (prop != NULL) {
+    HandlePropertyAssignment(expr);
+  } else {
+    BAILOUT("unsupported invalid lhs");
+  }
+}
+
+
+void HGraphBuilder::VisitThrow(Throw* expr) {
+  VISIT_FOR_VALUE(expr->exception());
+
+  HValue* value = environment()->Pop();
+  HControlInstruction* instr = new HThrow(value);
+  instr->set_position(expr->position());
+  current_subgraph_->FinishExit(instr);
+}
+
+
+void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
+                                                    HValue* object,
+                                                    ZoneMapList* types,
+                                                    Handle<String> name) {
+  int number_of_types = Min(types->length(), kMaxLoadPolymorphism);
+  ZoneMapList maps(number_of_types);
+  ZoneList<HSubgraph*> subgraphs(number_of_types + 1);
+  bool needs_generic = (types->length() > kMaxLoadPolymorphism);
+
+  // Build subgraphs for each of the specific maps.
+  //
+  // TODO(ager): We should recognize when the prototype chains for
+  // different maps are identical. In that case we can avoid
+  // repeatedly generating the same prototype map checks.
+  for (int i = 0; i < number_of_types; ++i) {
+    Handle<Map> map = types->at(i);
+    LookupResult lookup;
+    map->LookupInDescriptors(NULL, *name, &lookup);
+    if (lookup.IsProperty() && lookup.type() == FIELD) {
+      maps.Add(map);
+      HSubgraph* subgraph = CreateBranchSubgraph(environment());
+      SubgraphScope scope(this, subgraph);
+      HInstruction* instr =
+          BuildLoadNamedField(object, expr, map, &lookup, false);
+      PushAndAdd(instr, expr->position());
+      subgraphs.Add(subgraph);
+    } else {
+      needs_generic = true;
+    }
+  }
+
+  // If none of the properties were named fields we generate a
+  // generic load.
+  if (maps.length() == 0) {
+    HInstruction* instr = BuildLoadNamedGeneric(object, expr);
+    PushAndAdd(instr, expr->position());
+    return;
+  }
+
+  // Build subgraph for generic load through IC.
+  {
+    HSubgraph* subgraph = CreateBranchSubgraph(environment());
+    SubgraphScope scope(this, subgraph);
+    if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
+      subgraph->FinishExit(new HDeoptimize());
+    } else {
+      HInstruction* instr = BuildLoadNamedGeneric(object, expr);
+      PushAndAdd(instr, expr->position());
+    }
+    subgraphs.Add(subgraph);
+  }
+
+  HBasicBlock* new_exit_block =
+      BuildTypeSwitch(&maps, &subgraphs, object, expr->id());
+  current_subgraph_->set_exit_block(new_exit_block);
+}
+
+
+HInstruction* HGraphBuilder::BuildLoadNamedField(HValue* object,
+                                                 Property* expr,
+                                                 Handle<Map> type,
+                                                 LookupResult* lookup,
+                                                 bool smi_and_map_check) {
+  if (smi_and_map_check) {
+    AddInstruction(new HCheckNonSmi(object));
+    AddInstruction(new HCheckMap(object, type));
+  }
+
+  int index = lookup->GetLocalFieldIndexFromMap(*type);
+  if (index < 0) {
+    // Negative property indices are in-object properties, indexed
+    // from the end of the fixed part of the object.
+    int offset = (index * kPointerSize) + type->instance_size();
+    return new HLoadNamedField(object, true, offset);
+  } else {
+    // Non-negative property indices are in the properties array.
+    int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
+    return new HLoadNamedField(object, false, offset);
+  }
+}
+
+
+HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* obj,
+                                                   Property* expr) {
+  ASSERT(expr->key()->IsPropertyName());
+  Handle<Object> name = expr->key()->AsLiteral()->handle();
+  return new HLoadNamedGeneric(obj, name);
+}
+
+
+HInstruction* HGraphBuilder::BuildLoadNamed(HValue* obj,
+                                            Property* expr,
+                                            Handle<Map> map,
+                                            Handle<String> name) {
+  LookupResult lookup;
+  map->LookupInDescriptors(NULL, *name, &lookup);
+  if (lookup.IsProperty() && lookup.type() == FIELD) {
+    return BuildLoadNamedField(obj,
+                               expr,
+                               map,
+                               &lookup,
+                               true);
+  } else {
+    return BuildLoadNamedGeneric(obj, expr);
+  }
+}
+
+
+HInstruction* HGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
+                                                   HValue* key) {
+  return new HLoadKeyedGeneric(object, key);
+}
+
+
+HInstruction* HGraphBuilder::BuildLoadKeyedFastElement(HValue* object,
+                                                       HValue* key,
+                                                       Property* expr) {
+  ASSERT(!expr->key()->IsPropertyName() && expr->IsMonomorphic());
+  AddInstruction(new HCheckNonSmi(object));
+  Handle<Map> map = expr->GetMonomorphicReceiverType();
+  ASSERT(map->has_fast_elements());
+  AddInstruction(new HCheckMap(object, map));
+  HInstruction* elements = AddInstruction(new HLoadElements(object));
+  HInstruction* length = AddInstruction(new HArrayLength(elements));
+  AddInstruction(new HBoundsCheck(key, length));
+  return new HLoadKeyedFastElement(elements, key);
+}
+
+
+HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
+                                                    HValue* key,
+                                                    HValue* value) {
+  return new HStoreKeyedGeneric(object, key, value);
+}
+
+
+HInstruction* HGraphBuilder::BuildStoreKeyedFastElement(HValue* object,
+                                                        HValue* key,
+                                                        HValue* val,
+                                                        Expression* expr) {
+  ASSERT(expr->IsMonomorphic());
+  AddInstruction(new HCheckNonSmi(object));
+  Handle<Map> map = expr->GetMonomorphicReceiverType();
+  ASSERT(map->has_fast_elements());
+  AddInstruction(new HCheckMap(object, map));
+  HInstruction* elements = AddInstruction(new HLoadElements(object));
+  AddInstruction(new HCheckMap(elements, Factory::fixed_array_map()));
+  bool is_array = (map->instance_type() == JS_ARRAY_TYPE);
+  HInstruction* length = NULL;
+  if (is_array) {
+    length = AddInstruction(new HArrayLength(object));
+  } else {
+    length = AddInstruction(new HArrayLength(elements));
+  }
+  AddInstruction(new HBoundsCheck(key, length));
+  return new HStoreKeyedFastElement(elements, key, val);
+}
+
+
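+// Recognizes reads of a non-escaping arguments object: `arguments.length`
+// becomes HArgumentsLength and `arguments[i]` a bounds-checked
+// HAccessArgumentsAt, so the arguments object itself is never
+// materialized.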
+bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
+  VariableProxy* proxy = expr->obj()->AsVariableProxy();
+  if (proxy == NULL) return false;
+  if (!proxy->var()->IsStackAllocated()) return false;
+  if (!environment()->Lookup(proxy->var())->CheckFlag(HValue::kIsArguments)) {
+    return false;
+  }
+
+  if (expr->key()->IsPropertyName()) {
+    Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
+    if (!name->IsEqualTo(CStrVector("length"))) return false;
+    HInstruction* elements = AddInstruction(new HArgumentsElements);
+    PushAndAdd(new HArgumentsLength(elements));
+  } else {
+    VisitForValue(expr->key());
+    if (HasStackOverflow()) return false;
+    HValue* key = Pop();
+    HInstruction* elements = AddInstruction(new HArgumentsElements);
+    HInstruction* length = AddInstruction(new HArgumentsLength(elements));
+    AddInstruction(new HBoundsCheck(key, length));
+    PushAndAdd(new HAccessArgumentsAt(elements, length, key));
+  }
+  return true;
+}
+
+
+void HGraphBuilder::VisitProperty(Property* expr) {
+  expr->RecordTypeFeedback(oracle());
+
+  if (TryArgumentsAccess(expr)) return;
+  CHECK_BAILOUT;
+
+  VISIT_FOR_VALUE(expr->obj());
+
+  HInstruction* instr = NULL;
+  if (expr->IsArrayLength()) {
+    HValue* array = Pop();
+    AddInstruction(new HCheckNonSmi(array));
+    instr = new HArrayLength(array);
+
+  } else if (expr->key()->IsPropertyName()) {
+    Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
+    ZoneMapList* types = expr->GetReceiverTypes();
+
+    HValue* obj = Pop();
+    if (expr->IsMonomorphic()) {
+      instr = BuildLoadNamed(obj, expr, types->first(), name);
+    } else if (types != NULL && types->length() > 1) {
+      HandlePolymorphicLoadNamedField(expr, obj, types, name);
+      return;
+
+    } else {
+      instr = BuildLoadNamedGeneric(obj, expr);
+    }
+
+  } else {
+    VISIT_FOR_VALUE(expr->key());
+
+    HValue* key = Pop();
+    HValue* obj = Pop();
+
+    bool is_fast_elements = expr->IsMonomorphic() &&
+        expr->GetMonomorphicReceiverType()->has_fast_elements();
+
+    instr = is_fast_elements
+        ? BuildLoadKeyedFastElement(obj, key, expr)
+        : BuildLoadKeyedGeneric(obj, key);
+  }
+  PushAndAdd(instr, expr->position());
+}
+
+
+void HGraphBuilder::AddCheckConstantFunction(Call* expr,
+                                             HValue* receiver,
+                                             Handle<Map> receiver_map,
+                                             bool smi_and_map_check) {
+  // Constant functions have the nice property that the map will change if they
+  // are overwritten.  Therefore it is enough to check the map of the holder and
+  // its prototypes.
+  if (smi_and_map_check) {
+    AddInstruction(new HCheckNonSmi(receiver));
+    AddInstruction(new HCheckMap(receiver, receiver_map));
+  }
+  if (!expr->holder().is_null()) {
+    AddInstruction(new HCheckPrototypeMaps(receiver,
+                                           expr->holder(),
+                                           receiver_map));
+  }
+}
+
+
+void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
+                                               HValue* receiver,
+                                               ZoneMapList* types,
+                                               Handle<String> name) {
+  int argument_count = expr->arguments()->length() + 1;  // Plus receiver.
+  int number_of_types = Min(types->length(), kMaxCallPolymorphism);
+  ZoneMapList maps(number_of_types);
+  ZoneList<HSubgraph*> subgraphs(number_of_types + 1);
+  bool needs_generic = (types->length() > kMaxCallPolymorphism);
+
+  // Build subgraphs for each of the specific maps.
+  //
+  // TODO(ager): We should recognize when the prototype chains for
+  // different maps are identical. In that case we can avoid
+  // repeatedly generating the same prototype map checks.
+  for (int i = 0; i < number_of_types; ++i) {
+    Handle<Map> map = types->at(i);
+    if (expr->ComputeTarget(map, name)) {
+      maps.Add(map);
+      HSubgraph* subgraph = CreateBranchSubgraph(environment());
+      SubgraphScope scope(this, subgraph);
+      AddCheckConstantFunction(expr, receiver, map, false);
+      if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
+        PrintF("Trying to inline the polymorphic call to %s\n",
+               *name->ToCString());
+      }
+      if (!FLAG_polymorphic_inlining || !TryInline(expr)) {
+        // Check for bailout, as trying to inline might fail due to bailout
+        // during hydrogen processing.
+        CHECK_BAILOUT;
+        HCall* call = new HCallConstantFunction(expr->target(), argument_count);
+        ProcessCall(call, expr->position());
+      }
+      subgraphs.Add(subgraph);
+    } else {
+      needs_generic = true;
+    }
+  }
+
+  // If we couldn't compute the target for any of the maps just
+  // perform an IC call.
+  if (maps.length() == 0) {
+    HCall* call = new HCallNamed(name, argument_count);
+    ProcessCall(call, expr->position());
+    return;
+  }
+
+  // Build subgraph for generic call through IC.
+  {
+    HSubgraph* subgraph = CreateBranchSubgraph(environment());
+    SubgraphScope scope(this, subgraph);
+    if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
+      subgraph->FinishExit(new HDeoptimize());
+    } else {
+      HCall* call = new HCallNamed(name, argument_count);
+      ProcessCall(call, expr->position());
+    }
+    subgraphs.Add(subgraph);
+  }
+
+  HBasicBlock* new_exit_block =
+      BuildTypeSwitch(&maps, &subgraphs, receiver, expr->id());
+  current_subgraph_->set_exit_block(new_exit_block);
+}
+
+
+void HGraphBuilder::TraceInline(Handle<JSFunction> target, bool result) {
+  SmartPointer<char> callee = target->shared()->DebugName()->ToCString();
+  SmartPointer<char> caller =
+      graph()->info()->function()->debug_name()->ToCString();
+  if (result) {
+    PrintF("Inlined %s called from %s.\n", *callee, *caller);
+  } else {
+    PrintF("Do not inline %s called from %s.\n", *callee, *caller);
+  }
+}
+
+
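+// Attempts to inline a monomorphic call.  The candidate is rejected if,
+// among other things, it is too large, requires a context change, would
+// exceed the inlining depth of two, is recursive, uses the arguments
+// object, or has an arity mismatch; see the individual checks below.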
+bool HGraphBuilder::TryInline(Call* expr) {
+  if (!FLAG_use_inlining) return false;
+
+  // Precondition: call is monomorphic and we have found a target with the
+  // appropriate arity.
+  Handle<JSFunction> target = expr->target();
+
+  // Do a quick check on source code length to avoid parsing large
+  // inlining candidates.
+  if (FLAG_limit_inlining && target->shared()->SourceSize() > kMaxSourceSize) {
+    if (FLAG_trace_inlining) TraceInline(target, false);
+    return false;
+  }
+
+  // Target must be inlineable.
+  if (!target->IsInlineable()) return false;
+
+  // No context change required.
+  CompilationInfo* outer_info = graph()->info();
+  if (target->context() != outer_info->closure()->context() ||
+      outer_info->scope()->contains_with() ||
+      outer_info->scope()->num_heap_slots() > 0) {
+    return false;
+  }
+
+  // Don't inline deeper than two calls.
+  HEnvironment* env = environment();
+  if (env->outer() != NULL && env->outer()->outer() != NULL) return false;
+
+  // Don't inline recursive functions.
+  if (target->shared() == outer_info->closure()->shared()) return false;
+
+  // We don't want to add more than a certain number of nodes from inlining.
+  if (FLAG_limit_inlining && inlined_count_ > kMaxInlinedNodes) {
+    if (FLAG_trace_inlining) TraceInline(target, false);
+    return false;
+  }
+
+  int count_before = AstNode::Count();
+
+  // Parse and allocate variables.
+  Handle<SharedFunctionInfo> shared(target->shared());
+  CompilationInfo inner_info(shared);
+  if (!ParserApi::Parse(&inner_info) ||
+      !Scope::Analyze(&inner_info)) {
+    return false;
+  }
+  FunctionLiteral* function = inner_info.function();
+
+  // Count the number of AST nodes added by inlining this call.
+  int nodes_added = AstNode::Count() - count_before;
+  if (FLAG_limit_inlining && nodes_added > kMaxInlinedSize) {
+    if (FLAG_trace_inlining) TraceInline(target, false);
+    return false;
+  }
+
+  // Check if we can handle all declarations in the inlined functions.
+  VisitDeclarations(inner_info.scope()->declarations());
+  if (HasStackOverflow()) {
+    ClearStackOverflow();
+    return false;
+  }
+
+  // Don't inline functions that use the arguments object or that
+  // have a mismatching number of parameters.
+  int arity = expr->arguments()->length();
+  if (function->scope()->arguments() != NULL ||
+      arity != target->shared()->formal_parameter_count()) {
+    return false;
+  }
+
+  // All statements in the body must be inlineable.
+  for (int i = 0, count = function->body()->length(); i < count; ++i) {
+    if (!function->body()->at(i)->IsInlineable()) return false;
+  }
+
+  // Generate the deoptimization data for the unoptimized version of
+  // the target function if we don't already have it.
+  if (!shared->has_deoptimization_support()) {
+    // Note that we compile here using the same AST that we will use for
+    // generating the optimized inline code.
+    inner_info.EnableDeoptimizationSupport();
+    if (!FullCodeGenerator::MakeCode(&inner_info)) return false;
+    shared->EnableDeoptimizationSupport(*inner_info.code());
+    Compiler::RecordFunctionCompilation(
+        Logger::FUNCTION_TAG,
+        Handle<String>(shared->DebugName()),
+        shared->start_position(),
+        &inner_info);
+  }
+
+  // Save the pending call context and type feedback oracle. Set up new ones
+  // for the inlined function.
+  ASSERT(shared->has_deoptimization_support());
+  AstContext* saved_call_context = call_context();
+  HBasicBlock* saved_function_return = function_return();
+  TypeFeedbackOracle* saved_oracle = oracle();
+  // On-stack replacement cannot target inlined functions.  Since we don't
+  // use a separate CompilationInfo structure for the inlined function, we
+  // save and restore the AST ID in the original compilation info.
+  int saved_osr_ast_id = graph()->info()->osr_ast_id();
+
+  TestContext* test_context = NULL;
+  if (ast_context()->IsTest()) {
+    // Inlined body is treated as if it occurs in an 'inlined' call context
+    // with true and false blocks that will forward to the real ones.
+    HBasicBlock* if_true = graph()->CreateBasicBlock();
+    HBasicBlock* if_false = graph()->CreateBasicBlock();
+    if_true->MarkAsInlineReturnTarget();
+    if_false->MarkAsInlineReturnTarget();
+    // AstContext constructor pushes on the context stack.
+    bool invert_true = TestContext::cast(ast_context())->invert_true();
+    bool invert_false = TestContext::cast(ast_context())->invert_false();
+    test_context = new TestContext(this, if_true, if_false,
+                                   invert_true, invert_false);
+    function_return_ = NULL;
+  } else {
+    // Inlined body is treated as if it occurs in the original call context.
+    function_return_ = graph()->CreateBasicBlock();
+    function_return_->MarkAsInlineReturnTarget();
+  }
+  call_context_ = ast_context();
+  TypeFeedbackOracle new_oracle(Handle<Code>(shared->code()));
+  oracle_ = &new_oracle;
+  graph()->info()->SetOsrAstId(AstNode::kNoNumber);
+
+  HSubgraph* body = CreateInlinedSubgraph(env, target, function);
+  body->exit_block()->AddInstruction(new HEnterInlined(target, function));
+  AddToSubgraph(body, function->body());
+  if (HasStackOverflow()) {
+    // Bail out if the inlined function did, as we cannot residualize a
+    // call instead.
+    delete test_context;
+    call_context_ = saved_call_context;
+    function_return_ = saved_function_return;
+    oracle_ = saved_oracle;
+    graph()->info()->SetOsrAstId(saved_osr_ast_id);
+    return false;
+  }
+
+  // Update inlined nodes count.
+  inlined_count_ += nodes_added;
+
+  if (FLAG_trace_inlining) TraceInline(target, true);
+
+  if (body->HasExit()) {
+    // Add a return of undefined if control can fall off the body.  In a
+    // test context, undefined is false.
+    HValue* return_value = NULL;
+    HBasicBlock* target = NULL;
+    if (test_context == NULL) {
+      ASSERT(function_return_ != NULL);
+      return_value = graph()->GetConstantUndefined();
+      target = function_return_;
+    } else {
+      return_value = graph()->GetConstantFalse();
+      target = test_context->if_false();
+    }
+    body->exit_block()->AddLeaveInlined(return_value, target);
+    body->set_exit_block(NULL);
+  }
+
+  // Record the environment at the inlined function call.
+  AddSimulate(expr->ReturnId());
+
+  // Jump to the function entry (without re-recording the environment).
+  subgraph()->exit_block()->Finish(new HGoto(body->entry_block()));
+
+  // Fix up the function exits.
+  if (test_context != NULL) {
+    HBasicBlock* if_true = test_context->if_true();
+    HBasicBlock* if_false = test_context->if_false();
+    if_true->SetJoinId(expr->id());
+    if_false->SetJoinId(expr->id());
+    ASSERT(ast_context() == test_context);
+    delete test_context;  // Destructor pops from expression context stack.
+    // Forward to the real test context.
+
+    // Discard the lingering branch value (which may be true or false,
+    // depending on whether the final condition was negated) and jump to the
+    // true target with a true branch value.
+    HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
+    bool invert_true = TestContext::cast(ast_context())->invert_true();
+    HValue* true_value = invert_true
+        ? graph()->GetConstantFalse()
+        : graph()->GetConstantTrue();
+    if_true->last_environment()->Pop();
+    if (true_target->IsInlineReturnTarget()) {
+      if_true->AddLeaveInlined(true_value, true_target);
+    } else {
+      if_true->last_environment()->Push(true_value);
+      if_true->Goto(true_target);
+    }
+
+    // Do the same for the false target.
+    HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
+    bool invert_false = TestContext::cast(ast_context())->invert_false();
+    HValue* false_value = invert_false
+        ? graph()->GetConstantTrue()
+        : graph()->GetConstantFalse();
+    if_false->last_environment()->Pop();
+    if (false_target->IsInlineReturnTarget()) {
+      if_false->AddLeaveInlined(false_value, false_target);
+    } else {
+      if_false->last_environment()->Push(false_value);
+      if_false->Goto(false_target);
+    }
+
+    // TODO(kmillikin): Come up with a better way to handle this. It is too
+    // subtle. NULL here indicates that the enclosing context has no control
+    // flow to handle.
+    subgraph()->set_exit_block(NULL);
+
+  } else {
+    function_return_->SetJoinId(expr->id());
+    subgraph()->set_exit_block(function_return_);
+  }
+
+  call_context_ = saved_call_context;
+  function_return_ = saved_function_return;
+  oracle_ = saved_oracle;
+  graph()->info()->SetOsrAstId(saved_osr_ast_id);
+  return true;
+}
+
+
+void HBasicBlock::AddLeaveInlined(HValue* return_value, HBasicBlock* target) {
+  ASSERT(target->IsInlineReturnTarget());
+  AddInstruction(new HLeaveInlined);
+  HEnvironment* outer = last_environment()->outer();
+  outer->Push(return_value);
+  UpdateEnvironment(outer);
+  Goto(target);
+}
+
+
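+// Calls like Math.round(x), Math.floor(x), Math.abs(x), and Math.sqrt(x)
+// (exactly one argument, i.e. argument_count == 2 including the receiver)
+// are replaced by an HUnaryMathOperation in the calling function.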
+bool HGraphBuilder::TryMathFunctionInline(Call* expr) {
+  // Try to inline calls like Math.* as operations in the calling function.
+  MathFunctionId id = expr->target()->shared()->math_function_id();
+  int argument_count = expr->arguments()->length() + 1;  // Plus receiver.
+  switch (id) {
+    case kMathRound:
+    case kMathFloor:
+    case kMathAbs:
+    case kMathSqrt:
+      if (argument_count == 2) {
+        HValue* argument = Pop();
+        // Pop receiver.
+        Pop();
+        HUnaryMathOperation* op = new HUnaryMathOperation(argument, id);
+        PushAndAdd(op, expr->position());
+        return true;
+      }
+      break;
+    default:
+      // Either not a special math function or not yet supported for inlining.
+      break;
+  }
+  return false;
+}
+
+
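+// Recognizes the pattern `f.apply(receiver, arguments)` where the second
+// argument is the caller's un-materialized arguments object, and replaces
+// the monomorphic call with an HApplyArguments instruction.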
+bool HGraphBuilder::TryCallApply(Call* expr) {
+  Expression* callee = expr->expression();
+  Property* prop = callee->AsProperty();
+  ASSERT(prop != NULL);
+
+  if (graph()->info()->scope()->arguments() == NULL) return false;
+
+  Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
+  if (!name->IsEqualTo(CStrVector("apply"))) return false;
+
+  ZoneList<Expression*>* args = expr->arguments();
+  if (args->length() != 2) return false;
+
+  VariableProxy* arg_two = args->at(1)->AsVariableProxy();
+  if (arg_two == NULL) return false;
+  HValue* arg_two_value = environment()->Lookup(arg_two->var());
+  if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
+
+  if (!expr->IsMonomorphic()) return false;
+
+  // Found pattern f.apply(receiver, arguments).
+  VisitForValue(prop->obj());
+  if (HasStackOverflow()) return false;
+  HValue* function = Pop();
+  VisitForValue(args->at(0));
+  if (HasStackOverflow()) return false;
+  HValue* receiver = Pop();
+  HInstruction* elements = AddInstruction(new HArgumentsElements);
+  HInstruction* length = AddInstruction(new HArgumentsLength(elements));
+  AddCheckConstantFunction(expr,
+                           function,
+                           expr->GetReceiverTypes()->first(),
+                           true);
+  PushAndAdd(new HApplyArguments(function, receiver, length, elements),
+             expr->position());
+  return true;
+}
+
+
+void HGraphBuilder::VisitCall(Call* expr) {
+  Expression* callee = expr->expression();
+  int argument_count = expr->arguments()->length() + 1;  // Plus receiver.
+  HCall* call = NULL;
+
+  Property* prop = callee->AsProperty();
+  if (prop != NULL) {
+    if (!prop->key()->IsPropertyName()) {
+      // Keyed function call.
+      VisitArgument(prop->obj());
+      CHECK_BAILOUT;
+
+      VISIT_FOR_VALUE(prop->key());
+      // Push the receiver and key as the non-optimized code generator
+      // expects them.
+      HValue* key = Pop();
+      HValue* receiver = Pop();
+      Push(key);
+      Push(receiver);
+
+      VisitArgumentList(expr->arguments());
+      CHECK_BAILOUT;
+
+      call = new HCallKeyed(key, argument_count);
+      ProcessCall(call, expr->position());
+      HValue* result = Pop();
+      // Drop the receiver from the environment and put back the result of
+      // the call.
+      Drop(1);
+      Push(result);
+      return;
+    }
+
+    // Named function call.
+    expr->RecordTypeFeedback(oracle());
+
+    if (TryCallApply(expr)) return;
+    CHECK_BAILOUT;
+
+    HValue* receiver = VisitArgument(prop->obj());
+    CHECK_BAILOUT;
+    VisitArgumentList(expr->arguments());
+    CHECK_BAILOUT;
+
+    Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
+
+    expr->RecordTypeFeedback(oracle());
+    ZoneMapList* types = expr->GetReceiverTypes();
+
+    if (expr->IsMonomorphic()) {
+      AddCheckConstantFunction(expr, receiver, types->first(), true);
+
+      if (TryMathFunctionInline(expr) || TryInline(expr)) {
+        return;
+      } else {
+        // Check for bailout, as the TryInline call in the if condition above
+        // might return false due to bailout during hydrogen processing.
+        CHECK_BAILOUT;
+        call = new HCallConstantFunction(expr->target(), argument_count);
+      }
+    } else if (types != NULL && types->length() > 1) {
+      HandlePolymorphicCallNamed(expr, receiver, types, name);
+      return;
+
+    } else {
+      call = new HCallNamed(name, argument_count);
+    }
+
+  } else {
+    Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+    bool global_call = (var != NULL) && var->is_global() && !var->is_this();
+
+    if (!global_call) {
+      ++argument_count;
+      VisitArgument(expr->expression());
+      CHECK_BAILOUT;
+    }
+
+    if (global_call) {
+      // If there is a global property cell for the name at compile time and
+      // access check is not enabled we assume that the function will not change
+      // and generate optimized code for calling the function.
+      CompilationInfo* info = graph()->info();
+      bool known_global_function = info->has_global_object() &&
+          !info->global_object()->IsAccessCheckNeeded() &&
+          expr->ComputeGlobalTarget(Handle<GlobalObject>(info->global_object()),
+                                    var->name());
+      if (known_global_function) {
+        // Push the global object instead of the global receiver because
+        // code generated by the full code generator expects it.
+        PushAndAdd(new HGlobalObject);
+        VisitArgumentList(expr->arguments());
+        CHECK_BAILOUT;
+
+        VISIT_FOR_VALUE(expr->expression());
+        HValue* function = Pop();
+        AddInstruction(new HCheckFunction(function, expr->target()));
+
+        // Replace the global object with the global receiver.
+        HGlobalReceiver* global_receiver = new HGlobalReceiver;
+        // Index of the receiver from the top of the expression stack.
+        const int receiver_index = argument_count - 1;
+        AddInstruction(global_receiver);
+        ASSERT(environment()->ExpressionStackAt(receiver_index)->
+               IsGlobalObject());
+        environment()->SetExpressionStackAt(receiver_index, global_receiver);
+
+        if (TryInline(expr)) return;
+        // Check for bailout, as trying to inline might fail due to bailout
+        // during hydrogen processing.
+        CHECK_BAILOUT;
+
+        call = new HCallKnownGlobal(expr->target(), argument_count);
+      } else {
+        PushAndAdd(new HGlobalObject);
+        VisitArgumentList(expr->arguments());
+        CHECK_BAILOUT;
+
+        call = new HCallGlobal(var->name(), argument_count);
+      }
+
+    } else {
+      PushAndAdd(new HGlobalReceiver);
+      VisitArgumentList(expr->arguments());
+      CHECK_BAILOUT;
+
+      call = new HCallFunction(argument_count);
+    }
+  }
+
+  ProcessCall(call, expr->position());
+}
+
+
+void HGraphBuilder::VisitCallNew(CallNew* expr) {
+  // The constructor function is also used as the receiver argument to the
+  // JS construct call builtin.
+  VisitArgument(expr->expression());
+  CHECK_BAILOUT;
+  VisitArgumentList(expr->arguments());
+  CHECK_BAILOUT;
+
+  int argument_count = expr->arguments()->length() + 1;  // Plus constructor.
+  HCall* call = new HCallNew(argument_count);
+
+  ProcessCall(call, expr->position());
+}
+
+
+// Support for generating inlined runtime functions.
+
+// Lookup table for generators for runtime calls that are generated inline.
+// Elements of the table are member pointers to functions of HGraphBuilder.
+#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize)          \
+    &HGraphBuilder::Generate##Name,
+
+const HGraphBuilder::InlineFunctionGenerator
+    HGraphBuilder::kInlineFunctionGenerators[] = {
+        INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
+        INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
+};
+#undef INLINE_FUNCTION_GENERATOR_ADDRESS
+
+
+void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+  Handle<String> name = expr->name();
+  if (name->IsEqualTo(CStrVector("_Log"))) {
+    Push(graph()->GetConstantUndefined());
+    return;
+  }
+
+  Runtime::Function* function = expr->function();
+  if (expr->is_jsruntime()) {
+    BAILOUT("call to a JavaScript runtime function");
+  }
+  ASSERT(function != NULL);
+
+  VisitArgumentList(expr->arguments());
+  CHECK_BAILOUT;
+
+  int argument_count = expr->arguments()->length();
+  if (function->intrinsic_type == Runtime::INLINE) {
+    ASSERT(name->length() > 0);
+    ASSERT(name->Get(0) == '_');
+    // Call to an inline function.
+    int lookup_index = static_cast<int>(function->function_id) -
+        static_cast<int>(Runtime::kFirstInlineFunction);
+    ASSERT(lookup_index >= 0);
+    ASSERT(static_cast<size_t>(lookup_index) <
+           ARRAY_SIZE(kInlineFunctionGenerators));
+    InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index];
+
+    // Call the inline code generator using the pointer-to-member.
+    (this->*generator)(argument_count);
+  } else {
+    ASSERT(function->intrinsic_type == Runtime::RUNTIME);
+    HCall* call = new HCallRuntime(name, expr->function(), argument_count);
+    ProcessCall(call, RelocInfo::kNoPosition);
+  }
+}
+
+
+void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+  Token::Value op = expr->op();
+  if (op == Token::VOID) {
+    VISIT_FOR_EFFECT(expr->expression());
+    Push(graph()->GetConstantUndefined());
+  } else if (op == Token::DELETE) {
+    Property* prop = expr->expression()->AsProperty();
+    Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+    if (prop == NULL && var == NULL) {
+      // The result of deleting a non-property, non-variable reference is
+      // true. Evaluate the subexpression for side effects.
+      VISIT_FOR_EFFECT(expr->expression());
+      Push(graph_->GetConstantTrue());
+    } else if (var != NULL &&
+               !var->is_global() &&
+               var->AsSlot() != NULL &&
+               var->AsSlot()->type() != Slot::LOOKUP) {
+      // The result of deleting non-global, non-dynamic variables is false.
+      // The subexpression does not have side effects.
+      Push(graph_->GetConstantFalse());
+    } else if (prop != NULL) {
+      VISIT_FOR_VALUE(prop->obj());
+      VISIT_FOR_VALUE(prop->key());
+      HValue* key = Pop();
+      HValue* obj = Pop();
+      PushAndAdd(new HDeleteProperty(obj, key));
+    } else if (var->is_global()) {
+      BAILOUT("delete with global variable");
+    } else {
+      BAILOUT("delete with non-global variable");
+    }
+  } else if (op == Token::NOT) {
+    HSubgraph* true_graph = CreateEmptySubgraph();
+    HSubgraph* false_graph = CreateEmptySubgraph();
+    VisitCondition(expr->expression(),
+                   false_graph->entry_block(),
+                   true_graph->entry_block(),
+                   true, true);
+    if (HasStackOverflow()) return;
+    true_graph->environment()->Push(graph_->GetConstantTrue());
+    false_graph->environment()->Push(graph_->GetConstantFalse());
+    current_subgraph_->AppendJoin(true_graph, false_graph, expr);
+  } else if (op == Token::BIT_NOT || op == Token::SUB) {
+    VISIT_FOR_VALUE(expr->expression());
+    HValue* value = Pop();
+    HInstruction* instr = NULL;
+    switch (op) {
+      case Token::BIT_NOT:
+        instr = new HBitNot(value);
+        break;
+      case Token::SUB:
+        instr = new HMul(graph_->GetConstantMinus1(), value);
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+    PushAndAdd(instr);
+  } else if (op == Token::TYPEOF) {
+    VISIT_FOR_VALUE(expr->expression());
+    HValue* value = Pop();
+    PushAndAdd(new HTypeof(value));
+  } else {
+    BAILOUT("Value: unsupported unary operation");
+  }
+}
+
+
+void HGraphBuilder::VisitIncrementOperation(IncrementOperation* expr) {
+  // IncrementOperation is never visited by the visitor. It only
+  // occurs as a subexpression of CountOperation.
+  UNREACHABLE();
+}
+
+
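+// Build the add of +1 or -1 shared by prefix and postfix count operations;
+// the result is assumed to be an Integer32.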
+HInstruction* HGraphBuilder::BuildIncrement(HValue* value, bool increment) {
+  HConstant* delta = increment
+      ? graph_->GetConstant1()
+      : graph_->GetConstantMinus1();
+  HInstruction* instr = new HAdd(value, delta);
+  AssumeRepresentation(instr, Representation::Integer32());
+  return instr;
+}
+
+
+void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
+  IncrementOperation* increment = expr->increment();
+  Expression* target = increment->expression();
+  VariableProxy* proxy = target->AsVariableProxy();
+  Variable* var = proxy->AsVariable();
+  Property* prop = target->AsProperty();
+  ASSERT(var == NULL || prop == NULL);
+  bool inc = expr->op() == Token::INC;
+
+  if (var != NULL) {
+    if (!var->is_global() && !var->IsStackAllocated()) {
+      BAILOUT("non-stack/non-global variable in count operation");
+    }
+
+    VISIT_FOR_VALUE(target);
+
+    HValue* value = Pop();
+    HInstruction* instr = BuildIncrement(value, inc);
+    AddInstruction(instr);
+
+    if (expr->is_prefix()) {
+      Push(instr);
+    } else {
+      Push(value);
+    }
+
+    if (var->is_global()) {
+      HandleGlobalVariableAssignment(proxy, instr, expr->position());
+    } else {
+      ASSERT(var->IsStackAllocated());
+      Bind(var, instr);
+    }
+
+  } else if (prop != NULL) {
+    prop->RecordTypeFeedback(oracle());
+
+    if (prop->key()->IsPropertyName()) {
+      // Named property.
+
+      // Match the full code generator stack by simulating an extra stack
+      // element for postfix operations in a value context.
+      if (expr->is_postfix() && !ast_context()->IsEffect()) {
+        Push(graph_->GetConstantUndefined());
+      }
+
+      VISIT_FOR_VALUE(prop->obj());
+      HValue* obj = Top();
+
+      HInstruction* load = NULL;
+      if (prop->IsMonomorphic()) {
+        Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
+        Handle<Map> map = prop->GetReceiverTypes()->first();
+        load = BuildLoadNamed(obj, prop, map, name);
+      } else {
+        load = BuildLoadNamedGeneric(obj, prop);
+      }
+      PushAndAdd(load);
+      if (load->HasSideEffects()) AddSimulate(increment->id());
+
+      HValue* value = Pop();
+
+      HInstruction* instr = BuildIncrement(value, inc);
+      AddInstruction(instr);
+
+      HInstruction* store = BuildStoreNamed(obj, instr, prop);
+      AddInstruction(store);
+
+      // Drop the simulated receiver and push the result. There is no
+      // deoptimization point after the increment, so we are free to adjust
+      // the expression stack here.
+      Drop(1);
+      if (expr->is_prefix()) {
+        Push(instr);
+      } else {
+        if (!ast_context()->IsEffect()) Drop(1);  // Drop the placeholder.
+        Push(value);
+      }
+
+    } else {
+      // Keyed property.
+
+      // Match the full code generator stack by simulating an extra stack
+      // element for postfix operations in a value context.
+      if (expr->is_postfix() && !ast_context()->IsEffect()) {
+        Push(graph_->GetConstantUndefined());
+      }
+
+      VISIT_FOR_VALUE(prop->obj());
+      VISIT_FOR_VALUE(prop->key());
+
+      HValue* obj = environment()->ExpressionStackAt(1);
+      HValue* key = environment()->ExpressionStackAt(0);
+
+      bool is_fast_elements = prop->IsMonomorphic() &&
+          prop->GetMonomorphicReceiverType()->has_fast_elements();
+
+      HInstruction* load = is_fast_elements
+          ? BuildLoadKeyedFastElement(obj, key, prop)
+          : BuildLoadKeyedGeneric(obj, key);
+      PushAndAdd(load);
+      if (load->HasSideEffects()) AddSimulate(increment->id());
+
+      HValue* value = Pop();
+
+      HInstruction* instr = BuildIncrement(value, inc);
+      AddInstruction(instr);
+
+      HInstruction* store = is_fast_elements
+          ? BuildStoreKeyedFastElement(obj, key, instr, prop)
+          : new HStoreKeyedGeneric(obj, key, instr);
+      AddInstruction(store);
+
+      // Drop the simulated receiver and key and push the result. There is no
+      // deoptimization point after the increment, so we are free to adjust
+      // the expression stack here.
+      Drop(2);
+      if (expr->is_prefix()) {
+        Push(instr);
+      } else {
+        if (!ast_context()->IsEffect()) Drop(1);  // Drop the placeholder.
+        Push(value);
+      }
+    }
+  } else {
+    BAILOUT("invalid lhs in count operation");
+  }
+}
+
+
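+// Translate a binary operation into its hydrogen instruction and, when the
+// type feedback is usable, fix the representation of the result.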
+HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
+                                                  HValue* left,
+                                                  HValue* right) {
+  HInstruction* instr = NULL;
+  switch (expr->op()) {
+    case Token::ADD:
+      instr = new HAdd(left, right);
+      break;
+    case Token::SUB:
+      instr = new HSub(left, right);
+      break;
+    case Token::MUL:
+      instr = new HMul(left, right);
+      break;
+    case Token::MOD:
+      instr = new HMod(left, right);
+      break;
+    case Token::DIV:
+      instr = new HDiv(left, right);
+      break;
+    case Token::BIT_XOR:
+      instr = new HBitXor(left, right);
+      break;
+    case Token::BIT_AND:
+      instr = new HBitAnd(left, right);
+      break;
+    case Token::BIT_OR:
+      instr = new HBitOr(left, right);
+      break;
+    case Token::SAR:
+      instr = new HSar(left, right);
+      break;
+    case Token::SHR:
+      instr = new HShr(left, right);
+      break;
+    case Token::SHL:
+      instr = new HShl(left, right);
+      break;
+    default:
+      UNREACHABLE();
+  }
+  TypeInfo info = oracle()->BinaryType(expr, TypeFeedbackOracle::RESULT);
+  // If we hit an uninitialized binary op stub we will get type info for a
+  // smi operation. If one of the operands is a constant string, do not
+  // generate code that assumes a smi operation.
+  if (info.IsSmi() &&
+      ((left->IsConstant() && HConstant::cast(left)->HasStringValue()) ||
+       (right->IsConstant() && HConstant::cast(right)->HasStringValue()))) {
+    return instr;
+  }
+  if (FLAG_trace_representation) {
+    PrintF("Info: %s/%s\n", info.ToString(), ToRepresentation(info).Mnemonic());
+  }
+  AssumeRepresentation(instr, ToRepresentation(info));
+  return instr;
+}
+
+
+// Check for the form (%_ClassOf(foo) === 'BarClass').
+static bool IsClassOfTest(CompareOperation* expr) {
+  if (expr->op() != Token::EQ_STRICT) return false;
+  CallRuntime* call = expr->left()->AsCallRuntime();
+  if (call == NULL) return false;
+  Literal* literal = expr->right()->AsLiteral();
+  if (literal == NULL) return false;
+  if (!literal->handle()->IsString()) return false;
+  if (!call->name()->IsEqualTo(CStrVector("_ClassOf"))) return false;
+  ASSERT(call->arguments()->length() == 1);
+  return true;
+}
+
+
+void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+  if (expr->op() == Token::COMMA) {
+    VISIT_FOR_EFFECT(expr->left());
+    VISIT_FOR_VALUE(expr->right());
+  } else if (expr->op() == Token::AND || expr->op() == Token::OR) {
+    VISIT_FOR_VALUE(expr->left());
+    ASSERT(current_subgraph_->HasExit());
+
+    HValue* left = Top();
+    bool is_logical_and = (expr->op() == Token::AND);
+
+    HEnvironment* environment_copy = environment()->Copy();
+    environment_copy->Pop();
+    HSubgraph* right_subgraph;
+    right_subgraph = CreateBranchSubgraph(environment_copy);
+    ADD_TO_SUBGRAPH(right_subgraph, expr->right());
+    current_subgraph_->AppendOptional(right_subgraph, is_logical_and, left);
+    current_subgraph_->exit_block()->SetJoinId(expr->id());
+  } else {
+    VISIT_FOR_VALUE(expr->left());
+    VISIT_FOR_VALUE(expr->right());
+
+    HValue* right = Pop();
+    HValue* left = Pop();
+    HInstruction* instr = BuildBinaryOperation(expr, left, right);
+    PushAndAdd(instr, expr->position());
+  }
+}
+
+
+void HGraphBuilder::AssumeRepresentation(HValue* value, Representation r) {
+  if (value->CheckFlag(HValue::kFlexibleRepresentation)) {
+    if (FLAG_trace_representation) {
+      PrintF("Assume representation for %s to be %s (%d)\n",
+             value->Mnemonic(),
+             r.Mnemonic(),
+             graph_->GetMaximumValueID());
+    }
+    value->ChangeRepresentation(r);
+    // The representation of the value is dictated by type feedback.
+    value->ClearFlag(HValue::kFlexibleRepresentation);
+  } else if (FLAG_trace_representation) {
+    PrintF("No representation assumed\n");
+  }
+}
+
+
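+// Map type feedback to the representation used by the optimizing compiler:
+// smis and 32-bit integers become Integer32, other numbers become Double,
+// and everything else stays Tagged.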
+Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
+  if (info.IsSmi()) return Representation::Integer32();
+  if (info.IsInteger32()) return Representation::Integer32();
+  if (info.IsDouble()) return Representation::Double();
+  if (info.IsNumber()) return Representation::Double();
+  return Representation::Tagged();
+}
+
+
+void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+  if (IsClassOfTest(expr)) {
+    CallRuntime* call = expr->left()->AsCallRuntime();
+    VISIT_FOR_VALUE(call->arguments()->at(0));
+    HValue* value = Pop();
+    Literal* literal = expr->right()->AsLiteral();
+    Handle<String> rhs = Handle<String>::cast(literal->handle());
+    HInstruction* instr = new HClassOfTest(value, rhs);
+    PushAndAdd(instr, expr->position());
+    return;
+  }
+
+  // Check for the pattern: typeof <expression> == <string literal>.
+  UnaryOperation* left_unary = expr->left()->AsUnaryOperation();
+  Literal* right_literal = expr->right()->AsLiteral();
+  if ((expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT) &&
+      left_unary != NULL && left_unary->op() == Token::TYPEOF &&
+      right_literal != NULL && right_literal->handle()->IsString()) {
+    VISIT_FOR_VALUE(left_unary->expression());
+    HValue* left = Pop();
+    HInstruction* instr = new HTypeofIs(left,
+        Handle<String>::cast(right_literal->handle()));
+    PushAndAdd(instr, expr->position());
+    return;
+  }
+
+  VISIT_FOR_VALUE(expr->left());
+  VISIT_FOR_VALUE(expr->right());
+
+  HValue* right = Pop();
+  HValue* left = Pop();
+  Token::Value op = expr->op();
+
+  TypeInfo info = oracle()->CompareType(expr, TypeFeedbackOracle::RESULT);
+  HInstruction* instr = NULL;
+  if (op == Token::INSTANCEOF) {
+    instr = new HInstanceOf(left, right);
+  } else if (op == Token::IN) {
+    BAILOUT("Unsupported comparison: in");
+  } else if (info.IsNonPrimitive()) {
+    switch (op) {
+      case Token::EQ:
+      case Token::EQ_STRICT: {
+        AddInstruction(HCheckInstanceType::NewIsJSObjectOrJSFunction(left));
+        AddInstruction(HCheckInstanceType::NewIsJSObjectOrJSFunction(right));
+        instr = new HCompareJSObjectEq(left, right);
+        break;
+      }
+      default:
+        BAILOUT("Unsupported non-primitive compare");
+        break;
+    }
+  } else {
+    HCompare* compare = new HCompare(left, right, op);
+    Representation r = ToRepresentation(info);
+    compare->SetInputRepresentation(r);
+    instr = compare;
+  }
+  PushAndAdd(instr, expr->position());
+}
+
+
+void HGraphBuilder::VisitCompareToNull(CompareToNull* expr) {
+  VISIT_FOR_VALUE(expr->expression());
+
+  HValue* value = Pop();
+  HIsNull* compare = new HIsNull(value, expr->is_strict());
+
+  PushAndAdd(compare);
+}
+
+
+void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
+  BAILOUT("ThisFunction");
+}
+
+
+void HGraphBuilder::VisitDeclaration(Declaration* decl) {
+  // We allow only declarations that do not require code generation.
+  // The following all require code generation: global variables,
+  // variables with slot type LOOKUP, declarations with mode CONST,
+  // and function declarations.
+  Variable* var = decl->proxy()->var();
+  Slot* slot = var->AsSlot();
+  if (var->is_global() ||
+      (slot != NULL && slot->type() == Slot::LOOKUP) ||
+      decl->mode() == Variable::CONST ||
+      decl->fun() != NULL) {
+    BAILOUT("unsupported declaration");
+  }
+}
+
+
+// Generators for inline runtime functions.
+// Support for types.
+void HGraphBuilder::GenerateIsSmi(int argument_count) {
+  ASSERT(argument_count == 1);
+
+  HValue* value = Pop();
+  PushAndAdd(new HIsSmi(value));
+}
+
+
+void HGraphBuilder::GenerateIsSpecObject(int argument_count) {
+  ASSERT(argument_count == 1);
+
+  HValue* value = Pop();
+  HHasInstanceType* test =
+      new HHasInstanceType(value, FIRST_JS_OBJECT_TYPE, LAST_TYPE);
+  PushAndAdd(test);
+}
+
+
+void HGraphBuilder::GenerateIsFunction(int argument_count) {
+  ASSERT(argument_count == 1);
+
+  HValue* value = Pop();
+  HHasInstanceType* test =
+      new HHasInstanceType(value, JS_FUNCTION_TYPE);
+  PushAndAdd(test);
+}
+
+
+void HGraphBuilder::GenerateHasCachedArrayIndex(int argument_count) {
+  ASSERT(argument_count == 1);
+
+  HValue* value = Pop();
+  HHasCachedArrayIndex* spec_test = new HHasCachedArrayIndex(value);
+  PushAndAdd(spec_test);
+}
+
+
+void HGraphBuilder::GenerateIsArray(int argument_count) {
+  ASSERT(argument_count == 1);
+
+  HValue* value = Pop();
+  HHasInstanceType* test =
+      new HHasInstanceType(value, JS_ARRAY_TYPE);
+  PushAndAdd(test);
+}
+
+
+void HGraphBuilder::GenerateIsRegExp(int argument_count) {
+  ASSERT(argument_count == 1);
+
+  HValue* value = Pop();
+  HHasInstanceType* test =
+      new HHasInstanceType(value, JS_REGEXP_TYPE);
+  PushAndAdd(test);
+}
+
+
+void HGraphBuilder::GenerateIsNonNegativeSmi(int argument_count) {
+  BAILOUT("inlined runtime function: IsNonNegativeSmi");
+}
+
+
+void HGraphBuilder::GenerateIsObject(int argument_count) {
+  BAILOUT("inlined runtime function: IsObject");
+}
+
+
+void HGraphBuilder::GenerateIsUndetectableObject(int argument_count) {
+  BAILOUT("inlined runtime function: IsUndetectableObject");
+}
+
+
+void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
+    int argument_count) {
+  BAILOUT("inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
+}
+
+
+// Support for construct call checks.
+void HGraphBuilder::GenerateIsConstructCall(int argument_count) {
+  BAILOUT("inlined runtime function: IsConstructCall");
+}
+
+
+// Support for arguments.length and arguments[?].
+void HGraphBuilder::GenerateArgumentsLength(int argument_count) {
+  ASSERT(argument_count == 0);
+  HInstruction* elements = AddInstruction(new HArgumentsElements);
+  PushAndAdd(new HArgumentsLength(elements));
+}
+
+
+void HGraphBuilder::GenerateArguments(int argument_count) {
+  ASSERT(argument_count == 1);
+  HValue* index = Pop();
+  HInstruction* elements = AddInstruction(new HArgumentsElements);
+  HInstruction* length = AddInstruction(new HArgumentsLength(elements));
+  PushAndAdd(new HAccessArgumentsAt(elements, length, index));
+}
+
+
+// Support for accessing the class and value fields of an object.
+void HGraphBuilder::GenerateClassOf(int argument_count) {
+  // The special form recognized by IsClassOfTest is handled before we get
+  // here and does not cause a bailout.
+  BAILOUT("inlined runtime function: ClassOf");
+}
+
+
+void HGraphBuilder::GenerateValueOf(int argument_count) {
+  ASSERT(argument_count == 1);
+
+  HValue* value = Pop();
+  HValueOf* op = new HValueOf(value);
+  PushAndAdd(op);
+}
+
+
+void HGraphBuilder::GenerateSetValueOf(int argument_count) {
+  BAILOUT("inlined runtime function: SetValueOf");
+}
+
+
+// Fast support for charCodeAt(n).
+void HGraphBuilder::GenerateStringCharCodeAt(int argument_count) {
+  BAILOUT("inlined runtime function: StringCharCodeAt");
+}
+
+
+// Fast support for String.fromCharCode(n).
+void HGraphBuilder::GenerateStringCharFromCode(int argument_count) {
+  BAILOUT("inlined runtime function: StringCharFromCode");
+}
+
+
+// Fast support for string.charAt(n) and string[n].
+void HGraphBuilder::GenerateStringCharAt(int argument_count) {
+  ASSERT_EQ(2, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  PushAndAdd(new HCallStub(CodeStub::StringCharAt, argument_count),
+             RelocInfo::kNoPosition);
+}
+
+
+// Fast support for object equality testing.
+void HGraphBuilder::GenerateObjectEquals(int argument_count) {
+  ASSERT(argument_count == 2);
+
+  HValue* right = Pop();
+  HValue* left = Pop();
+  PushAndAdd(new HCompareJSObjectEq(left, right));
+}
+
+
+void HGraphBuilder::GenerateLog(int argument_count) {
+  UNREACHABLE();  // We caught this in VisitCallRuntime.
+}
+
+
+// Fast support for Math.random().
+void HGraphBuilder::GenerateRandomHeapNumber(int argument_count) {
+  BAILOUT("inlined runtime function: RandomHeapNumber");
+}
+
+
+// Fast support for StringAdd.
+void HGraphBuilder::GenerateStringAdd(int argument_count) {
+  ASSERT_EQ(2, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  PushAndAdd(new HCallStub(CodeStub::StringAdd, argument_count),
+             RelocInfo::kNoPosition);
+}
+
+
+// Fast support for SubString.
+void HGraphBuilder::GenerateSubString(int argument_count) {
+  ASSERT_EQ(3, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  PushAndAdd(new HCallStub(CodeStub::SubString, argument_count),
+             RelocInfo::kNoPosition);
+}
+
+
+// Fast support for StringCompare.
+void HGraphBuilder::GenerateStringCompare(int argument_count) {
+  ASSERT_EQ(2, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  PushAndAdd(new HCallStub(CodeStub::StringCompare, argument_count),
+             RelocInfo::kNoPosition);
+}
+
+
+// Support for direct calls from JavaScript to native RegExp code.
+void HGraphBuilder::GenerateRegExpExec(int argument_count) {
+  ASSERT_EQ(4, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  PushAndAdd(new HCallStub(CodeStub::RegExpExec, argument_count),
+             RelocInfo::kNoPosition);
+}
+
+
+// Construct a RegExp exec result with two in-object properties.
+void HGraphBuilder::GenerateRegExpConstructResult(int argument_count) {
+  ASSERT_EQ(3, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  PushAndAdd(new HCallStub(CodeStub::RegExpConstructResult, argument_count),
+             RelocInfo::kNoPosition);
+}
+
+
+// Support for fast native caches.
+void HGraphBuilder::GenerateGetFromCache(int argument_count) {
+  BAILOUT("inlined runtime function: GetFromCache");
+}
+
+
+// Fast support for number to string.
+void HGraphBuilder::GenerateNumberToString(int argument_count) {
+  ASSERT_EQ(1, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  PushAndAdd(new HCallStub(CodeStub::NumberToString, argument_count),
+             RelocInfo::kNoPosition);
+}
+
+
+// Fast swapping of elements. Takes three expressions, the object and two
+// indices. This should only be used if the indices are known to be
+// non-negative and within bounds of the elements array at the call site.
+void HGraphBuilder::GenerateSwapElements(int argument_count) {
+  BAILOUT("inlined runtime function: SwapElements");
+}
+
+
+// Fast call for custom callbacks.
+void HGraphBuilder::GenerateCallFunction(int argument_count) {
+  BAILOUT("inlined runtime function: CallFunction");
+}
+
+
+// Fast call to math functions.
+void HGraphBuilder::GenerateMathPow(int argument_count) {
+  ASSERT_EQ(2, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  PushAndAdd(new HCallStub(CodeStub::MathPow, argument_count),
+             RelocInfo::kNoPosition);
+}
+
+
+void HGraphBuilder::GenerateMathSin(int argument_count) {
+  ASSERT_EQ(1, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  HCallStub* instr =
+      new HCallStub(CodeStub::TranscendentalCache, argument_count);
+  instr->set_transcendental_type(TranscendentalCache::SIN);
+  PushAndAdd(instr, RelocInfo::kNoPosition);
+}
+
+
+void HGraphBuilder::GenerateMathCos(int argument_count) {
+  ASSERT_EQ(1, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  HCallStub* instr =
+      new HCallStub(CodeStub::TranscendentalCache, argument_count);
+  instr->set_transcendental_type(TranscendentalCache::COS);
+  PushAndAdd(instr, RelocInfo::kNoPosition);
+}
+
+
+void HGraphBuilder::GenerateMathLog(int argument_count) {
+  ASSERT_EQ(1, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  HCallStub* instr =
+      new HCallStub(CodeStub::TranscendentalCache, argument_count);
+  instr->set_transcendental_type(TranscendentalCache::LOG);
+  PushAndAdd(instr, RelocInfo::kNoPosition);
+}
+
+
+void HGraphBuilder::GenerateMathSqrt(int argument_count) {
+  BAILOUT("inlined runtime function: MathSqrt");
+}
+
+
+// Check whether two RegExps are equivalent.
+void HGraphBuilder::GenerateIsRegExpEquivalent(int argument_count) {
+  BAILOUT("inlined runtime function: IsRegExpEquivalent");
+}
+
+
+void HGraphBuilder::GenerateGetCachedArrayIndex(int argument_count) {
+  BAILOUT("inlined runtime function: GetCachedArrayIndex");
+}
+
+
+void HGraphBuilder::GenerateFastAsciiArrayJoin(int argument_count) {
+  BAILOUT("inlined runtime function: FastAsciiArrayJoin");
+}
+
+
+#undef BAILOUT
+#undef CHECK_BAILOUT
+#undef VISIT_FOR_EFFECT
+#undef VISIT_FOR_VALUE
+#undef ADD_TO_SUBGRAPH
+
+
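+// Environment for a function activation: the receiver and parameters are
+// followed by the stack-allocated locals; the expression stack starts empty.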
+HEnvironment::HEnvironment(HEnvironment* outer,
+                           Scope* scope,
+                           Handle<JSFunction> closure)
+    : closure_(closure),
+      values_(0),
+      assigned_variables_(4),
+      parameter_count_(0),
+      local_count_(0),
+      outer_(outer),
+      pop_count_(0),
+      push_count_(0),
+      ast_id_(AstNode::kNoNumber) {
+  Initialize(scope->num_parameters() + 1, scope->num_stack_slots(), 0);
+}
+
+
+HEnvironment::HEnvironment(const HEnvironment* other)
+    : values_(0),
+      assigned_variables_(0),
+      parameter_count_(0),
+      local_count_(0),
+      outer_(NULL),
+      pop_count_(0),
+      push_count_(0),
+      ast_id_(other->ast_id()) {
+  Initialize(other);
+}
+
+
+void HEnvironment::Initialize(int parameter_count,
+                              int local_count,
+                              int stack_height) {
+  parameter_count_ = parameter_count;
+  local_count_ = local_count;
+
+  // Avoid reallocating the temporaries' backing store on the first Push.
+  int total = parameter_count + local_count + stack_height;
+  values_.Initialize(total + 4);
+  for (int i = 0; i < total; ++i) values_.Add(NULL);
+}
+
+
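+// Merge the values flowing in along an incoming edge into this environment,
+// creating a phi in the given block wherever the incoming value differs.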
+void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) {
+  ASSERT(!block->IsLoopHeader());
+  ASSERT(values_.length() == other->values_.length());
+
+  int length = values_.length();
+  for (int i = 0; i < length; ++i) {
+    HValue* value = values_[i];
+    if (value != NULL && value->IsPhi() && value->block() == block) {
+      // There is already a phi for the i'th value.
+      HPhi* phi = HPhi::cast(value);
+      // Assert index is correct and that we haven't missed an incoming edge.
+      ASSERT(phi->merged_index() == i);
+      ASSERT(phi->OperandCount() == block->predecessors()->length());
+      phi->AddInput(other->values_[i]);
+    } else if (values_[i] != other->values_[i]) {
+      // There is a fresh value on the incoming edge, a phi is needed.
+      ASSERT(values_[i] != NULL && other->values_[i] != NULL);
+      HPhi* phi = new HPhi(i);
+      HValue* old_value = values_[i];
+      for (int j = 0; j < block->predecessors()->length(); j++) {
+        phi->AddInput(old_value);
+      }
+      phi->AddInput(other->values_[i]);
+      this->values_[i] = phi;
+      block->AddPhi(phi);
+    }
+  }
+}
+
+
+void HEnvironment::Initialize(const HEnvironment* other) {
+  closure_ = other->closure();
+  values_.AddAll(other->values_);
+  assigned_variables_.AddAll(other->assigned_variables_);
+  parameter_count_ = other->parameter_count_;
+  local_count_ = other->local_count_;
+  if (other->outer_ != NULL) outer_ = other->outer_->Copy();  // Deep copy.
+  pop_count_ = other->pop_count_;
+  push_count_ = other->push_count_;
+  ast_id_ = other->ast_id_;
+}
+
+
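+// Map a stack-allocated slot to its index in the value array: the receiver
+// occupies index 0, so parameters are shifted up by one; locals follow them.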
+int HEnvironment::IndexFor(Variable* variable) const {
+  Slot* slot = variable->AsSlot();
+  ASSERT(slot != NULL && slot->IsStackAllocated());
+  if (slot->type() == Slot::PARAMETER) {
+    return slot->index() + 1;
+  } else {
+    return parameter_count_ + slot->index();
+  }
+}
+
+
+HEnvironment* HEnvironment::Copy() const {
+  return new HEnvironment(this);
+}
+
+
+HEnvironment* HEnvironment::CopyWithoutHistory() const {
+  HEnvironment* result = Copy();
+  result->ClearHistory();
+  return result;
+}
+
+
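+// Copy the environment for a loop header, wrapping each value in a phi so
+// that values arriving along the back edge can be merged in later.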
+HEnvironment* HEnvironment::CopyAsLoopHeader(HBasicBlock* loop_header) const {
+  HEnvironment* new_env = Copy();
+  for (int i = 0; i < values_.length(); ++i) {
+    HPhi* phi = new HPhi(i);
+    phi->AddInput(values_[i]);
+    new_env->values_[i] = phi;
+    loop_header->AddPhi(phi);
+  }
+  new_env->ClearHistory();
+  return new_env;
+}
+
+
+HEnvironment* HEnvironment::CopyForInlining(Handle<JSFunction> target,
+                                            FunctionLiteral* function,
+                                            bool is_speculative,
+                                            HConstant* undefined) const {
+  // Outer environment is a copy of this one without the arguments.
+  int arity = function->scope()->num_parameters();
+  HEnvironment* outer = Copy();
+  outer->Drop(arity + 1);  // Including receiver.
+  outer->ClearHistory();
+  HEnvironment* inner = new HEnvironment(outer, function->scope(), target);
+  // Get the argument values from the original environment.
+  if (is_speculative) {
+    for (int i = 0; i <= arity; ++i) {  // Include receiver.
+      HValue* push = ExpressionStackAt(arity - i);
+      inner->SetValueAt(i, push);
+    }
+  } else {
+    for (int i = 0; i <= arity; ++i) {  // Include receiver.
+      inner->SetValueAt(i, ExpressionStackAt(arity - i));
+    }
+  }
+
+  // Initialize the stack-allocated locals to undefined.
+  int local_base = arity + 1;
+  int local_count = function->scope()->num_stack_slots();
+  for (int i = 0; i < local_count; ++i) {
+    inner->SetValueAt(local_base + i, undefined);
+  }
+
+  inner->set_ast_id(function->id());
+  return inner;
+}
+
+
+void HEnvironment::PrintTo(StringStream* stream) {
+  for (int i = 0; i < total_count(); i++) {
+    if (i == 0) stream->Add("parameters\n");
+    if (i == parameter_count()) stream->Add("locals\n");
+    if (i == parameter_count() + local_count()) stream->Add("expressions");
+    HValue* val = values_.at(i);
+    stream->Add("%d: ", i);
+    if (val != NULL) {
+      val->PrintNameTo(stream);
+    } else {
+      stream->Add("NULL");
+    }
+    stream->Add("\n");
+  }
+}
+
+
+void HEnvironment::PrintToStd() {
+  HeapStringAllocator string_allocator;
+  StringStream trace(&string_allocator);
+  PrintTo(&trace);
+  PrintF("%s", *trace.ToCString());
+}
+
+
+void HTracer::TraceCompilation(FunctionLiteral* function) {
+  Tag tag(this, "compilation");
+  Handle<String> name = function->debug_name();
+  PrintStringProperty("name", *name->ToCString());
+  PrintStringProperty("method", *name->ToCString());
+  PrintLongProperty("date", static_cast<int64_t>(OS::TimeCurrentMillis()));
+}
+
+
+void HTracer::TraceLithium(const char* name, LChunk* chunk) {
+  Trace(name, chunk->graph(), chunk);
+}
+
+
+void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
+  Trace(name, graph, NULL);
+}
+
+
+void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
+  Tag tag(this, "cfg");
+  PrintStringProperty("name", name);
+  const ZoneList<HBasicBlock*>* blocks = graph->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* current = blocks->at(i);
+    Tag block_tag(this, "block");
+    PrintBlockProperty("name", current->block_id());
+    PrintIntProperty("from_bci", -1);
+    PrintIntProperty("to_bci", -1);
+
+    if (!current->predecessors()->is_empty()) {
+      PrintIndent();
+      trace_.Add("predecessors");
+      for (int j = 0; j < current->predecessors()->length(); ++j) {
+        trace_.Add(" \"B%d\"", current->predecessors()->at(j)->block_id());
+      }
+      trace_.Add("\n");
+    } else {
+      PrintEmptyProperty("predecessors");
+    }
+
+    if (current->end() == NULL || current->end()->FirstSuccessor() == NULL) {
+      PrintEmptyProperty("successors");
+    } else if (current->end()->SecondSuccessor() == NULL) {
+      PrintBlockProperty("successors",
+                             current->end()->FirstSuccessor()->block_id());
+    } else {
+      PrintBlockProperty("successors",
+                             current->end()->FirstSuccessor()->block_id(),
+                             current->end()->SecondSuccessor()->block_id());
+    }
+
+    PrintEmptyProperty("xhandlers");
+    PrintEmptyProperty("flags");
+
+    if (current->dominator() != NULL) {
+      PrintBlockProperty("dominator", current->dominator()->block_id());
+    }
+
+    if (chunk != NULL) {
+      int first_index = current->first_instruction_index();
+      int last_index = current->last_instruction_index();
+      PrintIntProperty(
+          "first_lir_id",
+          LifetimePosition::FromInstructionIndex(first_index).Value());
+      PrintIntProperty(
+          "last_lir_id",
+          LifetimePosition::FromInstructionIndex(last_index).Value());
+    }
+
+    {
+      Tag states_tag(this, "states");
+      Tag locals_tag(this, "locals");
+      int total = current->phis()->length();
+      trace_.Add("size %d\n", total);
+      trace_.Add("method \"None\"");
+      for (int j = 0; j < total; ++j) {
+        HPhi* phi = current->phis()->at(j);
+        trace_.Add("%d ", phi->merged_index());
+        phi->PrintNameTo(&trace_);
+        trace_.Add(" ");
+        phi->PrintTo(&trace_);
+        trace_.Add("\n");
+      }
+    }
+
+    {
+      Tag HIR_tag(this, "HIR");
+      HInstruction* instruction = current->first();
+      while (instruction != NULL) {
+        int bci = 0;
+        int uses = instruction->uses()->length();
+        trace_.Add("%d %d ", bci, uses);
+        instruction->PrintNameTo(&trace_);
+        trace_.Add(" ");
+        instruction->PrintTo(&trace_);
+        trace_.Add(" <|@\n");
+        instruction = instruction->next();
+      }
+    }
+
+
+    if (chunk != NULL) {
+      Tag LIR_tag(this, "LIR");
+      int first_index = current->first_instruction_index();
+      int last_index = current->last_instruction_index();
+      if (first_index != -1 && last_index != -1) {
+        const ZoneList<LInstruction*>* instructions = chunk->instructions();
+        for (int i = first_index; i <= last_index; ++i) {
+          LInstruction* linstr = instructions->at(i);
+          if (linstr != NULL) {
+            trace_.Add("%d ",
+                       LifetimePosition::FromInstructionIndex(i).Value());
+            linstr->PrintTo(&trace_);
+            trace_.Add(" <|@\n");
+          }
+        }
+      }
+    }
+  }
+}
+
+
+void HTracer::TraceLiveRanges(const char* name, LAllocator* allocator) {
+  Tag tag(this, "intervals");
+  PrintStringProperty("name", name);
+
+  const ZoneList<LiveRange*>* fixed_d = allocator->fixed_double_live_ranges();
+  for (int i = 0; i < fixed_d->length(); ++i) {
+    TraceLiveRange(fixed_d->at(i), "fixed");
+  }
+
+  const ZoneList<LiveRange*>* fixed = allocator->fixed_live_ranges();
+  for (int i = 0; i < fixed->length(); ++i) {
+    TraceLiveRange(fixed->at(i), "fixed");
+  }
+
+  const ZoneList<LiveRange*>* live_ranges = allocator->live_ranges();
+  for (int i = 0; i < live_ranges->length(); ++i) {
+    TraceLiveRange(live_ranges->at(i), "object");
+  }
+}
+
+
+void HTracer::TraceLiveRange(LiveRange* range, const char* type) {
+  if (range != NULL && !range->IsEmpty()) {
+    trace_.Add("%d %s", range->id(), type);
+    if (range->HasRegisterAssigned()) {
+      LOperand* op = range->CreateAssignedOperand();
+      int assigned_reg = op->index();
+      if (op->IsDoubleRegister()) {
+        trace_.Add(" \"%s\"",
+                   DoubleRegister::AllocationIndexToString(assigned_reg));
+      } else {
+        ASSERT(op->IsRegister());
+        trace_.Add(" \"%s\"", Register::AllocationIndexToString(assigned_reg));
+      }
+    } else if (range->IsSpilled()) {
+      LOperand* op = range->TopLevel()->GetSpillOperand();
+      if (op->IsDoubleStackSlot()) {
+        trace_.Add(" \"double_stack:%d\"", op->index());
+      } else {
+        ASSERT(op->IsStackSlot());
+        trace_.Add(" \"stack:%d\"", op->index());
+      }
+    }
+    int parent_index = -1;
+    if (range->IsChild()) {
+      parent_index = range->parent()->id();
+    } else {
+      parent_index = range->id();
+    }
+    LOperand* op = range->FirstHint();
+    int hint_index = -1;
+    if (op != NULL && op->IsUnallocated()) hint_index = op->VirtualRegister();
+    trace_.Add(" %d %d", parent_index, hint_index);
+    UseInterval* cur_interval = range->first_interval();
+    while (cur_interval != NULL) {
+      trace_.Add(" [%d, %d[",
+                 cur_interval->start().Value(),
+                 cur_interval->end().Value());
+      cur_interval = cur_interval->next();
+    }
+
+    UsePosition* current_pos = range->first_pos();
+    while (current_pos != NULL) {
+      if (current_pos->RegisterIsBeneficial()) {
+        trace_.Add(" %d M", current_pos->pos().Value());
+      }
+      current_pos = current_pos->next();
+    }
+
+    trace_.Add(" \"\"\n");
+  }
+}
+
+
+void HTracer::FlushToFile() {
+  AppendChars(filename_, *trace_.ToCString(), trace_.length(), false);
+  trace_.Reset();
+}
+
+
+void HStatistics::Print() {
+  PrintF("Timing results:\n");
+  int64_t sum = 0;
+  for (int i = 0; i < timing_.length(); ++i) {
+    sum += timing_[i];
+  }
+
+  for (int i = 0; i < names_.length(); ++i) {
+    PrintF("%30s", names_[i]);
+    double ms = static_cast<double>(timing_[i]) / 1000;
+    double percent = static_cast<double>(timing_[i]) * 100 / sum;
+    PrintF(" - %0.3f ms / %0.3f %% \n", ms, percent);
+  }
+  PrintF("%30s - %0.3f ms \n", "Sum", static_cast<double>(sum) / 1000);
+  PrintF("---------------------------------------------------------------\n");
+  PrintF("%30s - %0.3f ms (%0.1f times slower than full code gen)\n",
+         "Total",
+         static_cast<double>(total_) / 1000,
+         static_cast<double>(total_) / full_code_gen_);
+}
+
+
+void HStatistics::SaveTiming(const char* name, int64_t ticks) {
+  if (name == HPhase::kFullCodeGen) {
+    full_code_gen_ += ticks;
+  } else if (name == HPhase::kTotal) {
+    total_ += ticks;
+  } else {
+    for (int i = 0; i < names_.length(); ++i) {
+      if (names_[i] == name) {
+        timing_[i] += ticks;
+        return;
+      }
+    }
+    names_.Add(name);
+    timing_.Add(ticks);
+  }
+}
+
+
+const char* const HPhase::kFullCodeGen = "Full code generator";
+const char* const HPhase::kTotal = "Total";
+
+
+void HPhase::Begin(const char* name,
+                   HGraph* graph,
+                   LChunk* chunk,
+                   LAllocator* allocator) {
+  name_ = name;
+  graph_ = graph;
+  chunk_ = chunk;
+  allocator_ = allocator;
+  if (allocator != NULL && chunk_ == NULL) {
+    chunk_ = allocator->chunk();
+  }
+  if (FLAG_time_hydrogen) start_ = OS::Ticks();
+}
+
+
+void HPhase::End() const {
+  if (FLAG_time_hydrogen) {
+    int64_t end = OS::Ticks();
+    HStatistics::Instance()->SaveTiming(name_, end - start_);
+  }
+
+  if (FLAG_trace_hydrogen) {
+    if (graph_ != NULL) HTracer::Instance()->TraceHydrogen(name_, graph_);
+    if (chunk_ != NULL) HTracer::Instance()->TraceLithium(name_, chunk_);
+    if (allocator_ != NULL) {
+      HTracer::Instance()->TraceLiveRanges(name_, allocator_);
+    }
+  }
+
+#ifdef DEBUG
+  if (graph_ != NULL) graph_->Verify();
+  if (chunk_ != NULL) chunk_->Verify();
+  if (allocator_ != NULL) allocator_->Verify();
+#endif
+}
+
+} }  // namespace v8::internal
diff --git a/src/hydrogen.h b/src/hydrogen.h
new file mode 100644 (file)
index 0000000..91f3c9e
--- /dev/null
@@ -0,0 +1,1061 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_H_
+#define V8_HYDROGEN_H_
+
+#include "v8.h"
+
+#include "ast.h"
+#include "compiler.h"
+#include "data-flow.h"
+#include "hydrogen-instructions.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class HEnvironment;
+class HGraph;
+class HLoopInformation;
+class HTracer;
+class LAllocator;
+class LChunk;
+class LiveRange;
+
+
+class HBasicBlock: public ZoneObject {
+ public:
+  explicit HBasicBlock(HGraph* graph);
+  virtual ~HBasicBlock() { }
+
+  // Simple accessors.
+  int block_id() const { return block_id_; }
+  void set_block_id(int id) { block_id_ = id; }
+  HGraph* graph() const { return graph_; }
+  const ZoneList<HPhi*>* phis() const { return &phis_; }
+  HInstruction* first() const { return first_; }
+  HInstruction* GetLastInstruction();
+  HControlInstruction* end() const { return end_; }
+  HLoopInformation* loop_information() const { return loop_information_; }
+  const ZoneList<HBasicBlock*>* predecessors() const { return &predecessors_; }
+  bool HasPredecessor() const { return predecessors_.length() > 0; }
+  const ZoneList<HBasicBlock*>* dominated_blocks() const {
+    return &dominated_blocks_;
+  }
+  const ZoneList<int>* deleted_phis() const {
+    return &deleted_phis_;
+  }
+  void RecordDeletedPhi(int merge_index) {
+    deleted_phis_.Add(merge_index);
+  }
+  HBasicBlock* dominator() const { return dominator_; }
+  HEnvironment* last_environment() const { return last_environment_; }
+  int argument_count() const { return argument_count_; }
+  void set_argument_count(int count) { argument_count_ = count; }
+  int first_instruction_index() const { return first_instruction_index_; }
+  void set_first_instruction_index(int index) {
+    first_instruction_index_ = index;
+  }
+  int last_instruction_index() const { return last_instruction_index_; }
+  void set_last_instruction_index(int index) {
+    last_instruction_index_ = index;
+  }
+
+  void AttachLoopInformation();
+  void DetachLoopInformation();
+  bool IsLoopHeader() const { return loop_information() != NULL; }
+  bool IsStartBlock() const { return block_id() == 0; }
+  void PostProcessLoopHeader(IterationStatement* stmt);
+
+  bool IsFinished() const { return end_ != NULL; }
+  void AddPhi(HPhi* phi);
+  void RemovePhi(HPhi* phi);
+  void AddInstruction(HInstruction* instr);
+  bool Dominates(HBasicBlock* other) const;
+
+  void SetInitialEnvironment(HEnvironment* env);
+  void ClearEnvironment() { last_environment_ = NULL; }
+  bool HasEnvironment() const { return last_environment_ != NULL; }
+  void UpdateEnvironment(HEnvironment* env) { last_environment_ = env; }
+  HBasicBlock* parent_loop_header() const {
+    if (!HasParentLoopHeader()) return NULL;
+    return parent_loop_header_.get();
+  }
+
+  void set_parent_loop_header(HBasicBlock* block) {
+    parent_loop_header_.set(block);
+  }
+
+  bool HasParentLoopHeader() const { return parent_loop_header_.is_set(); }
+
+  void SetJoinId(int id);
+
+  void Finish(HControlInstruction* last);
+  void Goto(HBasicBlock* block, bool include_stack_check = false);
+
+  int PredecessorIndexOf(HBasicBlock* predecessor) const;
+  void AddSimulate(int id) { AddInstruction(CreateSimulate(id)); }
+  void AssignCommonDominator(HBasicBlock* other);
+
+  // Add the inlined function exit sequence, adding an HLeaveInlined
+  // instruction and updating the bailout environment.
+  void AddLeaveInlined(HValue* return_value, HBasicBlock* target);
+
+  // If a target block is tagged as an inline function return, all
+  // predecessors should contain the inlined exit sequence:
+  //
+  // LeaveInlined
+  // Simulate (caller's environment)
+  // Goto (target block)
+  bool IsInlineReturnTarget() const { return is_inline_return_target_; }
+  void MarkAsInlineReturnTarget() { is_inline_return_target_ = true; }
+
+  // If this block is a successor of a branch, this flag tells whether the
+  // preceding branch was inverted or not.
+  bool inverted() { return inverted_; }
+  void set_inverted(bool b) { inverted_ = b; }
+
+  HBasicBlock* deopt_predecessor() { return deopt_predecessor_; }
+  void set_deopt_predecessor(HBasicBlock* block) { deopt_predecessor_ = block; }
+
+  Handle<Object> cond() { return cond_; }
+  void set_cond(Handle<Object> value) { cond_ = value; }
+
+#ifdef DEBUG
+  void Verify();
+#endif
+
+ private:
+  void RegisterPredecessor(HBasicBlock* pred);
+  void AddDominatedBlock(HBasicBlock* block);
+
+  HSimulate* CreateSimulate(int id);
+
+  int block_id_;
+  HGraph* graph_;
+  ZoneList<HPhi*> phis_;
+  HInstruction* first_;
+  HInstruction* last_;  // Last non-control instruction of the block.
+  HControlInstruction* end_;
+  HLoopInformation* loop_information_;
+  ZoneList<HBasicBlock*> predecessors_;
+  HBasicBlock* dominator_;
+  ZoneList<HBasicBlock*> dominated_blocks_;
+  HEnvironment* last_environment_;
+  // Outgoing parameter count at block exit, set during lithium translation.
+  int argument_count_;
+  // Instruction indices into the lithium code stream.
+  int first_instruction_index_;
+  int last_instruction_index_;
+  ZoneList<int> deleted_phis_;
+  SetOncePointer<HBasicBlock> parent_loop_header_;
+  bool is_inline_return_target_;
+  bool inverted_;
+  HBasicBlock* deopt_predecessor_;
+  Handle<Object> cond_;
+};
+
+
+class HLoopInformation: public ZoneObject {
+ public:
+  explicit HLoopInformation(HBasicBlock* loop_header)
+      : back_edges_(4), loop_header_(loop_header), blocks_(8) {
+    blocks_.Add(loop_header);
+  }
+  virtual ~HLoopInformation() {}
+
+  const ZoneList<HBasicBlock*>* back_edges() const { return &back_edges_; }
+  const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
+  HBasicBlock* loop_header() const { return loop_header_; }
+  HBasicBlock* GetLastBackEdge() const;
+  void RegisterBackEdge(HBasicBlock* block);
+
+ private:
+  void AddBlock(HBasicBlock* block);
+
+  ZoneList<HBasicBlock*> back_edges_;
+  HBasicBlock* loop_header_;
+  ZoneList<HBasicBlock*> blocks_;
+};
+
+
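+// A piece of the graph under construction: an entry block and, as long as
+// control has not left the subgraph, an exit block whose last environment
+// is the current one.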
+class HSubgraph: public ZoneObject {
+ public:
+  explicit HSubgraph(HGraph* graph)
+      : graph_(graph),
+        entry_block_(NULL),
+        exit_block_(NULL),
+        break_continue_info_(4) {
+  }
+
+  HGraph* graph() const { return graph_; }
+  HEnvironment* environment() const {
+    ASSERT(HasExit());
+    return exit_block_->last_environment();
+  }
+
+  bool HasExit() const { return exit_block_ != NULL; }
+
+  void PreProcessOsrEntry(IterationStatement* statement);
+
+  void AppendOptional(HSubgraph* graph,
+                      bool on_true_branch,
+                      HValue* boolean_value);
+  void AppendJoin(HSubgraph* then_graph, HSubgraph* else_graph, AstNode* node);
+  void AppendWhile(HSubgraph* condition,
+                   HSubgraph* body,
+                   IterationStatement* statement,
+                   HSubgraph* continue_subgraph,
+                   HSubgraph* exit);
+  void AppendDoWhile(HSubgraph* body,
+                     IterationStatement* statement,
+                     HSubgraph* go_back,
+                     HSubgraph* exit);
+  void AppendEndless(HSubgraph* body, IterationStatement* statement);
+  void Append(HSubgraph* next, BreakableStatement* statement);
+  void ResolveContinue(IterationStatement* statement);
+  HBasicBlock* BundleBreak(BreakableStatement* statement);
+  HBasicBlock* BundleContinue(IterationStatement* statement);
+  HBasicBlock* BundleBreakContinue(BreakableStatement* statement,
+                                   bool is_continue,
+                                   int join_id);
+  HBasicBlock* JoinBlocks(HBasicBlock* a, HBasicBlock* b, int id);
+
+  void FinishExit(HControlInstruction* instruction);
+  void FinishBreakContinue(BreakableStatement* target, bool is_continue);
+  void Initialize(HBasicBlock* block) {
+    ASSERT(entry_block_ == NULL);
+    entry_block_ = block;
+    exit_block_ = block;
+  }
+  HBasicBlock* entry_block() const { return entry_block_; }
+  HBasicBlock* exit_block() const { return exit_block_; }
+  void set_exit_block(HBasicBlock* block) {
+    exit_block_ = block;
+  }
+
+  void ConnectExitTo(HBasicBlock* other, bool include_stack_check = false) {
+    if (HasExit()) {
+      exit_block()->Goto(other, include_stack_check);
+    }
+  }
+
+  void AddBreakContinueInfo(HSubgraph* other) {
+    break_continue_info_.AddAll(other->break_continue_info_);
+  }
+
+ protected:
+  class BreakContinueInfo: public ZoneObject {
+   public:
+    BreakContinueInfo(BreakableStatement* target, HBasicBlock* block,
+                      bool is_continue)
+      : target_(target), block_(block), continue_(is_continue) {}
+    BreakableStatement* target() const { return target_; }
+    HBasicBlock* block() const { return block_; }
+    bool is_continue() const { return continue_; }
+    bool IsResolved() const { return block_ == NULL; }
+    void Resolve() { block_ = NULL; }
+
+   private:
+    BreakableStatement* target_;
+    HBasicBlock* block_;
+    bool continue_;
+  };
+
+  const ZoneList<BreakContinueInfo*>* break_continue_info() const {
+    return &break_continue_info_;
+  }
+
+  HGraph* graph_;  // The graph this is a subgraph of.
+  HBasicBlock* entry_block_;
+  HBasicBlock* exit_block_;
+
+ private:
+  ZoneList<BreakContinueInfo*> break_continue_info_;
+};
+
+
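+// The root subgraph of a compilation: owns the basic blocks, the value
+// numbering, and the shared constants (undefined, 1, -1, true, and false).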
+class HGraph: public HSubgraph {
+ public:
+  explicit HGraph(CompilationInfo* info);
+
+  CompilationInfo* info() const { return info_; }
+  const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
+  const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
+  Handle<String> debug_name() const { return info_->function()->debug_name(); }
+  HEnvironment* start_environment() const { return start_environment_; }
+
+  void InitializeInferredTypes();
+  void InsertTypeConversions();
+  void InsertRepresentationChanges();
+  bool ProcessArgumentsObject();
+  void EliminateRedundantPhis();
+  void Canonicalize();
+  void OrderBlocks();
+  void AssignDominators();
+
+  // Returns false if there are phi-uses of the arguments-object
+  // which are not supported by the optimizing compiler.
+  bool CollectPhis();
+
+  Handle<Code> Compile();
+
+  void set_undefined_constant(HConstant* constant) {
+    undefined_constant_.set(constant);
+  }
+  HConstant* GetConstantUndefined() const { return undefined_constant_.get(); }
+  HConstant* GetConstant1();
+  HConstant* GetConstantMinus1();
+  HConstant* GetConstantTrue();
+  HConstant* GetConstantFalse();
+
+  HBasicBlock* CreateBasicBlock();
+  HArgumentsObject* GetArgumentsObject() const {
+    return arguments_object_.get();
+  }
+  bool HasArgumentsObject() const { return arguments_object_.is_set(); }
+
+  void SetArgumentsObject(HArgumentsObject* object) {
+    arguments_object_.set(object);
+  }
+
+  // True iff we are compiling for OSR and the statement is the OSR entry.
+  bool HasOsrEntryAt(IterationStatement* statement);
+
+  int GetMaximumValueID() const { return values_.length(); }
+  int GetNextBlockID() { return next_block_id_++; }
+  int GetNextValueID(HValue* value) {
+    values_.Add(value);
+    return values_.length() - 1;
+  }
+  HValue* LookupValue(int id) const {
+    if (id >= 0 && id < values_.length()) return values_[id];
+    return NULL;
+  }
+
+#ifdef DEBUG
+  void Verify() const;
+#endif
+
+ private:
+  void Postorder(HBasicBlock* block,
+                 BitVector* visited,
+                 ZoneList<HBasicBlock*>* order,
+                 HBasicBlock* loop_header);
+  void PostorderLoopBlocks(HLoopInformation* loop,
+                           BitVector* visited,
+                           ZoneList<HBasicBlock*>* order,
+                           HBasicBlock* loop_header);
+  HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
+                         Object* value);
+
+  void InsertTypeConversions(HInstruction* instr);
+  void PropagateMinusZeroChecks(HValue* value, BitVector* visited);
+  void InsertRepresentationChangeForUse(HValue* value,
+                                        HValue* use,
+                                        Representation to,
+                                        bool truncating);
+  void InsertRepresentationChanges(HValue* current);
+  void InferTypes(ZoneList<HValue*>* worklist);
+  void InitializeInferredTypes(int from_inclusive, int to_inclusive);
+  void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
+
+  int next_block_id_;
+  CompilationInfo* info_;
+  HEnvironment* start_environment_;
+  ZoneList<HBasicBlock*> blocks_;
+  ZoneList<HValue*> values_;
+  ZoneList<HPhi*>* phi_list_;
+  SetOncePointer<HConstant> undefined_constant_;
+  SetOncePointer<HConstant> constant_1_;
+  SetOncePointer<HConstant> constant_minus1_;
+  SetOncePointer<HConstant> constant_true_;
+  SetOncePointer<HConstant> constant_false_;
+  SetOncePointer<HArgumentsObject> arguments_object_;
+
+  friend class HSubgraph;
+
+  DISALLOW_COPY_AND_ASSIGN(HGraph);
+};
+
+
+class HEnvironment: public ZoneObject {
+ public:
+  HEnvironment(HEnvironment* outer,
+               Scope* scope,
+               Handle<JSFunction> closure);
+
+  void Bind(Variable* variable, HValue* value) {
+    Bind(IndexFor(variable), value);
+
+    if (FLAG_trace_environment) {
+      PrintF("Slot index=%d name=%s\n",
+             variable->AsSlot()->index(),
+             *variable->name()->ToCString());
+    }
+  }
+
+  void Bind(int index, HValue* value) {
+    ASSERT(value != NULL);
+    if (!assigned_variables_.Contains(index)) {
+      assigned_variables_.Add(index);
+    }
+    values_[index] = value;
+  }
+
+  HValue* Lookup(Variable* variable) const {
+    return Lookup(IndexFor(variable));
+  }
+  HValue* Lookup(int index) const {
+    HValue* result = values_[index];
+    ASSERT(result != NULL);
+    return result;
+  }
+
+  void Push(HValue* value) {
+    ASSERT(value != NULL);
+    ++push_count_;
+    values_.Add(value);
+  }
+
+  HValue* Top() const { return ExpressionStackAt(0); }
+
+  HValue* ExpressionStackAt(int index_from_top) const {
+    int index = values_.length() - index_from_top - 1;
+    ASSERT(IsExpressionStackIndex(index));
+    return values_[index];
+  }
+
+  void SetExpressionStackAt(int index_from_top, HValue* value) {
+    int index = values_.length() - index_from_top - 1;
+    ASSERT(IsExpressionStackIndex(index));
+    values_[index] = value;
+  }
+
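+  // A pop first cancels a pending push; only when there are no pending
+  // pushes is it recorded in pop_count_.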
+  HValue* Pop() {
+    ASSERT(!IsExpressionStackEmpty());
+    if (push_count_ > 0) {
+      --push_count_;
+      ASSERT(push_count_ >= 0);
+    } else {
+      ++pop_count_;
+    }
+    return values_.RemoveLast();
+  }
+
+  void Drop(int count) {
+    for (int i = 0; i < count; ++i) {
+      Pop();
+    }
+  }
+
+  Handle<JSFunction> closure() const { return closure_; }
+
+  // ID of the original AST node to identify deoptimization points.
+  int ast_id() const { return ast_id_; }
+  void set_ast_id(int id) { ast_id_ = id; }
+
+  const ZoneList<HValue*>* values() const { return &values_; }
+  const ZoneList<int>* assigned_variables() const {
+    return &assigned_variables_;
+  }
+  int parameter_count() const { return parameter_count_; }
+  int local_count() const { return local_count_; }
+  int push_count() const { return push_count_; }
+  int pop_count() const { return pop_count_; }
+  int total_count() const { return values_.length(); }
+  HEnvironment* outer() const { return outer_; }
+  HEnvironment* Copy() const;
+  HEnvironment* CopyWithoutHistory() const;
+  HEnvironment* CopyAsLoopHeader(HBasicBlock* block) const;
+
+  // Create an "inlined version" of this environment, where the original
+  // environment is the outer environment but the top expression stack
+  // elements are moved to an inner environment as parameters. If
+  // is_speculative, the argument values are expected to be PushArgument
+  // instructions, otherwise they are the actual values.
+  HEnvironment* CopyForInlining(Handle<JSFunction> target,
+                                FunctionLiteral* function,
+                                bool is_speculative,
+                                HConstant* undefined) const;
+
+  void AddIncomingEdge(HBasicBlock* block, HEnvironment* other);
+  void ClearHistory() {
+    pop_count_ = 0;
+    push_count_ = 0;
+    assigned_variables_.Clear();
+  }
+  void SetValueAt(int index, HValue* value) {
+    ASSERT(index < total_count());
+    values_[index] = value;
+  }
+
+  void PrintTo(StringStream* stream);
+  void PrintToStd();
+
+ private:
+  explicit HEnvironment(const HEnvironment* other);
+
+  bool IsExpressionStackIndex(int index) const {
+    return index >= parameter_count_ + local_count_;
+  }
+  bool IsExpressionStackEmpty() const {
+    int length = values_.length();
+    int first_expression = parameter_count() + local_count();
+    ASSERT(length >= first_expression);
+    return length == first_expression;
+  }
+  void Initialize(int parameter_count, int local_count, int stack_height);
+  void Initialize(const HEnvironment* other);
+  int VariableToIndex(Variable* var);
+  int IndexFor(Variable* variable) const;
+
+  Handle<JSFunction> closure_;
+  // Value array [parameters] [locals] [temporaries].
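+  // Parameter i lives at index i, local j at parameter_count_ + j, and
+  // everything at or above parameter_count_ + local_count_ is expression
+  // stack (cf. IsExpressionStackIndex above).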
+  ZoneList<HValue*> values_;
+  ZoneList<int> assigned_variables_;
+  int parameter_count_;
+  int local_count_;
+  HEnvironment* outer_;
+  int pop_count_;
+  int push_count_;
+  int ast_id_;
+};
+
+
+class HGraphBuilder;
+
+class AstContext {
+ public:
+  bool IsEffect() const { return kind_ == Expression::kEffect; }
+  bool IsValue() const { return kind_ == Expression::kValue; }
+  bool IsTest() const { return kind_ == Expression::kTest; }
+
+ protected:
+  AstContext(HGraphBuilder* owner, Expression::Context kind);
+  virtual ~AstContext();
+
+ private:
+  HGraphBuilder* owner_;
+  Expression::Context kind_;
+  AstContext* outer_;
+};
+
+
+class EffectContext: public AstContext {
+ public:
+  explicit EffectContext(HGraphBuilder* owner)
+      : AstContext(owner, Expression::kEffect) {
+  }
+};
+
+
+class ValueContext: public AstContext {
+ public:
+  explicit ValueContext(HGraphBuilder* owner)
+      : AstContext(owner, Expression::kValue) {
+  }
+};
+
+
+class TestContext: public AstContext {
+ public:
+  TestContext(HGraphBuilder* owner,
+              HBasicBlock* if_true,
+              HBasicBlock* if_false,
+              bool invert_true,
+              bool invert_false)
+      : AstContext(owner, Expression::kTest),
+        if_true_(if_true),
+        if_false_(if_false),
+        invert_true_(invert_true),
+        invert_false_(invert_false) {
+  }
+
+  static TestContext* cast(AstContext* context) {
+    ASSERT(context->IsTest());
+    return reinterpret_cast<TestContext*>(context);
+  }
+
+  HBasicBlock* if_true() const { return if_true_; }
+  HBasicBlock* if_false() const { return if_false_; }
+
+  bool invert_true() { return invert_true_; }
+  bool invert_false() { return invert_false_; }
+
+ private:
+  HBasicBlock* if_true_;
+  HBasicBlock* if_false_;
+  bool invert_true_;
+  bool invert_false_;
+};
+
+
+class HGraphBuilder: public AstVisitor {
+ public:
+  explicit HGraphBuilder(TypeFeedbackOracle* oracle)
+      : oracle_(oracle),
+        graph_(NULL),
+        current_subgraph_(NULL),
+        peeled_statement_(NULL),
+        ast_context_(NULL),
+        call_context_(NULL),
+        function_return_(NULL),
+        inlined_count_(0) { }
+
+  HGraph* CreateGraph(CompilationInfo* info);
+
+ private:
+  // Type of a member function that generates inline code for a native function.
+  typedef void (HGraphBuilder::*InlineFunctionGenerator)(int argument_count);
+
+  // Forward declarations for inner scope classes.
+  class SubgraphScope;
+
+  static const InlineFunctionGenerator kInlineFunctionGenerators[];
+
+  static const int kMaxCallPolymorphism = 4;
+  static const int kMaxLoadPolymorphism = 4;
+  static const int kMaxStorePolymorphism = 4;
+
+  static const int kMaxInlinedNodes = 196;
+  static const int kMaxInlinedSize = 196;
+  static const int kMaxSourceSize = 600;
+
+  // Simple accessors.
+  TypeFeedbackOracle* oracle() const { return oracle_; }
+  HGraph* graph() const { return graph_; }
+  HSubgraph* subgraph() const { return current_subgraph_; }
+  AstContext* ast_context() const { return ast_context_; }
+  void set_ast_context(AstContext* context) { ast_context_ = context; }
+  AstContext* call_context() const { return call_context_; }
+  HBasicBlock* function_return() const { return function_return_; }
+  HEnvironment* environment() const { return subgraph()->environment(); }
+
+  HBasicBlock* CurrentBlock() const { return subgraph()->exit_block(); }
+
+  // Generators for inline runtime functions.
+#define INLINE_FUNCTION_GENERATOR_DECLARATION(Name, argc, ressize)          \
+    void Generate##Name(int argument_count);
+
+  INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
+  INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
+#undef INLINE_FUNCTION_GENERATOR_DECLARATION
+
+  void Bailout(const char* reason);
+
+  void AppendPeeledWhile(IterationStatement* stmt,
+                         HSubgraph* cond_graph,
+                         HSubgraph* body_graph,
+                         HSubgraph* exit_graph);
+
+  void AddToSubgraph(HSubgraph* graph, ZoneList<Statement*>* stmts);
+  void AddToSubgraph(HSubgraph* graph, Statement* stmt);
+  void AddToSubgraph(HSubgraph* graph, Expression* expr);
+  void AddConditionToSubgraph(HSubgraph* subgraph,
+                              Expression* expr,
+                              HSubgraph* true_graph,
+                              HSubgraph* false_graph);
+
+  void Push(HValue* value) { environment()->Push(value); }
+  HValue* Pop() { return environment()->Pop(); }
+  HValue* Top() const { return environment()->Top(); }
+  void Drop(int n) { environment()->Drop(n); }
+  void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
+
+  void VisitForValue(Expression* expr);
+  void VisitForEffect(Expression* expr);
+  void VisitForControl(Expression* expr,
+                       HBasicBlock* true_block,
+                       HBasicBlock* false_block,
+                       bool invert_true,
+                       bool invert_false);
+
+  // Visit an expression in a 'condition' context, i.e., in a control
+  // context but not a subexpression of logical and, or, or not.
+  void VisitCondition(Expression* expr,
+                      HBasicBlock* true_graph,
+                      HBasicBlock* false_graph,
+                      bool invert_true,
+                      bool invert_false);
+  // Visit an argument and wrap it in a PushArgument instruction.
+  HValue* VisitArgument(Expression* expr);
+  void VisitArgumentList(ZoneList<Expression*>* arguments);
+
+  HInstruction* AddInstruction(HInstruction* instr);
+  void AddSimulate(int id);
+  void AddPhi(HPhi* phi);
+
+  void PushAndAdd(HInstruction* instr);
+  void PushAndAdd(HInstruction* instr, int position);
+
+  void PushArgumentsForStubCall(int argument_count);
+
+  // Initialize the arguments to the call based on the environment, add it
+  // to the graph, and drop the arguments from the environment.
+  void ProcessCall(HCall* call, int source_position);
+
+  void AssumeRepresentation(HValue* value, Representation r);
+  static Representation ToRepresentation(TypeInfo info);
+
+  void SetupScope(Scope* scope);
+  virtual void VisitStatements(ZoneList<Statement*>* statements);
+
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  bool ShouldPeel(HSubgraph* cond, HSubgraph* body);
+
+  HBasicBlock* CreateBasicBlock(HEnvironment* env);
+  HSubgraph* CreateEmptySubgraph();
+  HSubgraph* CreateGotoSubgraph(HEnvironment* env);
+  HSubgraph* CreateBranchSubgraph(HEnvironment* env);
+  HSubgraph* CreateLoopHeaderSubgraph(HEnvironment* env);
+  HSubgraph* CreateInlinedSubgraph(HEnvironment* outer,
+                                   Handle<JSFunction> target,
+                                   FunctionLiteral* function);
+
+  // Helpers for flow graph construction.
+  void LookupGlobalPropertyCell(VariableProxy* expr,
+                                LookupResult* lookup,
+                                bool is_store);
+
+  bool TryArgumentsAccess(Property* expr);
+  bool TryCallApply(Call* expr);
+  bool TryInline(Call* expr);
+  bool TryMathFunctionInline(Call* expr);
+  void TraceInline(Handle<JSFunction> target, bool result);
+
+  void HandleGlobalVariableAssignment(VariableProxy* proxy,
+                                      HValue* value,
+                                      int position);
+  void HandleGlobalVariableLoad(VariableProxy* expr);
+  void HandlePropertyAssignment(Assignment* expr);
+  void HandleCompoundAssignment(Assignment* expr);
+  void HandlePolymorphicLoadNamedField(Property* expr,
+                                       HValue* object,
+                                       ZoneMapList* types,
+                                       Handle<String> name);
+  void HandlePolymorphicStoreNamedField(Assignment* expr,
+                                        HValue* object,
+                                        HValue* value,
+                                        ZoneMapList* types,
+                                        Handle<String> name);
+  void HandlePolymorphicCallNamed(Call* expr,
+                                  HValue* receiver,
+                                  ZoneMapList* types,
+                                  Handle<String> name);
+
+  HInstruction* BuildBinaryOperation(BinaryOperation* expr,
+                                     HValue* left,
+                                     HValue* right);
+  HInstruction* BuildIncrement(HValue* value, bool increment);
+  HInstruction* BuildLoadNamedField(HValue* object,
+                                    Property* expr,
+                                    Handle<Map> type,
+                                    LookupResult* result,
+                                    bool smi_and_map_check);
+  HInstruction* BuildLoadNamedGeneric(HValue* object, Property* expr);
+  HInstruction* BuildLoadKeyedFastElement(HValue* object,
+                                          HValue* key,
+                                          Property* expr);
+  HInstruction* BuildLoadKeyedGeneric(HValue* object,
+                                      HValue* key);
+
+  HInstruction* BuildLoadNamed(HValue* object,
+                               Property* prop,
+                               Handle<Map> map,
+                               Handle<String> name);
+  HInstruction* BuildStoreNamed(HValue* object,
+                                HValue* value,
+                                Expression* expr);
+  HInstruction* BuildStoreNamedField(HValue* object,
+                                     Handle<String> name,
+                                     HValue* value,
+                                     Handle<Map> type,
+                                     LookupResult* lookup,
+                                     bool smi_and_map_check);
+  HInstruction* BuildStoreNamedGeneric(HValue* object,
+                                       Handle<String> name,
+                                       HValue* value);
+  HInstruction* BuildStoreKeyedGeneric(HValue* object,
+                                       HValue* key,
+                                       HValue* value);
+
+  HInstruction* BuildStoreKeyedFastElement(HValue* object,
+                                           HValue* key,
+                                           HValue* val,
+                                           Expression* expr);
+
+  HCompare* BuildSwitchCompare(HSubgraph* subgraph,
+                               HValue* switch_value,
+                               CaseClause* clause);
+
+  void AddCheckConstantFunction(Call* expr,
+                                HValue* receiver,
+                                Handle<Map> receiver_map,
+                                bool smi_and_map_check);
+
+  HBasicBlock* BuildTypeSwitch(ZoneMapList* maps,
+                               ZoneList<HSubgraph*>* subgraphs,
+                               HValue* receiver,
+                               int join_id);
+
+  TypeFeedbackOracle* oracle_;
+  HGraph* graph_;
+  HSubgraph* current_subgraph_;
+  IterationStatement* peeled_statement_;
+  // Expression context of the currently visited subexpression. NULL when
+  // visiting statements.
+  AstContext* ast_context_;
+
+  // During function inlining, expression context of the call being
+  // inlined. NULL when not inlining.
+  AstContext* call_context_;
+
+  // When inlining a call in an effect or value context, the return
+  // block. NULL otherwise. When inlining a call in a test context, there
+  // are a pair of target blocks in the call context.
+  HBasicBlock* function_return_;
+
+  int inlined_count_;
+
+  friend class AstContext;  // Pushes and pops the AST context stack.
+
+  DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
+};
+
+
+class HValueMap: public ZoneObject {
+ public:
+  HValueMap()
+      : array_size_(0),
+        lists_size_(0),
+        count_(0),
+        present_flags_(0),
+        array_(NULL),
+        lists_(NULL),
+        free_list_head_(kNil) {
+    ResizeLists(kInitialSize);
+    Resize(kInitialSize);
+  }
+
+  void Kill(int flags);
+
+  void Add(HValue* value) {
+    present_flags_ |= value->flags();
+    Insert(value);
+  }
+
+  HValue* Lookup(HValue* value) const;
+  HValueMap* Copy() const { return new HValueMap(this); }
+
+ private:
+  // A linked list of HValue* values.  Stored in arrays.
+  struct HValueMapListElement {
+    HValue* value;
+    int next;  // Index in the array of the next list element.
+  };
+  static const int kNil = -1;  // The end of a linked list.
+
+  // Must be a power of 2.
+  static const int kInitialSize = 16;
+
+  explicit HValueMap(const HValueMap* other);
+
+  void Resize(int new_size);
+  void ResizeLists(int new_size);
+  void Insert(HValue* value);
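+  // Maps a hash value to a bucket index. Correct only because array_size_
+  // is kept a power of two, so the mask covers exactly [0, array_size_).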
+  uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
+
+  int array_size_;
+  int lists_size_;
+  int count_;  // The number of values stored in the HValueMap.
+  int present_flags_;  // All flags that are in any value in the HValueMap.
+  HValueMapListElement* array_;  // Primary store - contains the first value
+  // with a given hash.  Colliding elements are stored in linked lists.
+  HValueMapListElement* lists_;  // The linked lists containing hash collisions.
+  int free_list_head_;  // Unused elements in lists_ are on the free list.
+};
+
+
+class HStatistics: public Malloced {
+ public:
+  void Print();
+  void SaveTiming(const char* name, int64_t ticks);
+  static HStatistics* Instance() {
+    static SetOncePointer<HStatistics> instance;
+    if (!instance.is_set()) {
+      instance.set(new HStatistics());
+    }
+    return instance.get();
+  }
+
+ private:
+  HStatistics() : timing_(5), names_(5), total_(0), full_code_gen_(0) { }
+
+  List<int64_t> timing_;
+  List<const char*> names_;
+  int64_t total_;
+  int64_t full_code_gen_;
+};
+
+
+class HPhase BASE_EMBEDDED {
+ public:
+  static const char* const kFullCodeGen;
+  static const char* const kTotal;
+
+  explicit HPhase(const char* name) { Begin(name, NULL, NULL, NULL); }
+  HPhase(const char* name, HGraph* graph) {
+    Begin(name, graph, NULL, NULL);
+  }
+  HPhase(const char* name, LChunk* chunk) {
+    Begin(name, NULL, chunk, NULL);
+  }
+  HPhase(const char* name, LAllocator* allocator) {
+    Begin(name, NULL, NULL, allocator);
+  }
+
+  ~HPhase() {
+    End();
+  }
+
+ private:
+  void Begin(const char* name,
+             HGraph* graph,
+             LChunk* chunk,
+             LAllocator* allocator);
+  void End() const;
+
+  int64_t start_;
+  const char* name_;
+  HGraph* graph_;
+  LChunk* chunk_;
+  LAllocator* allocator_;
+};
+
+
+class HTracer: public Malloced {
+ public:
+  void TraceCompilation(FunctionLiteral* function);
+  void TraceHydrogen(const char* name, HGraph* graph);
+  void TraceLithium(const char* name, LChunk* chunk);
+  void TraceLiveRanges(const char* name, LAllocator* allocator);
+
+  static HTracer* Instance() {
+    static SetOncePointer<HTracer> instance;
+    if (!instance.is_set()) {
+      instance.set(new HTracer("hydrogen.cfg"));
+    }
+    return instance.get();
+  }
+
+ private:
+  class Tag BASE_EMBEDDED {
+   public:
+    Tag(HTracer* tracer, const char* name) {
+      name_ = name;
+      tracer_ = tracer;
+      tracer->PrintIndent();
+      tracer->trace_.Add("begin_%s\n", name);
+      tracer->indent_++;
+    }
+
+    ~Tag() {
+      tracer_->indent_--;
+      tracer_->PrintIndent();
+      tracer_->trace_.Add("end_%s\n", name_);
+      ASSERT(tracer_->indent_ >= 0);
+      tracer_->FlushToFile();
+    }
+
+   private:
+    HTracer* tracer_;
+    const char* name_;
+  };
+
+  explicit HTracer(const char* filename)
+      : filename_(filename), trace_(&string_allocator_), indent_(0) {
+    WriteChars(filename, "", 0, false);
+  }
+
+  void TraceLiveRange(LiveRange* range, const char* type);
+  void Trace(const char* name, HGraph* graph, LChunk* chunk);
+  void FlushToFile();
+
+  void PrintEmptyProperty(const char* name) {
+    PrintIndent();
+    trace_.Add("%s\n", name);
+  }
+
+  void PrintStringProperty(const char* name, const char* value) {
+    PrintIndent();
+    trace_.Add("%s \"%s\"\n", name, value);
+  }
+
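+  // Note: the value is printed truncated (toward zero) to a multiple of 1000.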
+  void PrintLongProperty(const char* name, int64_t value) {
+    PrintIndent();
+    trace_.Add("%s %d000\n", name, static_cast<int>(value / 1000));
+  }
+
+  void PrintBlockProperty(const char* name, int block_id) {
+    PrintIndent();
+    trace_.Add("%s \"B%d\"\n", name, block_id);
+  }
+
+  void PrintBlockProperty(const char* name, int block_id1, int block_id2) {
+    PrintIndent();
+    trace_.Add("%s \"B%d\" \"B%d\"\n", name, block_id1, block_id2);
+  }
+
+  void PrintIntProperty(const char* name, int value) {
+    PrintIndent();
+    trace_.Add("%s %d\n", name, value);
+  }
+
+  void PrintIndent() {
+    for (int i = 0; i < indent_; i++) {
+      trace_.Add("  ");
+    }
+  }
+
+  const char* filename_;
+  HeapStringAllocator string_allocator_;
+  StringStream trace_;
+  int indent_;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_HYDROGEN_H_
index ecbdfdc..54cfb5c 100644
@@ -120,6 +120,30 @@ Address* RelocInfo::target_reference_address() {
 }
 
 
+Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = Memory::Address_at(pc_);
+  return Handle<JSGlobalPropertyCell>(
+      reinterpret_cast<JSGlobalPropertyCell**>(address));
+}
+
+
+JSGlobalPropertyCell* RelocInfo::target_cell() {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = Memory::Address_at(pc_);
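+  // The embedded address points at the cell's value slot; stepping back by
+  // kValueOffset recovers the start of the cell object.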
+  Object* object = HeapObject::FromAddress(
+      address - JSGlobalPropertyCell::kValueOffset);
+  return reinterpret_cast<JSGlobalPropertyCell*>(object);
+}
+
+
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+  Memory::Address_at(pc_) = address;
+}
+
+
 Address RelocInfo::call_address() {
   ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
@@ -167,6 +191,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
     visitor->VisitPointer(target_object_address());
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
+  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+    visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     visitor->VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -190,6 +216,8 @@ void RelocInfo::Visit() {
     StaticVisitor::VisitPointer(target_object_address());
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(this);
+  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+    StaticVisitor::VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -246,6 +274,12 @@ Immediate::Immediate(Smi* value) {
 }
 
 
+Immediate::Immediate(Address addr) {
+  x_ = reinterpret_cast<int32_t>(addr);
+  rmode_ = RelocInfo::NONE;
+}
+
+
 void Assembler::emit(uint32_t x) {
   *reinterpret_cast<uint32_t*>(pc_) = x;
   pc_ += sizeof(uint32_t);
index 8687f2e..9582656 100644
@@ -32,7 +32,7 @@
 
 // The original source code covered by the above license has been modified
 // significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 
 #include "v8.h"
 
@@ -56,10 +56,10 @@ uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
 
 // The Probe method needs executable memory, so it uses Heap::CreateCode.
 // Allocation failure is silent and leads to a safe default.
-void CpuFeatures::Probe() {
+void CpuFeatures::Probe(bool portable) {
   ASSERT(Heap::HasBeenSetup());
   ASSERT(supported_ == 0);
-  if (Serializer::enabled()) {
+  if (portable && Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     return;  // No features if we might serialize.
   }
@@ -137,7 +137,7 @@ void CpuFeatures::Probe() {
   found_by_runtime_probing_ = supported_;
   uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
   supported_ |= os_guarantees;
-  found_by_runtime_probing_ &= ~os_guarantees;
+  found_by_runtime_probing_ &= portable ? ~os_guarantees : 0;
 }
 
 
@@ -435,6 +435,13 @@ void Assembler::push(const Immediate& x) {
 }
 
 
+void Assembler::push_imm32(int32_t imm32) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x68);
+  emit(imm32);
+}
+
+
 void Assembler::push(Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1542,7 +1549,9 @@ void Assembler::bind(NearLabel* L) {
   L->bind_to(pc_offset());
 }
 
+
 void Assembler::call(Label* L) {
+  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   if (L->is_bound()) {
@@ -1561,6 +1570,7 @@ void Assembler::call(Label* L) {
 
 
 void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
+  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   ASSERT(!RelocInfo::IsCodeTarget(rmode));
@@ -1570,6 +1580,7 @@ void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
 
 
 void Assembler::call(const Operand& adr) {
+  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xFF);
@@ -2420,6 +2431,17 @@ void Assembler::movd(XMMRegister dst, const Operand& src) {
 }
 
 
+void Assembler::pand(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0xDB);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::pxor(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
@@ -2491,7 +2513,7 @@ void Assembler::RecordDebugBreakSlot() {
 
 
 void Assembler::RecordComment(const char* msg) {
-  if (FLAG_debug_code) {
+  if (FLAG_code_comments) {
     EnsureSpace ensure_space(this);
     RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
   }
@@ -2623,9 +2645,15 @@ void Assembler::emit_farith(int b1, int b2, int i) {
 }
 
 
-void Assembler::dd(uint32_t data, RelocInfo::Mode reloc_info) {
+void Assembler::db(uint8_t data) {
+  EnsureSpace ensure_space(this);
+  EMIT(data);
+}
+
+
+void Assembler::dd(uint32_t data) {
   EnsureSpace ensure_space(this);
-  emit(data, reloc_info);
+  emit(data);
 }
 
 
index 681bcf9..2b4624c 100644
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license has been
 // modified significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 
 // A light-weight IA32 Assembler.
 
@@ -64,7 +64,36 @@ namespace internal {
 // and best performance in optimized code.
 //
 struct Register {
-  bool is_valid() const { return 0 <= code_ && code_ < 8; }
+  static const int kNumAllocatableRegisters = 5;
+  static const int kNumRegisters = 8;
+
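+  // Allocation indices: eax, ecx, edx and ebx keep their register codes
+  // (0-3), and edi (code 7) maps to index 4. esp, ebp and esi are not
+  // available to the allocator.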
+  static int ToAllocationIndex(Register reg) {
+    ASSERT(reg.code() < 4 || reg.code() == 7);
+    return (reg.code() == 7) ? 4 : reg.code();
+  }
+
+  static Register FromAllocationIndex(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    return (index == 4) ? from_code(7) : from_code(index);
+  }
+
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "eax",
+      "ecx",
+      "edx",
+      "ebx",
+      "edi"
+    };
+    return names[index];
+  }
+
+  static Register from_code(int code) {
+    Register r = { code };
+    return r;
+  }
+  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(Register reg) const { return code_ == reg.code_; }
   // eax, ebx, ecx and edx are byte registers, the rest are not.
   bool is_byte_register() const { return code_ <= 3; }
@@ -93,7 +122,40 @@ const Register no_reg = { -1 };
 
 
 struct XMMRegister {
-  bool is_valid() const { return 0 <= code_ && code_ < 8; }
+  static const int kNumAllocatableRegisters = 7;
+  static const int kNumRegisters = 8;
+
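+  // xmm0 is excluded from allocation (it appears to be reserved as a
+  // scratch register), so allocation indices are shifted down by one.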
+  static int ToAllocationIndex(XMMRegister reg) {
+    ASSERT(reg.code() != 0);
+    return reg.code() - 1;
+  }
+
+  static XMMRegister FromAllocationIndex(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    return from_code(index + 1);
+  }
+
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "xmm1",
+      "xmm2",
+      "xmm3",
+      "xmm4",
+      "xmm5",
+      "xmm6",
+      "xmm7"
+    };
+    return names[index];
+  }
+
+  static XMMRegister from_code(int code) {
+    XMMRegister r = { code };
+    return r;
+  }
+
+  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+  bool is(XMMRegister reg) const { return code_ == reg.code_; }
   int code() const {
     ASSERT(is_valid());
     return code_;
@@ -102,6 +164,7 @@ struct XMMRegister {
   int code_;
 };
 
+
 const XMMRegister xmm0 = { 0 };
 const XMMRegister xmm1 = { 1 };
 const XMMRegister xmm2 = { 2 };
@@ -111,6 +174,17 @@ const XMMRegister xmm5 = { 5 };
 const XMMRegister xmm6 = { 6 };
 const XMMRegister xmm7 = { 7 };
 
+
+typedef XMMRegister DoubleRegister;
+
+
+// Index of register used in pusha/popa.
+// Order of pushed registers: EAX, ECX, EDX, EBX, ESP, EBP, ESI, and EDI
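+// For example, edi (code 7) is pushed last and so is at index 0 from esp;
+// eax (code 0) is pushed first and ends up deepest, at index 7.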
+inline int EspIndexForPushAll(Register reg) {
+  return Register::kNumRegisters - 1 - reg.code();
+}
+
+
 enum Condition {
   // any value < 0 is considered no_condition
   no_condition  = -1,
@@ -202,6 +276,7 @@ class Immediate BASE_EMBEDDED {
   inline explicit Immediate(const ExternalReference& ext);
   inline explicit Immediate(Handle<Object> handle);
   inline explicit Immediate(Smi* value);
+  inline explicit Immediate(Address addr);
 
   static Immediate CodeRelativeOffset(Label* label) {
     return Immediate(label);
@@ -281,6 +356,11 @@ class Operand BASE_EMBEDDED {
                    RelocInfo::EXTERNAL_REFERENCE);
   }
 
+  static Operand Cell(Handle<JSGlobalPropertyCell> cell) {
+    return Operand(reinterpret_cast<int32_t>(cell.location()),
+                   RelocInfo::GLOBAL_PROPERTY_CELL);
+  }
+
   // Returns true if this Operand is a wrapper for the specified register.
   bool is_reg(Register reg) const;
 
@@ -369,9 +449,12 @@ class Displacement BASE_EMBEDDED {
 //   }
 class CpuFeatures : public AllStatic {
  public:
-  // Detect features of the target CPU. Set safe defaults if the serializer
-  // is enabled (snapshots must be portable).
-  static void Probe();
+  // Detect features of the target CPU. If the portable flag is set,
+  // the method sets safe defaults if the serializer is enabled
+  // (snapshots must be portable).
+  static void Probe(bool portable);
+  static void Clear() { supported_ = 0; }
+
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(CpuFeature f) {
     if (f == SSE2 && !FLAG_enable_sse2) return false;
@@ -484,6 +567,11 @@ class Assembler : public Malloced {
   // The debug break slot must be able to contain a call instruction.
   static const int kDebugBreakSlotLength = kCallInstructionLength;
 
+  // One byte opcode for test eax,0xXXXXXXXX.
+  static const byte kTestEaxByte = 0xA9;
+  // One byte opcode for test al, 0xXX.
+  static const byte kTestAlByte = 0xA8;
+
   // ---------------------------------------------------------------------------
   // Code generation
   //
@@ -519,6 +607,7 @@ class Assembler : public Malloced {
   void popfd();
 
   void push(const Immediate& x);
+  void push_imm32(int32_t imm32);
   void push(Register src);
   void push(const Operand& src);
 
@@ -818,6 +907,7 @@ class Assembler : public Malloced {
   void movd(XMMRegister dst, const Operand& src);
   void movsd(XMMRegister dst, XMMRegister src);
 
+  void pand(XMMRegister dst, XMMRegister src);
   void pxor(XMMRegister dst, XMMRegister src);
   void ptest(XMMRegister dst, XMMRegister src);
 
@@ -845,12 +935,13 @@ class Assembler : public Malloced {
   void RecordDebugBreakSlot();
 
   // Record a comment relocation entry that can be used by a disassembler.
-  // Use --debug_code to enable.
+  // Use --code-comments to enable.
   void RecordComment(const char* msg);
 
-  // Writes a single word of data in the code stream.
-  // Used for inline tables, e.g., jump-tables.
-  void dd(uint32_t data, RelocInfo::Mode reloc_info);
+  // Writes a single byte or word of data in the code stream.  Used for
+  // inline tables, e.g., jump-tables.
+  void db(uint8_t data);
+  void dd(uint32_t data);
 
   int pc_offset() const { return pc_ - buffer_; }
 
@@ -878,8 +969,8 @@ class Assembler : public Malloced {
   void emit_sse_operand(XMMRegister dst, XMMRegister src);
   void emit_sse_operand(Register dst, XMMRegister src);
 
- private:
   byte* addr_at(int pos)  { return buffer_ + pos; }
+ private:
   byte byte_at(int pos)  { return buffer_[pos]; }
   void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
   uint32_t long_at(int pos)  {
index 0ad3e6d..c28e144 100644
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,6 +31,8 @@
 
 #include "code-stubs.h"
 #include "codegen-inl.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
 
 namespace v8 {
 namespace internal {
@@ -480,6 +482,85 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
 }
 
 
+void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Push a copy of the function onto the stack.
+  __ push(edi);
+
+  __ push(edi);  // Function is also the parameter to the runtime call.
+  __ CallRuntime(Runtime::kLazyRecompile, 1);
+
+  // Restore function and tear down temporary frame.
+  __ pop(edi);
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
+  __ jmp(Operand(ecx));
+}
+
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Pass the deoptimization type to the runtime system.
+  __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
+  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+
+  // Get the full codegen state from the stack and untag it.
+  __ mov(ecx, Operand(esp, 1 * kPointerSize));
+  __ SmiUntag(ecx);
+
+  // Switch on the state.
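+  // NO_REGISTERS: only the state word is on the stack. TOS_REG: the saved
+  // top-of-stack value sits below the state word and is reloaded into eax.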
+  NearLabel not_no_registers, not_tos_eax;
+  __ cmp(ecx, FullCodeGenerator::NO_REGISTERS);
+  __ j(not_equal, &not_no_registers);
+  __ ret(1 * kPointerSize);  // Remove state.
+
+  __ bind(&not_no_registers);
+  __ mov(eax, Operand(esp, 2 * kPointerSize));
+  __ cmp(ecx, FullCodeGenerator::TOS_REG);
+  __ j(not_equal, &not_tos_eax);
+  __ ret(2 * kPointerSize);  // Remove state, eax.
+
+  __ bind(&not_tos_eax);
+  __ Abort("no cases left");
+}
+
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
+  // TODO(kasperl): Do we need to save/restore the XMM registers too?
+
+  // For now, we are relying on the fact that Runtime::NotifyOSR
+  // doesn't do any garbage collection, which allows us to save/restore
+  // the registers without worrying about which of them contain
+  // pointers. This seems a bit fragile.
+  __ pushad();
+  __ EnterInternalFrame();
+  __ CallRuntime(Runtime::kNotifyOSR, 0);
+  __ LeaveInternalFrame();
+  __ popad();
+  __ ret(0);
+}
+
+
 void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // 1. Make sure we have at least one argument.
   { Label done;
@@ -1418,6 +1499,76 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
 }
 
 
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  // We shouldn't be performing on-stack replacement in the first
+  // place if the CPU features we need for the optimized Crankshaft
+  // code aren't supported.
+  CpuFeatures::Probe(false);
+  if (!CpuFeatures::IsSupported(SSE2)) {
+    __ Abort("Unreachable code: Cannot optimize without SSE2 support.");
+    return;
+  }
+
+  // Get the loop depth of the stack guard check. This is recorded in
+  // a test(eax, depth) instruction right after the call.
+  Label stack_check;
+  __ mov(ebx, Operand(esp, 0));  // return address
+  if (FLAG_debug_code) {
+    __ cmpb(Operand(ebx, 0), Assembler::kTestAlByte);
+    __ Assert(equal, "test eax instruction not found after loop stack check");
+  }
+  __ movzx_b(ebx, Operand(ebx, 1));  // depth
+
+  // Get the loop nesting level at which we allow OSR from the
+  // unoptimized code and check if we want to do OSR yet. If not we
+  // should perform a stack guard check so we can get interrupts while
+  // waiting for on-stack replacement.
+  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+  __ cmpb(ebx, FieldOperand(ecx, Code::kAllowOSRAtLoopNestingLevelOffset));
+  __ j(greater, &stack_check);
+
+  // Pass the function to optimize as the argument to the on-stack
+  // replacement runtime function.
+  __ EnterInternalFrame();
+  __ push(eax);
+  __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  __ LeaveInternalFrame();
+
+  // If the result was -1 it means that we couldn't optimize the
+  // function. Just return and continue in the unoptimized version.
+  NearLabel skip;
+  __ cmp(Operand(eax), Immediate(Smi::FromInt(-1)));
+  __ j(not_equal, &skip);
+  __ ret(0);
+
+  // If we decide not to perform on-stack replacement we perform a
+  // stack guard check to enable interrupts.
+  __ bind(&stack_check);
+  NearLabel ok;
+  ExternalReference stack_limit =
+      ExternalReference::address_of_stack_limit();
+  __ cmp(esp, Operand::StaticVariable(stack_limit));
+  __ j(above_equal, &ok, taken);
+  StackCheckStub stub;
+  __ TailCallStub(&stub);
+  __ Abort("Unreachable code: returned from tail call.");
+  __ bind(&ok);
+  __ ret(0);
+
+  __ bind(&skip);
+  // Untag the AST id and push it on the stack.
+  __ SmiUntag(eax);
+  __ push(eax);
+
+  // Generate the code for doing the frame-to-frame translation using
+  // the deoptimizer infrastructure.
+  Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
+  generator.Generate();
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
index 6cf6d12..3233be7 100644
@@ -64,6 +64,8 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
   __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
   __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
+  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
+         Immediate(Factory::undefined_value()));
 
   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
@@ -446,6 +448,11 @@ class FloatingPointHelper : public AllStatic {
                                  Label* non_float,
                                  Register scratch);
 
+  // Checks that the two floating point numbers on top of the FPU stack
+  // have int32 values.
+  static void CheckFloatOperandsAreInt32(MacroAssembler* masm,
+                                         Label* non_int32);
+
   // Takes the operands in edx and eax and loads them as integers in eax
   // and ecx.
   static void LoadAsIntegers(MacroAssembler* masm,
@@ -460,8 +467,16 @@ class FloatingPointHelper : public AllStatic {
                                      bool use_sse3,
                                      Label* operand_conversion_failure);
 
-  // Test if operands are smis or heap numbers and load them
-  // into xmm0 and xmm1 if they are. Operands are in edx and eax.
+  // Must only be called after LoadUnknownsAsIntegers.  Assumes that the
+  // operands are pushed on the stack, and that their conversions to int32
+  // are in eax and ecx.  Checks that the original numbers were in the int32
+  // range.
+  static void CheckLoadedIntegersWereInt32(MacroAssembler* masm,
+                                           bool use_sse3,
+                                           Label* not_int32);
+
+  // Assumes that operands are smis or heap numbers and loads them
+  // into xmm0 and xmm1. Operands are in edx and eax.
   // Leaves operands unchanged.
   static void LoadSSE2Operands(MacroAssembler* masm);
 
@@ -474,6 +489,12 @@ class FloatingPointHelper : public AllStatic {
   // Similar to LoadSSE2Operands but assumes that both operands are smis.
   // Expects operands in edx, eax.
   static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
+
+  // Checks that the two floating point numbers loaded into xmm0 and xmm1
+  // have int32 values.
+  static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
+                                        Label* non_int32,
+                                        Register scratch);
 };
 
 
@@ -709,22 +730,27 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
     case Token::SHL: {
       Comment perform_float(masm, "-- Perform float operation on smis");
       __ bind(&use_fp_on_smis);
-      // Result we want is in left == edx, so we can put the allocated heap
-      // number in eax.
-      __ AllocateHeapNumber(eax, ecx, ebx, slow);
-      // Store the result in the HeapNumber and return.
-      if (CpuFeatures::IsSupported(SSE2)) {
-        CpuFeatures::Scope use_sse2(SSE2);
-        __ cvtsi2sd(xmm0, Operand(left));
-        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+      if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
+        // Result we want is in left == edx, so we can put the allocated heap
+        // number in eax.
+        __ AllocateHeapNumber(eax, ecx, ebx, slow);
+        // Store the result in the HeapNumber and return.
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          __ cvtsi2sd(xmm0, Operand(left));
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        } else {
+          // It's OK to overwrite the right argument on the stack because we
+          // are about to return.
+          __ mov(Operand(esp, 1 * kPointerSize), left);
+          __ fild_s(Operand(esp, 1 * kPointerSize));
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        }
+        GenerateReturn(masm);
       } else {
-        // It's OK to overwrite the right argument on the stack because we
-        // are about to return.
-        __ mov(Operand(esp, 1 * kPointerSize), left);
-        __ fild_s(Operand(esp, 1 * kPointerSize));
-        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
+        __ jmp(slow);
       }
-      GenerateReturn(masm);
       break;
     }
 
@@ -757,31 +783,36 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
         default: UNREACHABLE();
           break;
       }
-      __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
-      if (CpuFeatures::IsSupported(SSE2)) {
-        CpuFeatures::Scope use_sse2(SSE2);
-        FloatingPointHelper::LoadSSE2Smis(masm, ebx);
-        switch (op_) {
-          case Token::ADD: __ addsd(xmm0, xmm1); break;
-          case Token::SUB: __ subsd(xmm0, xmm1); break;
-          case Token::MUL: __ mulsd(xmm0, xmm1); break;
-          case Token::DIV: __ divsd(xmm0, xmm1); break;
-          default: UNREACHABLE();
-        }
-        __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
-      } else {  // SSE2 not available, use FPU.
-        FloatingPointHelper::LoadFloatSmis(masm, ebx);
-        switch (op_) {
-          case Token::ADD: __ faddp(1); break;
-          case Token::SUB: __ fsubp(1); break;
-          case Token::MUL: __ fmulp(1); break;
-          case Token::DIV: __ fdivp(1); break;
-          default: UNREACHABLE();
+      if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
+        __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          FloatingPointHelper::LoadSSE2Smis(masm, ebx);
+          switch (op_) {
+            case Token::ADD: __ addsd(xmm0, xmm1); break;
+            case Token::SUB: __ subsd(xmm0, xmm1); break;
+            case Token::MUL: __ mulsd(xmm0, xmm1); break;
+            case Token::DIV: __ divsd(xmm0, xmm1); break;
+            default: UNREACHABLE();
+          }
+          __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
+        } else {  // SSE2 not available, use FPU.
+          FloatingPointHelper::LoadFloatSmis(masm, ebx);
+          switch (op_) {
+            case Token::ADD: __ faddp(1); break;
+            case Token::SUB: __ fsubp(1); break;
+            case Token::MUL: __ fmulp(1); break;
+            case Token::DIV: __ fdivp(1); break;
+            default: UNREACHABLE();
+          }
+          __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
         }
-        __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
+        __ mov(eax, ecx);
+        GenerateReturn(masm);
+      } else {
+        ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
+        __ jmp(slow);
       }
-      __ mov(eax, ecx);
-      GenerateReturn(masm);
       break;
     }
 
@@ -821,6 +852,13 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
 
   __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
 
+  if (runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI) {
+    Label slow;
+    if (ShouldGenerateSmiCode()) GenerateSmiCode(masm, &slow);
+    __ bind(&slow);
+    GenerateTypeTransition(masm);
+  }
+
   // Generate fast case smi code if requested. This flag is set when the fast
   // case smi code is not generated by the caller. Generating it here will speed
   // up common operations.
@@ -998,43 +1036,1325 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
         }
         break;
       }
-      default: UNREACHABLE(); break;
+      default: UNREACHABLE(); break;
+    }
+  }
+
+  // If all else fails, use the runtime system to get the correct
+  // result. If the arguments were passed in registers, place them on the
+  // stack in the correct order below the return address.
+
+  // Avoid hitting the string ADD code below when allocation fails in
+  // the floating point code above.
+  if (op_ != Token::ADD) {
+    __ bind(&call_runtime);
+  }
+
+  if (HasArgsInRegisters()) {
+    GenerateRegisterArgsPush(masm);
+  }
+
+  switch (op_) {
+    case Token::ADD: {
+      // Test for string arguments before calling runtime.
+
+      // If this stub has already generated FP-specific code then the arguments
+      // are already in edx, eax
+      if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
+        GenerateLoadArguments(masm);
+      }
+
+      // Registers containing left and right operands respectively.
+      Register lhs, rhs;
+      if (HasArgsReversed()) {
+        lhs = eax;
+        rhs = edx;
+      } else {
+        lhs = edx;
+        rhs = eax;
+      }
+
+      // Test if left operand is a string.
+      NearLabel lhs_not_string;
+      __ test(lhs, Immediate(kSmiTagMask));
+      __ j(zero, &lhs_not_string);
+      __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
+      __ j(above_equal, &lhs_not_string);
+
+      StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+      __ TailCallStub(&string_add_left_stub);
+
+      NearLabel call_runtime_with_args;
+      // Left operand is not a string, test right.
+      __ bind(&lhs_not_string);
+      __ test(rhs, Immediate(kSmiTagMask));
+      __ j(zero, &call_runtime_with_args);
+      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
+      __ j(above_equal, &call_runtime_with_args);
+
+      StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+      __ TailCallStub(&string_add_right_stub);
+
+      // Neither argument is a string.
+      __ bind(&call_runtime);
+      if (HasArgsInRegisters()) {
+        GenerateRegisterArgsPush(masm);
+      }
+      __ bind(&call_runtime_with_args);
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+      break;
+    }
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+      break;
+    case Token::MUL:
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+      break;
+    case Token::DIV:
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+      break;
+    case Token::MOD:
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+      break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+      break;
+    case Token::SHR:
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
+                                                       Label* alloc_failure) {
+  Label skip_allocation;
+  OverwriteMode mode = mode_;
+  if (HasArgsReversed()) {
+    if (mode == OVERWRITE_RIGHT) {
+      mode = OVERWRITE_LEFT;
+    } else if (mode == OVERWRITE_LEFT) {
+      mode = OVERWRITE_RIGHT;
+    }
+  }
+  switch (mode) {
+    case OVERWRITE_LEFT: {
+      // If the argument in edx is already an object, we skip the
+      // allocation of a heap number.
+      __ test(edx, Immediate(kSmiTagMask));
+      __ j(not_zero, &skip_allocation, not_taken);
+      // Allocate a heap number for the result. Keep eax and edx intact
+      // for the possible runtime call.
+      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+      // Now edx can be overwritten, losing one of the arguments, as we are
+      // done and will not need it any more.
+      __ mov(edx, Operand(ebx));
+      __ bind(&skip_allocation);
+      // Use the object in edx as the result holder.
+      __ mov(eax, Operand(edx));
+      break;
+    }
+    case OVERWRITE_RIGHT:
+      // If the argument in eax is already an object, we skip the
+      // allocation of a heap number.
+      __ test(eax, Immediate(kSmiTagMask));
+      __ j(not_zero, &skip_allocation, not_taken);
+      // Fall through!
+    case NO_OVERWRITE:
+      // Allocate a heap number for the result. Keep eax and edx intact
+      // for the possible runtime call.
+      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+      // Now eax can be overwritten, losing one of the arguments, as we are
+      // done and will not need it any more.
+      __ mov(eax, ebx);
+      __ bind(&skip_allocation);
+      break;
+    default: UNREACHABLE();
+  }
+}
+
+
+void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
+  // If arguments are not passed in registers read them from the stack.
+  ASSERT(!HasArgsInRegisters());
+  __ mov(eax, Operand(esp, 1 * kPointerSize));
+  __ mov(edx, Operand(esp, 2 * kPointerSize));
+}
+
+
+void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
+  // If arguments are not passed in registers remove them from the stack before
+  // returning.
+  if (!HasArgsInRegisters()) {
+    __ ret(2 * kPointerSize);  // Remove both operands
+  } else {
+    __ ret(0);
+  }
+}
+
+
+void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+  ASSERT(HasArgsInRegisters());
+  __ pop(ecx);
+  if (HasArgsReversed()) {
+    __ push(eax);
+    __ push(edx);
+  } else {
+    __ push(edx);
+    __ push(eax);
+  }
+  __ push(ecx);
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  // Ensure the operands are on the stack.
+  if (HasArgsInRegisters()) {
+    GenerateRegisterArgsPush(masm);
+  }
+
+  __ pop(ecx);  // Save return address.
+
+  // Left and right arguments are now on top.
+  // Push this stub's key. Although the operation and the type info are
+  // encoded into the key, the encoding is opaque, so push them too.
+  __ push(Immediate(Smi::FromInt(MinorKey())));
+  __ push(Immediate(Smi::FromInt(op_)));
+  __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
+
+  __ push(ecx);  // Push return address.
+
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+      5,
+      1);
+}
+
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+  GenericBinaryOpStub stub(key, type_info);
+  return stub.GetCode();
+}
+
+
+Handle<Code> GetTypeRecordingBinaryOpStub(int key,
+    TRBinaryOpIC::TypeInfo type_info,
+    TRBinaryOpIC::TypeInfo result_type_info) {
+  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
+  return stub.GetCode();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  __ pop(ecx);  // Save return address.
+  __ push(edx);
+  __ push(eax);
+  // Left and right arguments are now on top.
+  // Push this stub's key. Although the operation and the type info are
+  // encoded into the key, the encoding is opaque, so push them too.
+  __ push(Immediate(Smi::FromInt(MinorKey())));
+  __ push(Immediate(Smi::FromInt(op_)));
+  __ push(Immediate(Smi::FromInt(operands_type_)));
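+  // Five arguments are now on the stack below the return address: the two
+  // operands and the three smi-encoded stub parameters.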
+
+  __ push(ecx);  // Push return address.
+
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
+      5,
+      1);
+}
+
+
+// Prepare for a type transition runtime call when the args are already on
+// the stack, under the return address.
+void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+    MacroAssembler* masm) {
+  __ pop(ecx);  // Save return address.
+  // Left and right arguments are already on top of the stack.
+  // Push this stub's key. Although the operation and the type info are
+  // encoded into the key, the encoding is opaque, so push them too.
+  __ push(Immediate(Smi::FromInt(MinorKey())));
+  __ push(Immediate(Smi::FromInt(op_)));
+  __ push(Immediate(Smi::FromInt(operands_type_)));
+
+  __ push(ecx);  // Push return address.
+
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
+      5,
+      1);
+}
+
+
+void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+  switch (operands_type_) {
+    case TRBinaryOpIC::UNINITIALIZED:
+      GenerateTypeTransition(masm);
+      break;
+    case TRBinaryOpIC::SMI:
+      GenerateSmiStub(masm);
+      break;
+    case TRBinaryOpIC::INT32:
+      GenerateInt32Stub(masm);
+      break;
+    case TRBinaryOpIC::HEAP_NUMBER:
+      GenerateHeapNumberStub(masm);
+      break;
+    case TRBinaryOpIC::STRING:
+      GenerateStringStub(masm);
+      break;
+    case TRBinaryOpIC::GENERIC:
+      GenerateGeneric(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+const char* TypeRecordingBinaryOpStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name;
+  switch (mode_) {
+    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+    default: overwrite_name = "UnknownOverwrite"; break;
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "TypeRecordingBinaryOpStub_%s_%s_%s",
+               op_name,
+               overwrite_name,
+               TRBinaryOpIC::GetName(operands_type_));
+  return name_;
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+    Label* slow,
+    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+  // 1. Move arguments into edx, eax except for DIV and MOD, which need the
+  // dividend in eax and edx free for the division.  Use eax, ebx for those.
+  Comment load_comment(masm, "-- Load arguments");
+  Register left = edx;
+  Register right = eax;
+  if (op_ == Token::DIV || op_ == Token::MOD) {
+    left = eax;
+    right = ebx;
+      __ mov(ebx, eax);
+      __ mov(eax, edx);
+  }
+
+
+  // 2. Prepare the smi check of both operands by oring them together.
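+  // A smi has tag bit 0 and a heap object tag bit 1, so the or'ed value has
+  // its tag bit set iff at least one operand is not a smi; testing the
+  // combined value therefore checks both operands at once.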
+  Comment smi_check_comment(masm, "-- Smi check arguments");
+  Label not_smis;
+  Register combined = ecx;
+  ASSERT(!left.is(combined) && !right.is(combined));
+  switch (op_) {
+    case Token::BIT_OR:
+      // Perform the operation into eax and smi check the result.  Preserve
+      // eax in case the result is not a smi.
+      ASSERT(!left.is(ecx) && !right.is(ecx));
+      __ mov(ecx, right);
+      __ or_(right, Operand(left));  // Bitwise or is commutative.
+      combined = right;
+      break;
+
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD:
+      __ mov(combined, right);
+      __ or_(combined, Operand(left));
+      break;
+
+    case Token::SHL:
+    case Token::SAR:
+    case Token::SHR:
+      // Move the right operand into ecx for the shift operation, use eax
+      // for the smi check register.
+      ASSERT(!left.is(ecx) && !right.is(ecx));
+      __ mov(ecx, right);
+      __ or_(right, Operand(left));
+      combined = right;
+      break;
+
+    default:
+      break;
+  }
+
+  // 3. Perform the smi check of the operands.
+  STATIC_ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
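+  // With kSmiTag == 0 a smi has its low bit clear while any heap object
+  // pointer has it set, so (left | right) passes the mask test below iff
+  // both operands are smis.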
+  __ test(combined, Immediate(kSmiTagMask));
+  __ j(not_zero, &not_smis, not_taken);
+
+  // 4. Operands are both smis, perform the operation leaving the result in
+  // eax and check the result if necessary.
+  Comment perform_smi(masm, "-- Perform smi operation");
+  Label use_fp_on_smis;
+  switch (op_) {
+    case Token::BIT_OR:
+      // Nothing to do.
+      break;
+
+    case Token::BIT_XOR:
+      ASSERT(right.is(eax));
+      __ xor_(right, Operand(left));  // Bitwise xor is commutative.
+      break;
+
+    case Token::BIT_AND:
+      ASSERT(right.is(eax));
+      __ and_(right, Operand(left));  // Bitwise and is commutative.
+      break;
+
+    case Token::SHL:
+      // Remove tags from operands (but keep sign).
+      __ SmiUntag(left);
+      __ SmiUntag(ecx);
+      // Perform the operation.
+      __ shl_cl(left);
+      // Check that the *signed* result fits in a smi.
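+      // The cmp computes left + 0x40000000 (mod 2^32); its sign flag is set
+      // exactly when the untagged value lies outside [-2^30, 2^30), i.e.
+      // when it cannot be smi-tagged.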
+      __ cmp(left, 0xc0000000);
+      __ j(sign, &use_fp_on_smis, not_taken);
+      // Tag the result and store it in register eax.
+      __ SmiTag(left);
+      __ mov(eax, left);
+      break;
+
+    case Token::SAR:
+      // Remove tags from operands (but keep sign).
+      __ SmiUntag(left);
+      __ SmiUntag(ecx);
+      // Perform the operation.
+      __ sar_cl(left);
+      // Tag the result and store it in register eax.
+      __ SmiTag(left);
+      __ mov(eax, left);
+      break;
+
+    case Token::SHR:
+      // Remove tags from operands (but keep sign).
+      __ SmiUntag(left);
+      __ SmiUntag(ecx);
+      // Perform the operation.
+      __ shr_cl(left);
+      // Check that the *unsigned* result fits in a smi.  Neither of the two
+      // high-order bits can be set:
+      //  - 0x80000000: the high bit would be lost when smi-tagging.
+      //  - 0x40000000: this number would convert to a negative value when
+      //    smi-tagging.
+      // These two cases can only happen with shifts by 0 or 1 when handed a
+      // valid smi.
+      __ test(left, Immediate(0xc0000000));
+      __ j(not_zero, slow, not_taken);
+      // Tag the result and store it in register eax.
+      __ SmiTag(left);
+      __ mov(eax, left);
+      break;
+
+    case Token::ADD:
+      ASSERT(right.is(eax));
+      __ add(right, Operand(left));  // Addition is commutative.
+      __ j(overflow, &use_fp_on_smis, not_taken);
+      break;
+
+    case Token::SUB:
+      __ sub(left, Operand(right));
+      __ j(overflow, &use_fp_on_smis, not_taken);
+      __ mov(eax, left);
+      break;
+
+    case Token::MUL:
+      // If the smi tag is 0 we can just leave the tag on one operand.
+      STATIC_ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
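+      // A tagged smi a is stored as 2a, so multiplying the tagged left
+      // operand by the untagged right operand yields 2 * (a*b): the product
+      // is already a correctly tagged smi.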
+      // We can't revert the multiplication if the result is not a smi
+      // so save the right operand.
+      __ mov(ebx, right);
+      // Remove tag from one of the operands (but keep sign).
+      __ SmiUntag(right);
+      // Do multiplication.
+      __ imul(right, Operand(left));  // Multiplication is commutative.
+      __ j(overflow, &use_fp_on_smis, not_taken);
+      // Check for negative zero result.  Use combined = left | right.
+      __ NegativeZeroTest(right, combined, &use_fp_on_smis);
+      break;
+
+    case Token::DIV:
+      // We can't revert the division if the result is not a smi so
+      // save the left operand.
+      __ mov(edi, left);
+      // Check for 0 divisor.
+      __ test(right, Operand(right));
+      __ j(zero, &use_fp_on_smis, not_taken);
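+      // Both operands stay tagged: (2a) / (2b) == a / b, so idiv produces an
+      // untagged quotient in eax (hence the SmiTag below) and a tagged
+      // remainder in edx.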
+      // Sign extend left into edx:eax.
+      ASSERT(left.is(eax));
+      __ cdq();
+      // Divide edx:eax by right.
+      __ idiv(right);
+      // Check for the corner case of dividing the most negative smi by
+      // -1. We cannot use the overflow flag, since it is not set by idiv
+      // instruction.
+      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+      __ cmp(eax, 0x40000000);
+      __ j(equal, &use_fp_on_smis);
+      // Check for negative zero result.  Use combined = left | right.
+      __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
+      // Check that the remainder is zero.
+      __ test(edx, Operand(edx));
+      __ j(not_zero, &use_fp_on_smis);
+      // Tag the result and store it in register eax.
+      __ SmiTag(eax);
+      break;
+
+    case Token::MOD:
+      // Check for 0 divisor.
+      __ test(right, Operand(right));
+      __ j(zero, &not_smis, not_taken);
+
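+      // As with DIV both operands stay tagged; the remainder, 2 * (a mod b),
+      // is already a valid smi when idiv leaves it in edx.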
+      // Sign extend left into edx:eax.
+      ASSERT(left.is(eax));
+      __ cdq();
+      // Divide edx:eax by right.
+      __ idiv(right);
+      // Check for negative zero result.  Use combined = left | right.
+      __ NegativeZeroTest(edx, combined, slow);
+      // Move remainder to register eax.
+      __ mov(eax, edx);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+
+  // 5. Emit return of result in eax.  Some operations have registers pushed.
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+      __ ret(0);
+      break;
+    case Token::MOD:
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR:
+      __ ret(2 * kPointerSize);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  // 6. For some operations emit inline code to perform floating point
+  // operations on known smis (e.g., if the result of the operation
+  // overflowed the smi range).
+  if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
+    __ bind(&use_fp_on_smis);
+    switch (op_) {
+      // Undo the effects of some operations, and some register moves.
+      case Token::SHL:
+        // The arguments are saved on the stack, and only used from there.
+        break;
+      case Token::ADD:
+        // Revert right = right + left.
+        __ sub(right, Operand(left));
+        break;
+      case Token::SUB:
+        // Revert left = left - right.
+        __ add(left, Operand(right));
+        break;
+      case Token::MUL:
+        // Right was clobbered but a copy is in ebx.
+        __ mov(right, ebx);
+        break;
+      case Token::DIV:
+        // Left was clobbered but a copy is in edi.  Right is in ebx for
+        // division.  They should be in eax, ebx for the jump to not_smis.
+        __ mov(eax, edi);
+        break;
+      default:
+        // No other operators jump to use_fp_on_smis.
+        break;
+    }
+    __ jmp(&not_smis);
+  } else {
+    ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
+    switch (op_) {
+      case Token::SHL: {
+        Comment perform_float(masm, "-- Perform float operation on smis");
+        __ bind(&use_fp_on_smis);
+        // The result we want is in left (edx), so we can put the allocated
+        // heap number in eax.
+        __ AllocateHeapNumber(eax, ecx, ebx, slow);
+        // Store the result in the HeapNumber and return.
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          __ cvtsi2sd(xmm0, Operand(left));
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        } else {
+          // It's OK to overwrite the right argument on the stack because we
+          // are about to return.
+          __ mov(Operand(esp, 1 * kPointerSize), left);
+          __ fild_s(Operand(esp, 1 * kPointerSize));
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        }
+        __ ret(2 * kPointerSize);
+        break;
+      }
+
+      case Token::ADD:
+      case Token::SUB:
+      case Token::MUL:
+      case Token::DIV: {
+        Comment perform_float(masm, "-- Perform float operation on smis");
+        __ bind(&use_fp_on_smis);
+        // Restore arguments to edx, eax.
+        switch (op_) {
+          case Token::ADD:
+            // Revert right = right + left.
+            __ sub(right, Operand(left));
+            break;
+          case Token::SUB:
+            // Revert left = left - right.
+            __ add(left, Operand(right));
+            break;
+          case Token::MUL:
+            // Right was clobbered but a copy is in ebx.
+            __ mov(right, ebx);
+            break;
+          case Token::DIV:
+            // Left was clobbered but a copy is in edi.  Right is in ebx for
+            // division.
+            __ mov(edx, edi);
+            __ mov(eax, right);
+            break;
+          default:
+            UNREACHABLE();
+            break;
+        }
+        __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          FloatingPointHelper::LoadSSE2Smis(masm, ebx);
+          switch (op_) {
+            case Token::ADD: __ addsd(xmm0, xmm1); break;
+            case Token::SUB: __ subsd(xmm0, xmm1); break;
+            case Token::MUL: __ mulsd(xmm0, xmm1); break;
+            case Token::DIV: __ divsd(xmm0, xmm1); break;
+            default: UNREACHABLE();
+          }
+          __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
+        } else {  // SSE2 not available, use FPU.
+          FloatingPointHelper::LoadFloatSmis(masm, ebx);
+          switch (op_) {
+            case Token::ADD: __ faddp(1); break;
+            case Token::SUB: __ fsubp(1); break;
+            case Token::MUL: __ fmulp(1); break;
+            case Token::DIV: __ fdivp(1); break;
+            default: UNREACHABLE();
+          }
+          __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
+        }
+        __ mov(eax, ecx);
+        __ ret(0);
+        break;
+      }
+
+      default:
+        break;
+    }
+  }
+
+  // 7. Non-smi operands, fall out to the non-smi code with the operands in
+  // edx and eax.
+  Comment done_comment(masm, "-- Enter non-smi code");
+  __ bind(&not_smis);
+  switch (op_) {
+    case Token::BIT_OR:
+    case Token::SHL:
+    case Token::SAR:
+    case Token::SHR:
+      // Right operand is saved in ecx and eax was destroyed by the smi
+      // check.
+      __ mov(eax, ecx);
+      break;
+
+    case Token::DIV:
+    case Token::MOD:
+      // Operands are in eax, ebx at this point.
+      __ mov(edx, eax);
+      __ mov(eax, ebx);
+      break;
+
+    default:
+      break;
+  }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  Label call_runtime;
+
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+      break;
+    case Token::MOD:
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR:
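+      // These operations consume their arguments from the stack (the smi
+      // code returns with ret(2 * kPointerSize)), so push the register
+      // arguments up front.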
+      GenerateRegisterArgsPush(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
+      result_type_ == TRBinaryOpIC::SMI) {
+    GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
+  } else {
+    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+  }
+  __ bind(&call_runtime);
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+      GenerateTypeTransition(masm);
+      break;
+    case Token::MOD:
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR:
+      GenerateTypeTransitionWithSavedArgs(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == TRBinaryOpIC::STRING);
+  ASSERT(op_ == Token::ADD);
+  // If one of the arguments is a string, call the string add stub.
+  // Otherwise, transition to the generic TRBinaryOpIC type.
+
+  // Registers containing left and right operands respectively.
+  Register left = edx;
+  Register right = eax;
+
+  // Test if left operand is a string.
+  NearLabel left_not_string;
+  __ test(left, Immediate(kSmiTagMask));
+  __ j(zero, &left_not_string);
+  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
+  __ j(above_equal, &left_not_string);
+
+  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_left_stub);
+
+  // Left operand is not a string, test right.
+  __ bind(&left_not_string);
+  __ test(right, Immediate(kSmiTagMask));
+  __ j(zero, &call_runtime);
+  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
+  __ j(above_equal, &call_runtime);
+
+  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_right_stub);
+
+  // Neither argument is a string.
+  __ bind(&call_runtime);
+  GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == TRBinaryOpIC::INT32);
+
+  // Floating point case.
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV: {
+      Label not_floats;
+      Label not_int32;
+      if (CpuFeatures::IsSupported(SSE2)) {
+        CpuFeatures::Scope use_sse2(SSE2);
+        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+        FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
+          default: UNREACHABLE();
+        }
+        // If the result type is currently Int32, check that the result
+        // actually fits in an int32.
+        if (result_type_ <= TRBinaryOpIC::INT32) {
+          __ cvttsd2si(ecx, Operand(xmm0));
+          __ cvtsi2sd(xmm2, Operand(ecx));
+          __ ucomisd(xmm0, xmm2);
+          __ j(not_zero, &not_int32);
+          __ j(carry, &not_int32);
+        }
+        GenerateHeapResultAllocation(masm, &call_runtime);
+        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        __ ret(0);
+      } else {  // SSE2 not available, use FPU.
+        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
+        FloatingPointHelper::LoadFloatOperands(
+            masm,
+            ecx,
+            FloatingPointHelper::ARGS_IN_REGISTERS);
+        FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
+        switch (op_) {
+          case Token::ADD: __ faddp(1); break;
+          case Token::SUB: __ fsubp(1); break;
+          case Token::MUL: __ fmulp(1); break;
+          case Token::DIV: __ fdivp(1); break;
+          default: UNREACHABLE();
+        }
+        Label after_alloc_failure;
+        GenerateHeapResultAllocation(masm, &after_alloc_failure);
+        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        __ ret(0);
+        __ bind(&after_alloc_failure);
+        __ ffree();
+        __ jmp(&call_runtime);
+      }
+
+      __ bind(&not_floats);
+      __ bind(&not_int32);
+      GenerateTypeTransition(masm);
+      break;
+    }
+
+    case Token::MOD: {
+      // For MOD we go directly to runtime in the non-smi case.
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR: {
+      GenerateRegisterArgsPush(masm);
+      Label not_floats;
+      Label not_int32;
+      Label non_smi_result;
+      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
+                                                  use_sse3_,
+                                                  &not_floats);
+      FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
+                                                        &not_int32);
+      switch (op_) {
+        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
+        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::SAR: __ sar_cl(eax); break;
+        case Token::SHL: __ shl_cl(eax); break;
+        case Token::SHR: __ shr_cl(eax); break;
+        default: UNREACHABLE();
+      }
+      if (op_ == Token::SHR) {
+        // Check if result is non-negative and fits in a smi.
+        __ test(eax, Immediate(0xc0000000));
+        __ j(not_zero, &call_runtime);
+      } else {
+        // Check if result fits in a smi.
+        __ cmp(eax, 0xc0000000);
+        __ j(negative, &non_smi_result);
+      }
+      // Tag smi result and return.
+      __ SmiTag(eax);
+      __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
+
+      // All ops except SHR return a signed int32 that we store in
+      // a HeapNumber.
+      if (op_ != Token::SHR) {
+        __ bind(&non_smi_result);
+        // Allocate a heap number if needed.
+        __ mov(ebx, Operand(eax));  // ebx: result
+        NearLabel skip_allocation;
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+          case OVERWRITE_RIGHT:
+            // If the operand was an object, we skip the
+            // allocation of a heap number.
+            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+                                1 * kPointerSize : 2 * kPointerSize));
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        // Store the result in the HeapNumber and return.
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          __ cvtsi2sd(xmm0, Operand(ebx));
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        } else {
+          __ mov(Operand(esp, 1 * kPointerSize), ebx);
+          __ fild_s(Operand(esp, 1 * kPointerSize));
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        }
+        __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
+      }
+
+      __ bind(&not_floats);
+      __ bind(&not_int32);
+      GenerateTypeTransitionWithSavedArgs(masm);
+      break;
+    }
+    default: UNREACHABLE(); break;
+  }
+
+  // If an allocation fails, or SHR or MOD hit a hard case,
+  // use the runtime system to get the correct result.
+  __ bind(&call_runtime);
+
+  switch (op_) {
+    case Token::ADD:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+      break;
+    case Token::SUB:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+      break;
+    case Token::MUL:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+      break;
+    case Token::DIV:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+      break;
+    case Token::MOD:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+      break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+      break;
+    case Token::SHR:
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER ||
+         operands_type_ == TRBinaryOpIC::INT32);
+
+  // Floating point case.
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV: {
+      Label not_floats;
+      if (CpuFeatures::IsSupported(SSE2)) {
+        CpuFeatures::Scope use_sse2(SSE2);
+        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
+          default: UNREACHABLE();
+        }
+        GenerateHeapResultAllocation(masm, &call_runtime);
+        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        __ ret(0);
+      } else {  // SSE2 not available, use FPU.
+        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
+        FloatingPointHelper::LoadFloatOperands(
+            masm,
+            ecx,
+            FloatingPointHelper::ARGS_IN_REGISTERS);
+        switch (op_) {
+          case Token::ADD: __ faddp(1); break;
+          case Token::SUB: __ fsubp(1); break;
+          case Token::MUL: __ fmulp(1); break;
+          case Token::DIV: __ fdivp(1); break;
+          default: UNREACHABLE();
+        }
+        Label after_alloc_failure;
+        GenerateHeapResultAllocation(masm, &after_alloc_failure);
+        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        __ ret(0);
+        __ bind(&after_alloc_failure);
+        __ ffree();
+        __ jmp(&call_runtime);
+      }
+
+      __ bind(&not_floats);
+      GenerateTypeTransition(masm);
+      break;
+    }
+
+    case Token::MOD: {
+      // For MOD we go directly to runtime in the non-smi case.
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR: {
+      GenerateRegisterArgsPush(masm);
+      Label not_floats;
+      Label non_smi_result;
+      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
+                                                  use_sse3_,
+                                                  &not_floats);
+      switch (op_) {
+        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
+        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::SAR: __ sar_cl(eax); break;
+        case Token::SHL: __ shl_cl(eax); break;
+        case Token::SHR: __ shr_cl(eax); break;
+        default: UNREACHABLE();
+      }
+      if (op_ == Token::SHR) {
+        // Check if result is non-negative and fits in a smi.
+        __ test(eax, Immediate(0xc0000000));
+        __ j(not_zero, &call_runtime);
+      } else {
+        // Check if result fits in a smi.
+        __ cmp(eax, 0xc0000000);
+        __ j(negative, &non_smi_result);
+      }
+      // Tag smi result and return.
+      __ SmiTag(eax);
+      __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
+
+      // All ops except SHR return a signed int32 that we store in
+      // a HeapNumber.
+      if (op_ != Token::SHR) {
+        __ bind(&non_smi_result);
+        // Allocate a heap number if needed.
+        __ mov(ebx, Operand(eax));  // ebx: result
+        NearLabel skip_allocation;
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+          case OVERWRITE_RIGHT:
+            // If the operand was an object, we skip the
+            // allocation of a heap number.
+            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+                                1 * kPointerSize : 2 * kPointerSize));
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        // Store the result in the HeapNumber and return.
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          __ cvtsi2sd(xmm0, Operand(ebx));
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        } else {
+          __ mov(Operand(esp, 1 * kPointerSize), ebx);
+          __ fild_s(Operand(esp, 1 * kPointerSize));
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        }
+        __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
+      }
+
+      __ bind(&not_floats);
+      GenerateTypeTransitionWithSavedArgs(masm);
+      break;
+    }
+    default: UNREACHABLE(); break;
+  }
+
+  // If an allocation fails, or SHR or MOD hit a hard case,
+  // use the runtime system to get the correct result.
+  __ bind(&call_runtime);
+
+  switch (op_) {
+    case Token::ADD:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+      break;
+    case Token::SUB:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+      break;
+    case Token::MUL:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+      break;
+    case Token::DIV:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+      break;
+    case Token::MOD:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+      break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+      break;
+    case Token::SHR:
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+  Label call_runtime;
+
+  __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
+
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+      break;
+    case Token::MOD:
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR:
+      GenerateRegisterArgsPush(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+
+  // Floating point case.
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV: {
+      Label not_floats;
+      if (CpuFeatures::IsSupported(SSE2)) {
+        CpuFeatures::Scope use_sse2(SSE2);
+        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
+          default: UNREACHABLE();
+        }
+        GenerateHeapResultAllocation(masm, &call_runtime);
+        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        __ ret(0);
+      } else {  // SSE2 not available, use FPU.
+        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
+        FloatingPointHelper::LoadFloatOperands(
+            masm,
+            ecx,
+            FloatingPointHelper::ARGS_IN_REGISTERS);
+        switch (op_) {
+          case Token::ADD: __ faddp(1); break;
+          case Token::SUB: __ fsubp(1); break;
+          case Token::MUL: __ fmulp(1); break;
+          case Token::DIV: __ fdivp(1); break;
+          default: UNREACHABLE();
+        }
+        Label after_alloc_failure;
+        GenerateHeapResultAllocation(masm, &after_alloc_failure);
+        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        __ ret(0);
+        __ bind(&after_alloc_failure);
+        __ ffree();
+        __ jmp(&call_runtime);
+      }
+      __ bind(&not_floats);
+      break;
+    }
+    case Token::MOD: {
+      // For MOD we go directly to runtime in the non-smi case.
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR: {
+      Label non_smi_result;
+      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
+                                                  use_sse3_,
+                                                  &call_runtime);
+      switch (op_) {
+        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
+        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::SAR: __ sar_cl(eax); break;
+        case Token::SHL: __ shl_cl(eax); break;
+        case Token::SHR: __ shr_cl(eax); break;
+        default: UNREACHABLE();
+      }
+      if (op_ == Token::SHR) {
+        // Check if result is non-negative and fits in a smi.
+        __ test(eax, Immediate(0xc0000000));
+        __ j(not_zero, &call_runtime);
+      } else {
+        // Check if result fits in a smi.
+        __ cmp(eax, 0xc0000000);
+        __ j(negative, &non_smi_result);
+      }
+      // Tag smi result and return.
+      __ SmiTag(eax);
+      __ ret(2 * kPointerSize);  // Drop the arguments from the stack.
+
+      // All ops except SHR return a signed int32 that we store in
+      // a HeapNumber.
+      if (op_ != Token::SHR) {
+        __ bind(&non_smi_result);
+        // Allocate a heap number if needed.
+        __ mov(ebx, Operand(eax));  // ebx: result
+        NearLabel skip_allocation;
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+          case OVERWRITE_RIGHT:
+            // If the operand was an object, we skip the
+            // allocation of a heap number.
+            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+                                1 * kPointerSize : 2 * kPointerSize));
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        // Store the result in the HeapNumber and return.
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          __ cvtsi2sd(xmm0, Operand(ebx));
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        } else {
+          __ mov(Operand(esp, 1 * kPointerSize), ebx);
+          __ fild_s(Operand(esp, 1 * kPointerSize));
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        }
+        __ ret(2 * kPointerSize);
+      }
+      break;
     }
+    default: UNREACHABLE(); break;
   }
 
   // If all else fails, use the runtime system to get the correct
-  // result. If arguments was passed in registers now place them on the
-  // stack in the correct order below the return address.
-
-  // Avoid hitting the string ADD code below when allocation fails in
-  // the floating point code above.
-  if (op_ != Token::ADD) {
-    __ bind(&call_runtime);
-  }
-
-  if (HasArgsInRegisters()) {
-    GenerateRegisterArgsPush(masm);
-  }
-
+  // result.
+  __ bind(&call_runtime);
   switch (op_) {
     case Token::ADD: {
+      GenerateRegisterArgsPush(masm);
       // Test for string arguments before calling runtime.
-
-      // If this stub has already generated FP-specific code then the arguments
-      // are already in edx, eax
-      if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
-        GenerateLoadArguments(masm);
-      }
-
       // Registers containing left and right operands respectively.
       Register lhs, rhs;
-      if (HasArgsReversed()) {
-        lhs = eax;
-        rhs = edx;
-      } else {
-        lhs = edx;
-        rhs = eax;
-      }
+      lhs = edx;
+      rhs = eax;
 
       // Test if left operand is a string.
       NearLabel lhs_not_string;
@@ -1046,33 +2366,32 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
       StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
       __ TailCallStub(&string_add_left_stub);
 
-      NearLabel call_runtime_with_args;
+      NearLabel call_add_runtime;
       // Left operand is not a string, test right.
       __ bind(&lhs_not_string);
       __ test(rhs, Immediate(kSmiTagMask));
-      __ j(zero, &call_runtime_with_args);
+      __ j(zero, &call_add_runtime);
       __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
-      __ j(above_equal, &call_runtime_with_args);
+      __ j(above_equal, &call_add_runtime);
 
       StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
       __ TailCallStub(&string_add_right_stub);
 
       // Neither argument is a string.
-      __ bind(&call_runtime);
-      if (HasArgsInRegisters()) {
-        GenerateRegisterArgsPush(masm);
-      }
-      __ bind(&call_runtime_with_args);
+      __ bind(&call_add_runtime);
       __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
       break;
     }
     case Token::SUB:
+      GenerateRegisterArgsPush(masm);
       __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
       break;
     case Token::MUL:
+      GenerateRegisterArgsPush(masm);
       __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
       break;
     case Token::DIV:
+      GenerateRegisterArgsPush(masm);
       __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
       break;
     case Token::MOD:
@@ -1102,17 +2421,11 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
 }
 
 
-void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
-                                                       Label* alloc_failure) {
+void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
+    MacroAssembler* masm,
+    Label* alloc_failure) {
   Label skip_allocation;
   OverwriteMode mode = mode_;
-  if (HasArgsReversed()) {
-    if (mode == OVERWRITE_RIGHT) {
-      mode = OVERWRITE_LEFT;
-    } else if (mode == OVERWRITE_LEFT) {
-      mode = OVERWRITE_RIGHT;
-    }
-  }
   switch (mode) {
     case OVERWRITE_LEFT: {
       // If the argument in edx is already an object, we skip the
@@ -1150,71 +2463,14 @@ void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
 }
 
 
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
-  // If arguments are not passed in registers read them from the stack.
-  ASSERT(!HasArgsInRegisters());
-  __ mov(eax, Operand(esp, 1 * kPointerSize));
-  __ mov(edx, Operand(esp, 2 * kPointerSize));
-}
-
-
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
-  // If arguments are not passed in registers remove them from the stack before
-  // returning.
-  if (!HasArgsInRegisters()) {
-    __ ret(2 * kPointerSize);  // Remove both operands
-  } else {
-    __ ret(0);
-  }
-}
-
-
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
-  ASSERT(HasArgsInRegisters());
+void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
   __ pop(ecx);
-  if (HasArgsReversed()) {
-    __ push(eax);
-    __ push(edx);
-  } else {
-    __ push(edx);
-    __ push(eax);
-  }
+  __ push(edx);
+  __ push(eax);
   __ push(ecx);
 }
 
 
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  // Ensure the operands are on the stack.
-  if (HasArgsInRegisters()) {
-    GenerateRegisterArgsPush(masm);
-  }
-
-  __ pop(ecx);  // Save return address.
-
-  // Left and right arguments are now on top.
-  // Push this stub's key. Although the operation and the type info are
-  // encoded into the key, the encoding is opaque, so push them too.
-  __ push(Immediate(Smi::FromInt(MinorKey())));
-  __ push(Immediate(Smi::FromInt(op_)));
-  __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
-
-  __ push(ecx);  // Push return address.
-
-  // Patch the caller to an appropriate specialized stub and return the
-  // operation result to the caller of the stub.
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
-      5,
-      1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
-  GenericBinaryOpStub stub(key, type_info);
-  return stub.GetCode();
-}
-
-
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   // Input on stack:
   // esp[4]: argument (should be number).
@@ -1707,6 +2963,13 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
 }
 
 
+void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
+                                                       bool use_sse3,
+                                                       Label* not_int32) {
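+  // Intentionally a no-op: no extra check is performed on the loaded
+  // integers here.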
+  return;
+}
+
+
 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
                                            Register number) {
   NearLabel load_smi, done;
@@ -1802,6 +3065,22 @@ void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
 }
 
 
+void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
+                                                    Label* non_int32,
+                                                    Register scratch) {
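+  // A double holds an exact int32 iff truncating it to int32 and converting
+  // back reproduces the original value.  After ucomisd, not_zero means the
+  // values differ and carry covers the unordered (NaN) case.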
+  __ cvttsd2si(scratch, Operand(xmm0));
+  __ cvtsi2sd(xmm2, Operand(scratch));
+  __ ucomisd(xmm0, xmm2);
+  __ j(not_zero, non_int32);
+  __ j(carry, non_int32);
+  __ cvttsd2si(scratch, Operand(xmm1));
+  __ cvtsi2sd(xmm2, Operand(scratch));
+  __ ucomisd(xmm1, xmm2);
+  __ j(not_zero, non_int32);
+  __ j(carry, non_int32);
+}
+
+
 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
                                             Register scratch,
                                             ArgLocation arg_location) {
@@ -1885,6 +3164,12 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
 }
 
 
+void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
+                                                     Label* non_int32) {
+  return;
+}
+
+
 void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
   Label slow, done, undo;
 
@@ -2019,6 +3304,160 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
 }
 
 
+void MathPowStub::Generate(MacroAssembler* masm) {
+  // Registers are used as follows:
+  // edx = base
+  // eax = exponent
+  // ecx = temporary, result
+
+  CpuFeatures::Scope use_sse2(SSE2);
+  Label allocate_return, call_runtime;
+
+  // Load input parameters.
+  __ mov(edx, Operand(esp, 2 * kPointerSize));
+  __ mov(eax, Operand(esp, 1 * kPointerSize));
+
+  // Save 1 in xmm3 - we need this several times later on.
+  __ mov(ecx, Immediate(1));
+  __ cvtsi2sd(xmm3, Operand(ecx));
+
+  Label exponent_nonsmi;
+  Label base_nonsmi;
+  // If the exponent is a heap number go to that specific case.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &exponent_nonsmi);
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(not_zero, &base_nonsmi);
+
+  // Optimized version when both exponent and base are smis.
+  Label powi;
+  __ SmiUntag(edx);
+  __ cvtsi2sd(xmm0, Operand(edx));
+  __ jmp(&powi);
+  // The exponent is a smi and the base is a heap number.
+  __ bind(&base_nonsmi);
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+         Factory::heap_number_map());
+  __ j(not_equal, &call_runtime);
+
+  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+
+  // Optimized version of pow if exponent is a smi.
+  // xmm0 contains the base.
+  __ bind(&powi);
+  __ SmiUntag(eax);
+
+  // Save the exponent in edx; the base value has already been loaded into
+  // xmm0, and we need the exponent's sign later.
+  __ mov(edx, eax);
+
+  // Get absolute value of exponent.
+  NearLabel no_neg;
+  __ cmp(eax, 0);
+  __ j(greater_equal, &no_neg);
+  __ neg(eax);
+  __ bind(&no_neg);
+
+  // Load xmm1 with 1.
+  __ movsd(xmm1, xmm3);
+  NearLabel while_true;
+  NearLabel no_multiply;
+
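+  // Square-and-multiply: scan the exponent bits from least significant;
+  // xmm0 holds successive squarings of the base while xmm1 accumulates the
+  // product for every set bit.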
+  __ bind(&while_true);
+  __ shr(eax, 1);
+  __ j(not_carry, &no_multiply);
+  __ mulsd(xmm1, xmm0);
+  __ bind(&no_multiply);
+  __ test(eax, Operand(eax));
+  __ mulsd(xmm0, xmm0);
+  __ j(not_zero, &while_true);
+
+  // edx holds the original exponent; if the exponent is negative,
+  // return 1/result.
+  __ test(edx, Operand(edx));
+  __ j(positive, &allocate_return);
+  // Special case if xmm1 has reached infinity.
+  __ mov(ecx, Immediate(0x7F800000));  // Single-precision +Infinity.
+  __ movd(xmm0, Operand(ecx));
+  __ cvtss2sd(xmm0, xmm0);
+  __ ucomisd(xmm0, xmm1);
+  __ j(equal, &call_runtime);
+  __ divsd(xmm3, xmm1);
+  __ movsd(xmm1, xmm3);
+  __ jmp(&allocate_return);
+
+  // The exponent (or both operands) is a heap number - no matter what, we
+  // now work on doubles.
+  __ bind(&exponent_nonsmi);
+  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+         Factory::heap_number_map());
+  __ j(not_equal, &call_runtime);
+  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+  // Test if exponent is nan.
+  __ ucomisd(xmm1, xmm1);
+  __ j(parity_even, &call_runtime);
+
+  NearLabel base_not_smi;
+  NearLabel handle_special_cases;
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(not_zero, &base_not_smi);
+  __ SmiUntag(edx);
+  __ cvtsi2sd(xmm0, Operand(edx));
+  __ jmp(&handle_special_cases);
+
+  __ bind(&base_not_smi);
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+         Factory::heap_number_map());
+  __ j(not_equal, &call_runtime);
+  __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
+  __ and_(ecx, HeapNumber::kExponentMask);
+  __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask));
+  // base is NaN or +/-Infinity
+  __ j(greater_equal, &call_runtime);
+  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+
+  // base is in xmm0 and exponent is in xmm1.
+  __ bind(&handle_special_cases);
+  NearLabel not_minus_half;
+  // Test for -0.5.
+  // Load xmm2 with -0.5.
+  __ mov(ecx, Immediate(0xBF000000));
+  __ movd(xmm2, Operand(ecx));
+  __ cvtss2sd(xmm2, xmm2);
+  // xmm2 now has -0.5.
+  __ ucomisd(xmm2, xmm1);
+  __ j(not_equal, &not_minus_half);
+
+  // Calculates reciprocal of square root.
+  // Note that 1/sqrt(x) = sqrt(1/x).
+  __ divsd(xmm3, xmm0);
+  __ movsd(xmm1, xmm3);
+  __ sqrtsd(xmm1, xmm1);
+  __ jmp(&allocate_return);
+
+  // Test for 0.5.
+  __ bind(&not_minus_half);
+  // Load xmm2 with 0.5.
+  // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
+  __ addsd(xmm2, xmm3);
+  // xmm2 now has 0.5.
+  __ ucomisd(xmm2, xmm1);
+  __ j(not_equal, &call_runtime);
+  // Calculates square root.
+  __ movsd(xmm1, xmm0);
+  __ sqrtsd(xmm1, xmm1);
+
+  __ bind(&allocate_return);
+  __ AllocateHeapNumber(ecx, eax, edx, &call_runtime);
+  __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1);
+  __ mov(eax, ecx);
+  __ ret(2 * kPointerSize);
+
+  __ bind(&call_runtime);
+  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+}
+
+
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   // The key is in edx and the parameter count is in eax.
 
@@ -2513,6 +3952,87 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
 }
 
 
+void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
+  const int kMaxInlineLength = 100;
+  Label slowcase;
+  NearLabel done;
+  __ mov(ebx, Operand(esp, kPointerSize * 3));
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(not_zero, &slowcase);
+  __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
+  __ j(above, &slowcase);
+  // Smi-tagging is equivalent to multiplying by 2.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  // Allocate RegExpResult followed by FixedArray with size in ebx.
+  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
+  // Elements:  [Map][Length][..elements..]
+  __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
+                        times_half_pointer_size,
+                        ebx,  // In: Number of elements (times 2, being a smi)
+                        eax,  // Out: Start of allocation (tagged).
+                        ecx,  // Out: End of allocation.
+                        edx,  // Scratch register
+                        &slowcase,
+                        TAG_OBJECT);
+  // eax: Start of allocated area, object-tagged.
+
+  // Set JSArray map to global.regexp_result_map().
+  // Set empty properties FixedArray.
+  // Set elements to point to FixedArray allocated right after the JSArray.
+  // Interleave operations for better latency.
+  __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
+  __ mov(ecx, Immediate(Factory::empty_fixed_array()));
+  __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
+  __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
+  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
+  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
+  __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
+  __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
+
+  // Set input, index and length fields from arguments.
+  __ mov(ecx, Operand(esp, kPointerSize * 1));
+  __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx);
+  __ mov(ecx, Operand(esp, kPointerSize * 2));
+  __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx);
+  __ mov(ecx, Operand(esp, kPointerSize * 3));
+  __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
+
+  // Fill out the elements FixedArray.
+  // eax: JSArray.
+  // ebx: FixedArray.
+  // ecx: Number of elements in array, as smi.
+
+  // Set map.
+  __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
+         Immediate(Factory::fixed_array_map()));
+  // Set length.
+  __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
+  // Fill contents of fixed-array with the-hole.
+  __ SmiUntag(ecx);
+  __ mov(edx, Immediate(Factory::the_hole_value()));
+  __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
+  // Fill fixed array elements with hole.
+  // eax: JSArray.
+  // ecx: Number of elements to fill.
+  // ebx: Start of elements in FixedArray.
+  // edx: the hole.
+  Label loop;
+  __ test(ecx, Operand(ecx));
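+  // The test above sets the flags for the first pass through the loop; on
+  // later passes they come from the sub before the backward jump.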
+  __ bind(&loop);
+  __ j(less_equal, &done);  // Jump if ecx is negative or zero.
+  __ sub(Operand(ecx), Immediate(1));
+  __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
+  __ jmp(&loop);
+
+  __ bind(&done);
+  __ ret(3 * kPointerSize);
+
+  __ bind(&slowcase);
+  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
+}
+
+
 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                          Register object,
                                                          Register result,
@@ -3131,7 +4651,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
   __ j(zero, &failure_returned, not_taken);
 
   // Exit the JavaScript to C++ exit frame.
-  __ LeaveExitFrame();
+  __ LeaveExitFrame(save_doubles_);
   __ ret(0);
 
   // Handling of failure.
@@ -3231,7 +4751,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
   // a garbage collection and retrying the builtin (twice).
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame();
+  __ EnterExitFrame(save_doubles_);
 
   // eax: result parameter for PerformGC, if any (setup below)
   // ebx: pointer to builtin function  (C callee-saved)
@@ -4579,6 +6099,192 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
   __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }
 
+
+void StringCharAtStub::Generate(MacroAssembler* masm) {
+  // Expects two arguments (object, index) on the stack:
+
+  // Stack frame on entry.
+  //  esp[0]: return address
+  //  esp[4]: index
+  //  esp[8]: object
+
+  Register object = ebx;
+  Register index = eax;
+  Register scratch1 = ecx;
+  Register scratch2 = edx;
+  Register result = eax;
+
+  __ pop(scratch1);  // Return address.
+  __ pop(index);
+  __ pop(object);
+  __ push(scratch1);
+
+  Label need_conversion;
+  Label index_out_of_range;
+  Label done;
+  StringCharAtGenerator generator(object,
+                                  index,
+                                  scratch1,
+                                  scratch2,
+                                  result,
+                                  &need_conversion,
+                                  &need_conversion,
+                                  &index_out_of_range,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm);
+  __ jmp(&done);
+
+  __ bind(&index_out_of_range);
+  // When the index is out of range, the spec requires us to return
+  // the empty string.
+  __ Set(result, Immediate(Factory::empty_string()));
+  __ jmp(&done);
+
+  __ bind(&need_conversion);
+  // Move smi zero into the result register, which will trigger
+  // conversion.
+  __ Set(result, Immediate(Smi::FromInt(0)));
+  __ jmp(&done);
+
+  StubRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm, call_helper);
+
+  __ bind(&done);
+  __ ret(0);
+}
+
+
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::SMIS);
+  NearLabel miss;
+  __ mov(ecx, Operand(edx));
+  __ or_(ecx, Operand(eax));
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(not_zero, &miss, not_taken);
+
+  if (GetCondition() == equal) {
+    // For equality we do not care about the sign of the result.
+    __ sub(eax, Operand(edx));
+  } else {
+    NearLabel done;
+    __ sub(edx, Operand(eax));
+    __ j(no_overflow, &done);
+    // Correct sign of result in case of overflow.
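+    // When the sub overflowed, the computed difference has the wrong sign;
+    // not edx (== -edx - 1) flips the sign bit back and can never yield
+    // zero here, which is all the comparison result needs.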
+    __ not_(edx);
+    __ bind(&done);
+    __ mov(eax, edx);
+  }
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+
+  NearLabel generic_stub;
+  NearLabel unordered;
+  NearLabel miss;
+  __ mov(ecx, Operand(edx));
+  __ and_(ecx, Operand(eax));
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(zero, &generic_stub, not_taken);
+
+  __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
+  __ j(not_equal, &miss, not_taken);
+  __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
+  __ j(not_equal, &miss, not_taken);
+
+  // Inline the double comparison and fall back to the general compare
+  // stub if NaN is involved or SSE2 or CMOV is unsupported.
+  if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
+    CpuFeatures::Scope scope1(SSE2);
+    CpuFeatures::Scope scope2(CMOV);
+
+    // Load left and right operands.
+    __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+
+    // Compare operands
+    __ ucomisd(xmm0, xmm1);
+
+    // Don't base result on EFLAGS when a NaN is involved.
+    __ j(parity_even, &unordered, not_taken);
+
+    // Return a result of -1, 0, or 1, based on EFLAGS.
+    // Performing mov, because xor would destroy the flag register.
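+    // After ucomisd on ordered operands, above means xmm0 > xmm1 and below
+    // means xmm0 < xmm1; NaNs were already dispatched to &unordered.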
+    __ mov(eax, 0);  // equal
+    __ mov(ecx, Immediate(Smi::FromInt(1)));
+    __ cmov(above, eax, Operand(ecx));
+    __ mov(ecx, Immediate(Smi::FromInt(-1)));
+    __ cmov(below, eax, Operand(ecx));
+    __ ret(0);
+
+    __ bind(&unordered);
+  }
+
+  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
+  __ bind(&generic_stub);
+  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::OBJECTS);
+  NearLabel miss;
+  __ mov(ecx, Operand(edx));
+  __ and_(ecx, Operand(eax));
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
+  __ j(not_equal, &miss, not_taken);
+  __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
+  __ j(not_equal, &miss, not_taken);
+
+  ASSERT(GetCondition() == equal);
+  __ sub(eax, Operand(edx));
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+  // Save the registers.
+  __ pop(ecx);
+  __ push(edx);
+  __ push(eax);
+  __ push(ecx);
+
+  // Call the runtime system in a fresh internal frame.
+  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
+  __ EnterInternalFrame();
+  __ push(edx);
+  __ push(eax);
+  __ push(Immediate(Smi::FromInt(op_)));
+  __ CallExternalReference(miss, 3);
+  __ LeaveInternalFrame();
+
+  // Compute the entry point of the rewritten stub.
+  __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
+
+  // Restore registers.
+  __ pop(ecx);
+  __ pop(eax);
+  __ pop(edx);
+  __ push(ecx);
+
+  // Do a tail call to the rewritten stub.
+  __ jmp(Operand(edi));
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
index 351636f..2973101 100644 (file)
@@ -83,7 +83,7 @@ class GenericBinaryOpStub: public CodeStub {
         args_in_registers_(false),
         args_reversed_(false),
         static_operands_type_(operands_type),
-        runtime_operands_type_(BinaryOpIC::DEFAULT),
+        runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
         name_(NULL) {
     if (static_operands_type_.IsSmi()) {
       mode_ = NO_OVERWRITE;
@@ -117,6 +117,11 @@ class GenericBinaryOpStub: public CodeStub {
         || op_ == Token::MUL || op_ == Token::DIV;
   }
 
+  void SetArgsInRegisters() {
+    ASSERT(ArgsInRegistersSupported());
+    args_in_registers_ = true;
+  }
+
  private:
   Token::Value op_;
   OverwriteMode mode_;
@@ -157,7 +162,7 @@ class GenericBinaryOpStub: public CodeStub {
   class ArgsReversedBits: public BitField<bool, 11, 1> {};
   class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
   class StaticTypeInfoBits: public BitField<int, 13, 3> {};
-  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
+  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
 
   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
@@ -185,7 +190,6 @@ class GenericBinaryOpStub: public CodeStub {
     return (op_ == Token::ADD) || (op_ == Token::MUL);
   }
 
-  void SetArgsInRegisters() { args_in_registers_ = true; }
   void SetArgsReversed() { args_reversed_ = true; }
   bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
   bool HasArgsInRegisters() { return args_in_registers_; }
@@ -207,6 +211,123 @@ class GenericBinaryOpStub: public CodeStub {
     return BinaryOpIC::ToState(runtime_operands_type_);
   }
 
+  virtual void FinishCode(Code* code) {
+    code->set_binary_op_type(runtime_operands_type_);
+  }
+
+  friend class CodeGenerator;
+};
+
+
+class TypeRecordingBinaryOpStub: public CodeStub {
+ public:
+  TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+      : op_(op),
+        mode_(mode),
+        operands_type_(TRBinaryOpIC::UNINITIALIZED),
+        result_type_(TRBinaryOpIC::UNINITIALIZED),
+        name_(NULL) {
+    use_sse3_ = CpuFeatures::IsSupported(SSE3);
+    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+  }
+
+  TypeRecordingBinaryOpStub(int key,
+      TRBinaryOpIC::TypeInfo operands_type,
+      TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+      : op_(OpBits::decode(key)),
+        mode_(ModeBits::decode(key)),
+        use_sse3_(SSE3Bits::decode(key)),
+        operands_type_(operands_type),
+        result_type_(result_type),
+        name_(NULL) {
+  }
+
+  // Generate code to call the stub with the supplied arguments. This will add
+  // code at the call site to prepare arguments either in registers or on the
+  // stack together with the actual call.
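+  // The stub itself takes its left operand in edx and its right operand
+  // in eax, and leaves the result in eax, as assumed by the call sites
+  // in full-codegen-ia32.cc.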
+  void GenerateCall(MacroAssembler* masm, Register left, Register right);
+  void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
+  void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
+
+ private:
+  enum SmiCodeGenerateHeapNumberResults {
+    ALLOW_HEAPNUMBER_RESULTS,
+    NO_HEAPNUMBER_RESULTS
+  };
+
+  Token::Value op_;
+  OverwriteMode mode_;
+  bool use_sse3_;
+
+  // Operand type information determined at runtime.
+  TRBinaryOpIC::TypeInfo operands_type_;
+  TRBinaryOpIC::TypeInfo result_type_;
+
+  char* name_;
+
+  const char* GetName();
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+           "(mode %d, runtime_type_info %s)\n",
+           MinorKey(),
+           Token::String(op_),
+           static_cast<int>(mode_),
+           TRBinaryOpIC::GetName(operands_type_));
+  }
+#endif
+
+  // Minor key encoding in 16 bits RRRTTTSOOOOOOOMM.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 7> {};
+  class SSE3Bits: public BitField<bool, 9, 1> {};
+  class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
+  class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
+
+  Major MajorKey() { return TypeRecordingBinaryOp; }
+  int MinorKey() {
+    return OpBits::encode(op_)
+           | ModeBits::encode(mode_)
+           | SSE3Bits::encode(use_sse3_)
+           | OperandTypeInfoBits::encode(operands_type_)
+           | ResultTypeInfoBits::encode(result_type_);
+  }
+
+  void Generate(MacroAssembler* masm);
+  void GenerateGeneric(MacroAssembler* masm);
+  void GenerateSmiCode(MacroAssembler* masm,
+                       Label* slow,
+                       SmiCodeGenerateHeapNumberResults heapnumber_results);
+  void GenerateLoadArguments(MacroAssembler* masm);
+  void GenerateReturn(MacroAssembler* masm);
+  void GenerateUninitializedStub(MacroAssembler* masm);
+  void GenerateSmiStub(MacroAssembler* masm);
+  void GenerateInt32Stub(MacroAssembler* masm);
+  void GenerateHeapNumberStub(MacroAssembler* masm);
+  void GenerateStringStub(MacroAssembler* masm);
+  void GenerateGenericStub(MacroAssembler* masm);
+
+  void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
+  void GenerateRegisterArgsPush(MacroAssembler* masm);
+  void GenerateTypeTransition(MacroAssembler* masm);
+  void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+
+  bool IsOperationCommutative() {
+    return (op_ == Token::ADD) || (op_ == Token::MUL);
+  }
+
+  virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return TRBinaryOpIC::ToState(operands_type_);
+  }
+
+  virtual void FinishCode(Code* code) {
+    code->set_type_recording_binary_op_type(operands_type_);
+    code->set_type_recording_binary_op_result_type(result_type_);
+  }
+
   friend class CodeGenerator;
 };
 
index a791ed5..022c117 100644 (file)
@@ -104,12 +104,12 @@ void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
 }
 
 
-void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterInternalFrame();
 }
 
 
-void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
   masm->LeaveInternalFrame();
 }
 
@@ -7398,6 +7398,7 @@ void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
   Load(args->at(1));
   Load(args->at(2));
   Load(args->at(3));
+
   RegExpExecStub stub;
   Result result = frame_->CallStub(&stub, 4);
   frame_->Push(&result);
@@ -7405,91 +7406,15 @@ void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
 
 
 void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
-  // No stub. This code only occurs a few times in regexp.js.
-  const int kMaxInlineLength = 100;
   ASSERT_EQ(3, args->length());
+
   Load(args->at(0));  // Size of array, smi.
   Load(args->at(1));  // "index" property value.
   Load(args->at(2));  // "input" property value.
-  {
-    VirtualFrame::SpilledScope spilled_scope;
-
-    Label slowcase;
-    Label done;
-    __ mov(ebx, Operand(esp, kPointerSize * 2));
-    __ test(ebx, Immediate(kSmiTagMask));
-    __ j(not_zero, &slowcase);
-    __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
-    __ j(above, &slowcase);
-    // Smi-tagging is equivalent to multiplying by 2.
-    STATIC_ASSERT(kSmiTag == 0);
-    STATIC_ASSERT(kSmiTagSize == 1);
-    // Allocate RegExpResult followed by FixedArray with size in ebx.
-    // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
-    // Elements:  [Map][Length][..elements..]
-    __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
-                          times_half_pointer_size,
-                          ebx,  // In: Number of elements (times 2, being a smi)
-                          eax,  // Out: Start of allocation (tagged).
-                          ecx,  // Out: End of allocation.
-                          edx,  // Scratch register
-                          &slowcase,
-                          TAG_OBJECT);
-    // eax: Start of allocated area, object-tagged.
-
-    // Set JSArray map to global.regexp_result_map().
-    // Set empty properties FixedArray.
-    // Set elements to point to FixedArray allocated right after the JSArray.
-    // Interleave operations for better latency.
-    __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
-    __ mov(ecx, Immediate(Factory::empty_fixed_array()));
-    __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
-    __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
-    __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
-    __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
-    __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
-    __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
-
-    // Set input, index and length fields from arguments.
-    __ pop(FieldOperand(eax, JSRegExpResult::kInputOffset));
-    __ pop(FieldOperand(eax, JSRegExpResult::kIndexOffset));
-    __ pop(ecx);
-    __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
-
-    // Fill out the elements FixedArray.
-    // eax: JSArray.
-    // ebx: FixedArray.
-    // ecx: Number of elements in array, as smi.
-
-    // Set map.
-    __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
-           Immediate(Factory::fixed_array_map()));
-    // Set length.
-    __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
-    // Fill contents of fixed-array with the-hole.
-    __ SmiUntag(ecx);
-    __ mov(edx, Immediate(Factory::the_hole_value()));
-    __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
-    // Fill fixed array elements with hole.
-    // eax: JSArray.
-    // ecx: Number of elements to fill.
-    // ebx: Start of elements in FixedArray.
-    // edx: the hole.
-    Label loop;
-    __ test(ecx, Operand(ecx));
-    __ bind(&loop);
-    __ j(less_equal, &done);  // Jump if ecx is negative or zero.
-    __ sub(Operand(ecx), Immediate(1));
-    __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
-    __ jmp(&loop);
-
-    __ bind(&slowcase);
-    __ CallRuntime(Runtime::kRegExpConstructResult, 3);
 
-    __ bind(&done);
-  }
-  frame_->Forget(3);
-  frame_->Push(eax);
+  RegExpConstructResultStub stub;
+  Result result = frame_->CallStub(&stub, 3);
+  frame_->Push(&result);
 }
 
 
@@ -10082,14 +10007,15 @@ void Reference::SetValue(InitState init_state) {
 
 #define __ masm.
 
+
+static void MemCopyWrapper(void* dest, const void* src, size_t size) {
+  memcpy(dest, src, size);
+}
+
+
 MemCopyFunction CreateMemCopyFunction() {
-  size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
-  CHECK(buffer);
-  HandleScope handles;
-  MacroAssembler masm(buffer, static_cast<int>(actual_size));
+  HandleScope scope;
+  MacroAssembler masm(NULL, 1 * KB);
 
   // Generated code is put into a fixed, unmovable buffer, and not into
   // the V8 heap. We can't, and don't, refer to any relocatable addresses
@@ -10183,6 +10109,7 @@ MemCopyFunction CreateMemCopyFunction() {
       __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
       __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
 
+      __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
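+      // eax now holds the destination, mirroring memcpy, which returns
+      // its destination pointer.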
       __ pop(esi);
       __ pop(edi);
       __ ret(0);
@@ -10229,6 +10156,7 @@ MemCopyFunction CreateMemCopyFunction() {
       __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
       __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
 
+      __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
       __ pop(esi);
       __ pop(edi);
       __ ret(0);
@@ -10272,6 +10200,7 @@ MemCopyFunction CreateMemCopyFunction() {
     __ mov(eax, Operand(src, count, times_1, -4));
     __ mov(Operand(dst, count, times_1, -4), eax);
 
+    __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
     __ pop(esi);
     __ pop(edi);
     __ ret(0);
@@ -10279,8 +10208,15 @@ MemCopyFunction CreateMemCopyFunction() {
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  // Call the function from C++.
-  return FUNCTION_CAST<MemCopyFunction>(buffer);
+  ASSERT(desc.reloc_size == 0);
+
+  // Copy the generated code into an executable chunk and return a pointer
+  // to the first instruction in it as a C++ function pointer.
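+  // If the chunk allocation fails, fall back to the plain C library memcpy.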
+  LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
+  if (chunk == NULL) return &MemCopyWrapper;
+  memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
+  CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
+  return FUNCTION_CAST<MemCopyFunction>(chunk->GetStartAddress());
 }
 
 #undef __
index 1030856..46b12cb 100644 (file)
@@ -43,9 +43,6 @@ class RegisterAllocator;
 class RegisterFile;
 class RuntimeCallHelper;
 
-enum InitState { CONST_INIT, NOT_CONST_INIT };
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
 
 // -------------------------------------------------------------------------
 // Reference support
@@ -310,6 +307,9 @@ class CodeGenerator: public AstVisitor {
                                        Code::Flags flags,
                                        CompilationInfo* info);
 
+  // Print the code after compiling it.
+  static void PrintCode(Handle<Code> code, CompilationInfo* info);
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
 #endif
@@ -398,8 +398,9 @@ class CodeGenerator: public AstVisitor {
   // Node visitors.
   void VisitStatements(ZoneList<Statement*>* statements);
 
+  virtual void VisitSlot(Slot* node);
 #define DEF_VISIT(type) \
-  void Visit##type(type* node);
+  virtual void Visit##type(type* node);
   AST_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
 
@@ -783,6 +784,7 @@ class CodeGenerator: public AstVisitor {
   friend class FastCodeGenerator;
   friend class FullCodeGenerator;
   friend class FullCodeGenSyntaxChecker;
+  friend class LCodeGen;
 
   friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc
 
index b15140f..d64257f 100644 (file)
@@ -42,7 +42,11 @@ namespace v8 {
 namespace internal {
 
 void CPU::Setup() {
-  CpuFeatures::Probe();
+  CpuFeatures::Clear();
+  CpuFeatures::Probe(true);
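+  // The optimizing compiler requires SSE2 and cannot be used while the
+  // serializer is enabled.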
+  if (!CpuFeatures::IsSupported(SSE2) || Serializer::enabled()) {
+    V8::DisableCrankshaft();
+  }
 }
 
 
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
new file mode 100644 (file)
index 0000000..f3e62db
--- /dev/null
@@ -0,0 +1,615 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+
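+// Size in bytes of a single deoptimization table entry: a five byte push
+// of the entry index (0x68 imm32) followed by a five byte relative jump
+// (0xe9 rel32); see TableEntryGenerator::GeneratePrologue below.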
+int Deoptimizer::table_entry_size_ = 10;
+
+
+void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+  AssertNoAllocation no_allocation;
+
+  if (!function->IsOptimized()) return;
+
+  // Get the optimized code.
+  Code* code = function->code();
+
+  // Invalidate the relocation information, as it would be made invalid
+  // by the code patching below and is no longer needed.
+  code->InvalidateRelocation();
+
+  // For each return after a safepoint, insert an absolute call to the
+  // corresponding deoptimization entry.
+  unsigned last_pc_offset = 0;
+  SafepointTable table(function->code());
+  for (unsigned i = 0; i < table.length(); i++) {
+    unsigned pc_offset = table.GetPcOffset(i);
+    int deoptimization_index = table.GetDeoptimizationIndex(i);
+    int gap_code_size = table.GetGapCodeSize(i);
+#ifdef DEBUG
+    // Destroy the code which is not supposed to run again.
+    unsigned instructions = pc_offset - last_pc_offset;
+    CodePatcher destroyer(code->instruction_start() + last_pc_offset,
+                          instructions);
+    for (unsigned i = 0; i < instructions; i++) {
+      destroyer.masm()->int3();
+    }
+#endif
+    last_pc_offset = pc_offset;
+    if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
+      CodePatcher patcher(
+          code->instruction_start() + pc_offset + gap_code_size,
+          Assembler::kCallInstructionLength);
+      patcher.masm()->call(GetDeoptimizationEntry(deoptimization_index, LAZY),
+                           RelocInfo::NONE);
+      last_pc_offset += gap_code_size + Assembler::kCallInstructionLength;
+    }
+  }
+#ifdef DEBUG
+  // Destroy the code which is not supposed to run again.
+  unsigned instructions = code->safepoint_table_start() - last_pc_offset;
+  CodePatcher destroyer(code->instruction_start() + last_pc_offset,
+                        instructions);
+  for (unsigned i = 0; i < instructions; i++) {
+    destroyer.masm()->int3();
+  }
+#endif
+
+  // Add the deoptimizing code to the list.
+  DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
+  node->set_next(deoptimizing_code_list_);
+  deoptimizing_code_list_ = node;
+
+  // Set the code for the function to non-optimized version.
+  function->ReplaceCode(function->shared()->code());
+
+  if (FLAG_trace_deopt) {
+    PrintF("[forced deoptimization: ");
+    function->PrintName();
+    PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
+  }
+}
+
+
+void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
+                                      Code* replacement_code) {
+  // The stack check code matches the pattern:
+  //
+  //     cmp esp, <limit>
+  //     jae ok
+  //     call <stack guard>
+  // ok: ...
+  //
+  // We will patch the code to:
+  //
+  //     cmp esp, <limit>  ;; Not changed
+  //     nop
+  //     nop
+  //     call <on-stack replacement>
+  // ok:
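+  //
+  // The two byte jae is replaced by two single byte nops, and the call
+  // target is redirected, so the patched sequence is still valid code.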
+  Address call_target_address = rinfo->pc();
+  ASSERT(*(call_target_address - 3) == 0x73 &&  // jae
+         *(call_target_address - 2) == 0x05 &&  // offset
+         *(call_target_address - 1) == 0xe8);   // call
+  *(call_target_address - 3) = 0x90;  // nop
+  *(call_target_address - 2) = 0x90;  // nop
+  rinfo->set_target_address(replacement_code->entry());
+}
+
+
+void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
+  Address call_target_address = rinfo->pc();
+  ASSERT(*(call_target_address - 3) == 0x90 &&  // nop
+         *(call_target_address - 2) == 0x90 &&  // nop
+         *(call_target_address - 1) == 0xe8);   // call
+  *(call_target_address - 3) = 0x73;  // jae
+  *(call_target_address - 2) = 0x05;  // offset
+  rinfo->set_target_address(check_code->entry());
+}
+
+
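+// Returns the index of the deoptimization entry whose ast id matches the
+// given one and whose translation describes exactly one frame (the OSR
+// case).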
+static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+  ByteArray* translations = data->TranslationByteArray();
+  int length = data->DeoptCount();
+  for (int i = 0; i < length; i++) {
+    if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+      TranslationIterator it(translations, data->TranslationIndex(i)->value());
+      int value = it.Next();
+      ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
+      // Read the number of frames.
+      value = it.Next();
+      if (value == 1) return i;
+    }
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+void Deoptimizer::DoComputeOsrOutputFrame() {
+  DeoptimizationInputData* data = DeoptimizationInputData::cast(
+      optimized_code_->deoptimization_data());
+  unsigned ast_id = data->OsrAstId()->value();
+  // TODO(kasperl): This should not be the bailout_id_. It should be
+  // the ast id. Confusing.
+  ASSERT(bailout_id_ == ast_id);
+
+  int bailout_id = LookupBailoutId(data, ast_id);
+  unsigned translation_index = data->TranslationIndex(bailout_id)->value();
+  ByteArray* translations = data->TranslationByteArray();
+
+  TranslationIterator iterator(translations, translation_index);
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator.Next());
+  ASSERT(Translation::BEGIN == opcode);
+  USE(opcode);
+  int count = iterator.Next();
+  ASSERT(count == 1);
+  USE(count);
+
+  opcode = static_cast<Translation::Opcode>(iterator.Next());
+  USE(opcode);
+  ASSERT(Translation::FRAME == opcode);
+  unsigned node_id = iterator.Next();
+  USE(node_id);
+  ASSERT(node_id == ast_id);
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
+  USE(function);
+  ASSERT(function == function_);
+  unsigned height = iterator.Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  USE(height_in_bytes);
+
+  unsigned fixed_size = ComputeFixedSize(function_);
+  unsigned input_frame_size = input_->GetFrameSize();
+  ASSERT(fixed_size + height_in_bytes == input_frame_size);
+
+  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+  unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
+  unsigned outgoing_size = outgoing_height * kPointerSize;
+  unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
+  ASSERT(outgoing_size == 0);  // OSR does not happen in the middle of a call.
+
+  if (FLAG_trace_osr) {
+    PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
+           reinterpret_cast<intptr_t>(function_));
+    function_->PrintName();
+    PrintF(" => node=%u, frame=%d->%d]\n",
+           ast_id,
+           input_frame_size,
+           output_frame_size);
+  }
+
+  // There's only one output frame in the OSR case.
+  output_count_ = 1;
+  output_ = new FrameDescription*[1];
+  output_[0] = new(output_frame_size) FrameDescription(
+      output_frame_size, function_);
+
+  // Clear the incoming parameters in the optimized frame to avoid
+  // confusing the garbage collector.
+  unsigned output_offset = output_frame_size - kPointerSize;
+  int parameter_count = function_->shared()->formal_parameter_count() + 1;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_[0]->SetFrameSlot(output_offset, 0);
+    output_offset -= kPointerSize;
+  }
+
+  // Translate the incoming parameters. This may overwrite some of the
+  // incoming argument slots we've just cleared.
+  int input_offset = input_frame_size - kPointerSize;
+  bool ok = true;
+  int limit = input_offset - (parameter_count * kPointerSize);
+  while (ok && input_offset > limit) {
+    ok = DoOsrTranslateCommand(&iterator, &input_offset);
+  }
+
+  // There are no translation commands for the caller's pc and fp, the
+  // context, and the function.  Set them up explicitly.
+  for (int i = 0; ok && i < 4; i++) {
+    uint32_t input_value = input_->GetFrameSlot(input_offset);
+    if (FLAG_trace_osr) {
+      PrintF("    [esp + %d] <- 0x%08x ; [esp + %d] (fixed part)\n",
+             output_offset,
+             input_value,
+             input_offset);
+    }
+    output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
+    input_offset -= kPointerSize;
+    output_offset -= kPointerSize;
+  }
+
+  // Translate the rest of the frame.
+  while (ok && input_offset >= 0) {
+    ok = DoOsrTranslateCommand(&iterator, &input_offset);
+  }
+
+  // If translation of any command failed, continue using the input frame.
+  if (!ok) {
+    delete output_[0];
+    output_[0] = input_;
+    output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
+  } else {
+    // Set up the frame pointer and the context pointer.
+    output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
+    output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
+
+    unsigned pc_offset = data->OsrPcOffset()->value();
+    uint32_t pc = reinterpret_cast<uint32_t>(
+        optimized_code_->entry() + pc_offset);
+    output_[0]->SetPc(pc);
+  }
+  Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
+  output_[0]->SetContinuation(
+      reinterpret_cast<uint32_t>(continuation->entry()));
+
+  if (FLAG_trace_osr) {
+    PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
+           ok ? "finished" : "aborted",
+           reinterpret_cast<intptr_t>(function));
+    function->PrintName();
+    PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
+  }
+}
+
+
+void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
+                                 int frame_index) {
+  // Read the ast node id, function, and frame height for this output frame.
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  USE(opcode);
+  ASSERT(Translation::FRAME == opcode);
+  int node_id = iterator->Next();
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  unsigned height = iterator->Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  if (FLAG_trace_deopt) {
+    PrintF("  translating ");
+    function->PrintName();
+    PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+  }
+
+  // The 'fixed' part of the frame consists of the incoming parameters and
+  // the part described by JavaScriptFrameConstants.
+  unsigned fixed_frame_size = ComputeFixedSize(function);
+  unsigned input_frame_size = input_->GetFrameSize();
+  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+  // Allocate and store the output frame description.
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, function);
+
+  bool is_bottommost = (0 == frame_index);
+  bool is_topmost = (output_count_ - 1 == frame_index);
+  ASSERT(frame_index >= 0 && frame_index < output_count_);
+  ASSERT(output_[frame_index] == NULL);
+  output_[frame_index] = output_frame;
+
+  // The top address for the bottommost output frame can be computed from
+  // the input frame pointer and the output frame's height.  For all
+  // subsequent output frames, it can be computed from the previous one's
+  // top address and the current frame's size.
+  uint32_t top_address;
+  if (is_bottommost) {
+    // 2 = context and function in the frame.
+    top_address =
+        input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
+  } else {
+    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  }
+  output_frame->SetTop(top_address);
+
+  // Compute the incoming parameter translation.
+  int parameter_count = function->shared()->formal_parameter_count() + 1;
+  unsigned output_offset = output_frame_size;
+  unsigned input_offset = input_frame_size;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+  input_offset -= (parameter_count * kPointerSize);
+
+  // There are no translation commands for the caller's pc and fp, the
+  // context, and the function.  Synthesize their values and set them up
+  // explicitly.
+  //
+  // The caller's pc for the bottommost output frame is the same as in the
+  // input frame.  For all subsequent output frames, it can be read from the
+  // previous one.  This frame's pc can be computed from the non-optimized
+  // function code and AST id of the bailout.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  uint32_t value;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = output_[frame_index - 1]->GetPc();
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The caller's frame pointer for the bottommost output frame is the same
+  // as in the input frame.  For all subsequent output frames, it can be
+  // read from the previous one.  Also compute and set this frame's frame
+  // pointer.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = output_[frame_index - 1]->GetFp();
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  unsigned fp_value = top_address + output_offset;
+  ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
+  output_frame->SetFp(fp_value);
+  if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+           fp_value, output_offset, value);
+  }
+
+  // The context can be read from the function as long as we don't
+  // optimize functions that need local contexts.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  value = reinterpret_cast<uint32_t>(function->context());
+  // The context for the bottommost output frame should also agree with the
+  // input frame.
+  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (is_topmost) output_frame->SetRegister(esi.code(), value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The function was mentioned explicitly in the BEGIN_FRAME.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  value = reinterpret_cast<uint32_t>(function);
+  // The function for the bottommost output frame should also agree with the
+  // input frame.
+  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // Translate the rest of the frame.
+  for (unsigned i = 0; i < height; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+  ASSERT(0 == output_offset);
+
+  // Compute this frame's PC, state, and continuation.
+  Code* non_optimized_code = function->shared()->code();
+  FixedArray* raw_data = non_optimized_code->deoptimization_data();
+  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+  Address start = non_optimized_code->instruction_start();
+  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
+  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+  uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
+  output_frame->SetPc(pc_value);
+
+  FullCodeGenerator::State state =
+      FullCodeGenerator::StateField::decode(pc_and_state);
+  output_frame->SetState(Smi::FromInt(state));
+
+  // Set the continuation for the topmost frame.
+  if (is_topmost) {
+    Code* continuation = (bailout_type_ == EAGER)
+        ? Builtins::builtin(Builtins::NotifyDeoptimized)
+        : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
+    output_frame->SetContinuation(
+        reinterpret_cast<uint32_t>(continuation->entry()));
+  }
+
+  if (is_topmost) iterator->Done();
+}
+
+
+#define __ masm()->
+
+void Deoptimizer::EntryGenerator::Generate() {
+  GeneratePrologue();
+  CpuFeatures::Scope scope(SSE2);
+
+  // Save all general purpose registers before messing with them.
+  const int kNumberOfRegisters = Register::kNumRegisters;
+
+  const int kDoubleRegsSize = kDoubleSize *
+                              XMMRegister::kNumAllocatableRegisters;
+  __ sub(Operand(esp), Immediate(kDoubleRegsSize));
+  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+    XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+    int offset = i * kDoubleSize;
+    __ movdbl(Operand(esp, offset), xmm_reg);
+  }
+
+  __ pushad();
+
+  const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
+                                      kDoubleRegsSize;
+
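+  // Above the saved registers the stack holds the bailout id pushed by the
+  // deoptimization table entry and, for lazy deoptimization, the return
+  // address of the patched call site.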
+  // Get the bailout id from the stack.
+  __ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
+
+  // Get the address of the location in the code object if possible
+  // and compute the fp-to-sp delta in register edx.
+  if (type() == EAGER) {
+    __ Set(ecx, Immediate(0));
+    __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
+  } else {
+    __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
+    __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
+  }
+  __ sub(edx, Operand(ebp));
+  __ neg(edx);
+
+  // Allocate a new deoptimizer object.
+  __ PrepareCallCFunction(5, eax);
+  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(Operand(esp, 0 * kPointerSize), eax);  // Function.
+  __ mov(Operand(esp, 1 * kPointerSize), Immediate(type()));  // Bailout type.
+  __ mov(Operand(esp, 2 * kPointerSize), ebx);  // Bailout id.
+  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Code address or 0.
+  __ mov(Operand(esp, 4 * kPointerSize), edx);  // Fp-to-sp delta.
+  __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
+
+  // Preserve deoptimizer object in register eax and get the input
+  // frame descriptor pointer.
+  __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
+
+  // Fill in the input registers.
+  for (int i = 0; i < kNumberOfRegisters; i++) {
+    int offset = (i * kIntSize) + FrameDescription::registers_offset();
+    __ mov(ecx, Operand(esp, (kNumberOfRegisters - 1 - i) * kPointerSize));
+    __ mov(Operand(ebx, offset), ecx);
+  }
+
+  // Fill in the double input registers.
+  int double_regs_offset = FrameDescription::double_registers_offset();
+  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+    int dst_offset = i * kDoubleSize + double_regs_offset;
+    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+    __ movdbl(xmm0, Operand(esp, src_offset));
+    __ movdbl(Operand(ebx, dst_offset), xmm0);
+  }
+
+  // Remove the bailout id and the general purpose registers from the stack.
+  if (type() == EAGER) {
+    __ add(Operand(esp), Immediate(kSavedRegistersAreaSize + kPointerSize));
+  } else {
+    __ add(Operand(esp), Immediate(kSavedRegistersAreaSize + 2 * kPointerSize));
+  }
+
+  // Compute a pointer to the unwinding limit in register ecx; that is
+  // the first stack slot not part of the input frame.
+  __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
+  __ add(ecx, Operand(esp));
+
+  // Unwind the stack down to - but not including - the unwinding
+  // limit and copy the contents of the activation frame to the input
+  // frame description.
+  __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
+  Label pop_loop;
+  __ bind(&pop_loop);
+  __ pop(Operand(edx, 0));
+  __ add(Operand(edx), Immediate(sizeof(uint32_t)));
+  __ cmp(ecx, Operand(esp));
+  __ j(not_equal, &pop_loop);
+
+  // Compute the output frame in the deoptimizer.
+  __ push(eax);
+  __ PrepareCallCFunction(1, ebx);
+  __ mov(Operand(esp, 0 * kPointerSize), eax);
+  __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+  __ pop(eax);
+
+  // Replace the current frame with the output frames.
+  Label outer_push_loop, inner_push_loop;
+  // Outer loop state: eax = current FrameDescription**, edx = one past the
+  // last FrameDescription**.
+  __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
+  __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
+  __ lea(edx, Operand(eax, edx, times_4, 0));
+  __ bind(&outer_push_loop);
+  // Inner loop state: ebx = current FrameDescription*, ecx = loop index.
+  __ mov(ebx, Operand(eax, 0));
+  __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
+  __ bind(&inner_push_loop);
+  __ sub(Operand(ecx), Immediate(sizeof(uint32_t)));
+  __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
+  __ test(ecx, Operand(ecx));
+  __ j(not_zero, &inner_push_loop);
+  __ add(Operand(eax), Immediate(kPointerSize));
+  __ cmp(eax, Operand(edx));
+  __ j(below, &outer_push_loop);
+
+  // In case of OSR, we have to restore the XMM registers.
+  if (type() == OSR) {
+    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+      XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+      int src_offset = i * kDoubleSize + double_regs_offset;
+      __ movdbl(xmm_reg, Operand(ebx, src_offset));
+    }
+  }
+
+  // Push state, pc, and continuation from the last output frame.
+  if (type() != OSR) {
+    __ push(Operand(ebx, FrameDescription::state_offset()));
+  }
+  __ push(Operand(ebx, FrameDescription::pc_offset()));
+  __ push(Operand(ebx, FrameDescription::continuation_offset()));
+
+  // Push the registers from the last output frame.
+  for (int i = 0; i < kNumberOfRegisters; i++) {
+    int offset = (i * kIntSize) + FrameDescription::registers_offset();
+    __ push(Operand(ebx, offset));
+  }
+
+  // Restore the registers from the stack.
+  __ popad();
+
+  // Return to the continuation point.
+  __ ret(0);
+}
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+  // Create a sequence of deoptimization entries.
+  Label done;
+  for (int i = 0; i < count(); i++) {
+    int start = masm()->pc_offset();
+    USE(start);
+    __ push_imm32(i);
+    __ jmp(&done);
+    ASSERT(masm()->pc_offset() - start == table_entry_size_);
+  }
+  __ bind(&done);
+}
+
+#undef __
+
+
+} }  // namespace v8::internal
index 437c741..3734fca 100644 (file)
@@ -1171,6 +1171,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
                             NameOfXMMRegister(regop),
                             NameOfXMMRegister(rm));
              data++;
+          } else if (*data == 0xDB) {
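+             // 66 0F DB /r: pand xmm1, xmm2/m128 (bitwise AND).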
+             data++;
+             int mod, regop, rm;
+             get_modrm(*data, &mod, &regop, &rm);
+             AppendToBuffer("pand %s,%s",
+                            NameOfXMMRegister(regop),
+                            NameOfXMMRegister(rm));
+             data++;
           } else if (*data == 0x73) {
              data++;
              int mod, regop, rm;
index c3fe6c7..8084694 100644 (file)
@@ -49,6 +49,10 @@ static const int kNumJSCallerSaved = 5;
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
+
+// Number of registers for which space is reserved in safepoints.
+static const int kNumSafepointRegisters = 8;
+
 // ----------------------------------------------------
 
 
@@ -90,6 +94,7 @@ class ExitFrameConstants : public AllStatic {
 
 class StandardFrameConstants : public AllStatic {
  public:
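+  // The fixed part of a standard frame holds the caller's pc and fp, the
+  // context, and the function.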
+  static const int kFixedFrameSize    =  4;
   static const int kExpressionsOffset = -3 * kPointerSize;
   static const int kMarkerOffset      = -2 * kPointerSize;
   static const int kContextOffset     = -1 * kPointerSize;
index d175b9c..1f7095f 100644 (file)
@@ -168,7 +168,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
     }
   }
 
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+
   { Comment cmnt(masm_, "[ Stack check");
+    PrepareForBailout(info->function(), NO_REGISTERS);
     NearLabel ok;
     ExternalReference stack_limit =
         ExternalReference::address_of_stack_limit();
@@ -179,10 +184,6 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
     __ bind(&ok);
   }
 
-  if (FLAG_trace) {
-    __ CallRuntime(Runtime::kTraceEnter, 0);
-  }
-
   { Comment cmnt(masm_, "[ Body");
     ASSERT(loop_depth() == 0);
     VisitStatements(function()->body());
@@ -197,6 +198,27 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
 }
 
 
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+  Comment cmnt(masm_, "[ Stack check");
+  NearLabel ok;
+  ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
+  __ cmp(esp, Operand::StaticVariable(stack_limit));
+  __ j(above_equal, &ok, taken);
+  StackCheckStub stub;
+  __ CallStub(&stub);
+  __ bind(&ok);
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+  RecordStackCheck(stmt->OsrEntryId());
+  // Loop stack checks can be patched to perform on-stack
+  // replacement. In order to decide whether or not to perform OSR we
+  // embed the loop depth in a test instruction after the call so we
+  // can extract it from the OSR builtin.
+  ASSERT(loop_depth() > 0);
+  __ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
+}
+
+
 void FullCodeGenerator::EmitReturnSequence() {
   Comment cmnt(masm_, "[ Return sequence");
   if (return_label_.is_bound()) {
@@ -213,7 +235,7 @@ void FullCodeGenerator::EmitReturnSequence() {
     Label check_exit_codesize;
     masm_->bind(&check_exit_codesize);
 #endif
-    CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+    SetSourcePosition(function()->end_position() - 1);
     __ RecordJSReturn();
     // Do not use the leave instruction here because it is too short to
     // patch with the code required by the debugger.
@@ -266,6 +288,7 @@ void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
 void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
   // For simplicity we always test the accumulator register.
   codegen()->Move(result_register(), slot);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
@@ -309,22 +332,26 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
   ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
-    __ jmp(false_label_);
+    if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
-    __ jmp(true_label_);
+    if (true_label_ != fall_through_) __ jmp(true_label_);
   } else if (lit->IsString()) {
     if (String::cast(*lit)->length() == 0) {
-      __ jmp(false_label_);
+      if (false_label_ != fall_through_) __ jmp(false_label_);
     } else {
-      __ jmp(true_label_);
+      if (true_label_ != fall_through_) __ jmp(true_label_);
     }
   } else if (lit->IsSmi()) {
     if (Smi::cast(*lit)->value() == 0) {
-      __ jmp(false_label_);
+      if (false_label_ != fall_through_) __ jmp(false_label_);
     } else {
-      __ jmp(true_label_);
+      if (true_label_ != fall_through_) __ jmp(true_label_);
     }
   } else {
     // For simplicity we always test the accumulator register.
@@ -364,13 +391,14 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
-  ASSERT_EQ(materialize_true, materialize_false);
+  ASSERT(materialize_true == materialize_false);
   __ bind(materialize_true);
 }
 
@@ -403,8 +431,8 @@ void FullCodeGenerator::StackValueContext::Plug(
 
 void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
                                           Label* materialize_false) const {
-  ASSERT(materialize_false == false_label_);
   ASSERT(materialize_true == true_label_);
+  ASSERT(materialize_false == false_label_);
 }
 
 
@@ -427,6 +455,10 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
   if (flag) {
     if (true_label_ != fall_through_) __ jmp(true_label_);
   } else {
@@ -518,6 +550,32 @@ void FullCodeGenerator::Move(Slot* dst,
 }
 
 
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+                                                     bool should_normalize,
+                                                     Label* if_true,
+                                                     Label* if_false) {
+  // Only prepare for bailouts before splits if we're in a test
+  // context. Otherwise, we let the Visit function deal with the
+  // preparation to avoid preparing with the same AST id twice.
+  if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+  NearLabel skip;
+  if (should_normalize) __ jmp(&skip);
+
+  ForwardBailoutStack* current = forward_bailout_stack_;
+  while (current != NULL) {
+    PrepareForBailout(current->expr(), state);
+    current = current->parent();
+  }
+
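+  // The jump above skips this code in normal execution; it is only reached
+  // when execution resumes here after a bailout, with the value of the
+  // expression in eax, and normalizes that value into a branch.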
+  if (should_normalize) {
+    __ cmp(eax, Factory::true_value());
+    Split(equal, if_true, if_false, NULL);
+    __ bind(&skip);
+  }
+}
+
+
 void FullCodeGenerator::EmitDeclaration(Variable* variable,
                                         Variable::Mode mode,
                                         FunctionLiteral* function) {
@@ -628,6 +686,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
   Comment cmnt(masm_, "[ SwitchStatement");
   Breakable nested_statement(this, stmt);
   SetStatementPosition(stmt);
+
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
 
@@ -667,11 +728,12 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
       __ bind(&slow_case);
     }
 
-    CompareFlags flags = inline_smi_code
-        ? NO_SMI_COMPARE_IN_STUB
-        : NO_COMPARE_FLAGS;
-    CompareStub stub(equal, true, flags);
-    __ CallStub(&stub);
+    // Record position before stub call for type feedback.
+    SetSourcePosition(clause->position());
+
+    Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+    __ call(ic, RelocInfo::CODE_TARGET);
+
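+    // The compare IC leaves zero in eax when the operands are equal.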
     __ test(eax, Operand(eax));
     __ j(not_equal, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
@@ -697,6 +759,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
   }
 
   __ bind(nested_statement.break_target());
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
 }
 
 
@@ -851,24 +914,15 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   EmitAssignment(stmt->each());
 
   // Generate code for the body of the loop.
-  Label stack_limit_hit;
-  NearLabel stack_check_done;
   Visit(stmt->body());
 
-  __ StackLimitCheck(&stack_limit_hit);
-  __ bind(&stack_check_done);
-
   // Generate code for going to the next element by incrementing the
   // index (smi) stored on top of the stack.
   __ bind(loop_statement.continue_target());
   __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
-  __ jmp(&loop);
 
-  // Slow case for the stack limit check.
-  StackCheckStub stack_check_stub;
-  __ bind(&stack_limit_hit);
-  __ CallStub(&stack_check_stub);
-  __ jmp(&stack_check_done);
+  EmitStackCheck(stmt);
+  __ jmp(&loop);
 
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_target());
@@ -883,8 +937,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
 void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
                                        bool pretenure) {
   // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning.
-  if (scope()->is_function_scope() &&
+  // space for nested functions that don't need their literals cloned. If
+  // we're running with the --always-opt or the --prepare-always-opt
+  // flag, we need to use the runtime function so that the new function
+  // we are creating here gets a chance to have its code optimized and
+  // doesn't just get a copy of the existing unoptimized code.
+  if (!FLAG_always_opt &&
+      !FLAG_prepare_always_opt &&
+      scope()->is_function_scope() &&
       info->num_literals() == 0 &&
       !pretenure) {
     FastNewClosureStub stub;
@@ -1230,12 +1290,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
         if (key->handle()->IsSymbol()) {
-          VisitForAccumulatorValue(value);
-          __ mov(ecx, Immediate(key->handle()));
-          __ mov(edx, Operand(esp, 0));
           if (property->emit_store()) {
+            VisitForAccumulatorValue(value);
+            __ mov(ecx, Immediate(key->handle()));
+            __ mov(edx, Operand(esp, 0));
             Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
             EmitCallIC(ic, RelocInfo::CODE_TARGET);
+            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+          } else {
+            VisitForEffect(value);
           }
           break;
         }
@@ -1283,6 +1346,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   __ push(Immediate(Smi::FromInt(expr->literal_index())));
   __ push(Immediate(expr->constant_elements()));
   if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+    ASSERT(expr->depth() == 1);
     FastCloneShallowArrayStub stub(
         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
     __ CallStub(&stub);
@@ -1324,6 +1388,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
 
     // Update the write barrier for the array store.
     __ RecordWrite(ebx, offset, result_register(), ecx);
+
+    PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
 
   if (result_saved) {
@@ -1368,17 +1434,30 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
         VisitForStackValue(property->obj());
       }
       break;
-    case KEYED_PROPERTY:
+    case KEYED_PROPERTY: {
       if (expr->is_compound()) {
-        VisitForStackValue(property->obj());
-        VisitForAccumulatorValue(property->key());
+        if (property->is_arguments_access()) {
+          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+          __ push(EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx));
+          __ mov(eax, Immediate(property->key()->AsLiteral()->handle()));
+        } else {
+          VisitForStackValue(property->obj());
+          VisitForAccumulatorValue(property->key());
+        }
         __ mov(edx, Operand(esp, 0));
         __ push(eax);
       } else {
-        VisitForStackValue(property->obj());
-        VisitForStackValue(property->key());
+        if (property->is_arguments_access()) {
+          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+          __ push(EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx));
+          __ push(Immediate(property->key()->AsLiteral()->handle()));
+        } else {
+          VisitForStackValue(property->obj());
+          VisitForStackValue(property->key());
+        }
       }
       break;
+    }
   }
 
   if (expr->is_compound()) {
@@ -1396,6 +1475,12 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
       }
     }
 
+    // For property compound assignments we need another deoptimization
+    // point after the property load.
+    if (property != NULL) {
+      PrepareForBailoutForId(expr->compound_bailout_id(), TOS_REG);
+    }
+
     Token::Value op = expr->binary_op();
     ConstantOperand constant = ShouldInlineSmiCase(op)
         ? GetConstantOperand(op, expr->target(), expr->value())
@@ -1421,6 +1506,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
     } else {
       EmitBinaryOp(op, mode);
     }
+
+    // Deoptimization point in case the binary operation may have side effects.
+    PrepareForBailout(expr->binary_operation(), TOS_REG);
   } else {
     VisitForAccumulatorValue(expr->value());
   }
@@ -1475,13 +1563,12 @@ void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
   __ bind(&call_stub);
   __ sub(Operand(eax), Immediate(value));
   Token::Value op = Token::ADD;
-  GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+  TypeRecordingBinaryOpStub stub(op, mode);
   if (left_is_constant_smi) {
-    __ push(Immediate(value));
-    __ push(eax);
+    __ mov(edx, Immediate(value));
   } else {
-    __ push(eax);
-    __ push(Immediate(value));
+    __ mov(edx, eax);
+    __ mov(eax, Immediate(value));
   }
   __ CallStub(&stub);
   __ bind(&done);
@@ -1506,19 +1593,16 @@ void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
   __ j(zero, &done);
 
   __ bind(&call_stub);
-  if (left_is_constant_smi)  {
-    __ push(Immediate(value));
-    __ push(ecx);
+  if (left_is_constant_smi) {
+    __ mov(edx, Immediate(value));
+    __ mov(eax, ecx);
   } else {
-    // Undo the optimistic sub operation.
-    __ add(Operand(eax), Immediate(value));
-
-    __ push(eax);
-    __ push(Immediate(value));
+    __ add(Operand(eax), Immediate(value));  // Undo the subtraction.
+    __ mov(edx, eax);
+    __ mov(eax, Immediate(value));
   }
-
   Token::Value op = Token::SUB;
-  GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+  TypeRecordingBinaryOpStub stub(op, mode);
   __ CallStub(&stub);
   __ bind(&done);
   context()->Plug(eax);
@@ -1536,9 +1620,9 @@ void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
   __ j(zero, &smi_case);
 
   __ bind(&call_stub);
-  GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
-  __ push(eax);
-  __ push(Immediate(value));
+  __ mov(edx, eax);
+  __ mov(eax, Immediate(value));
+  TypeRecordingBinaryOpStub stub(op, mode);
   __ CallStub(&stub);
   __ jmp(&done);
 
@@ -1595,11 +1679,10 @@ void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
   __ test(eax, Immediate(kSmiTagMask));
   __ j(zero, &smi_case);
 
-  GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
   // The order of the arguments does not matter for bit-ops with a
   // constant operand.
-  __ push(Immediate(value));
-  __ push(eax);
+  __ mov(edx, Immediate(value));
+  TypeRecordingBinaryOpStub stub(op, mode);
   __ CallStub(&stub);
   __ jmp(&done);
 
@@ -1678,14 +1761,9 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
   __ j(zero, &smi_case);
 
   __ bind(&stub_call);
-  GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
-  if (stub.ArgsInRegistersSupported()) {
-    stub.GenerateCall(masm_, edx, ecx);
-  } else {
-    __ push(edx);
-    __ push(ecx);
-    __ CallStub(&stub);
-  }
+  __ mov(eax, ecx);
+  TypeRecordingBinaryOpStub stub(op, mode);
+  __ CallStub(&stub);
   __ jmp(&done);
 
   __ bind(&smi_case);
@@ -1764,15 +1842,9 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
 
 void FullCodeGenerator::EmitBinaryOp(Token::Value op,
                                      OverwriteMode mode) {
-  TypeInfo type = TypeInfo::Unknown();
-  GenericBinaryOpStub stub(op, mode, NO_GENERIC_BINARY_FLAGS, type);
-  if (stub.ArgsInRegistersSupported()) {
-    __ pop(edx);
-    stub.GenerateCall(masm_, edx, eax);
-  } else {
-    __ push(result_register());
-    __ CallStub(&stub);
-  }
+  __ pop(edx);
+  TypeRecordingBinaryOpStub stub(op, mode);
+  __ CallStub(&stub);
   context()->Plug(eax);
 }
 
@@ -1987,13 +2059,14 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
   if (key->IsPropertyName()) {
     VisitForAccumulatorValue(expr->obj());
     EmitNamedPropertyLoad(expr);
+    context()->Plug(eax);
   } else {
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
     __ pop(edx);
     EmitKeyedPropertyLoad(expr);
+    context()->Plug(eax);
   }
-  context()->Plug(eax);
 }
 
 
@@ -2014,6 +2087,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
   EmitCallIC(ic, mode);
+  RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   context()->Plug(eax);
@@ -2046,6 +2120,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
   Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
   __ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize));  // Key.
   EmitCallIC(ic, mode);
+  RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   context()->DropAndPlug(1, eax);  // Drop the key still on the stack.
@@ -2066,6 +2141,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
   __ CallStub(&stub);
+  RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   context()->DropAndPlug(1, eax);
@@ -2073,6 +2149,12 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
 
 
 void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+  // We want to verify that RecordJSReturnSite gets called on all paths
+  // through this function.  Avoid early returns.
+  expr->return_is_recorded_ = false;
+#endif
+
   Comment cmnt(masm_, "[ Call");
   Expression* fun = expr->expression();
   Variable* var = fun->AsVariableProxy()->AsVariable();
@@ -2118,6 +2200,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
     InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
     CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
     __ CallStub(&stub);
+    RecordJSReturnSite(expr);
     // Restore context register.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
     context()->DropAndPlug(1, eax);
@@ -2218,6 +2301,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
     // Emit function call.
     EmitCallWithStub(expr);
   }
+
+#ifdef DEBUG
+  // RecordJSReturnSite should have been called.
+  ASSERT(expr->return_is_recorded_);
+#endif
 }
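
The `return_is_recorded_` flag added above is a debug-only technique for proving that every control-flow path through a function hits a required call. A minimal standalone C++ sketch of the same pattern (names and structure are illustrative, not V8's):

    #include <cassert>

    struct Call {
      bool return_is_recorded_ = false;  // debug-only bookkeeping
    };

    void RecordJSReturnSite(Call* expr) {
      expr->return_is_recorded_ = true;
    }

    // Every branch below must call RecordJSReturnSite, exactly like the
    // keyed-call, named-call, and stub-call paths in VisitCall above.
    void VisitCall(Call* expr, bool keyed) {
      expr->return_is_recorded_ = false;  // reset at entry; avoid early returns
      if (keyed) {
        RecordJSReturnSite(expr);  // keyed-call path
      } else {
        RecordJSReturnSite(expr);  // stub-call path
      }
      assert(expr->return_is_recorded_);  // fires if a path forgot to record
    }

    int main() {
      Call c;
      VisitCall(&c, true);
      VisitCall(&c, false);
    }
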
 
 
@@ -2265,6 +2353,7 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ test(eax, Immediate(kSmiTagMask));
   Split(zero, if_true, if_false, fall_through);
 
@@ -2284,6 +2373,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ test(eax, Immediate(kSmiTagMask | 0x80000000));
   Split(zero, if_true, if_false, fall_through);
 
@@ -2316,6 +2406,7 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
   __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
   __ j(below, if_false);
   __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(below_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2337,6 +2428,7 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
   __ test(eax, Immediate(kSmiTagMask));
   __ j(equal, if_false);
   __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ebx);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(above_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2360,6 +2452,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
   __ test(ebx, Immediate(1 << Map::kIsUndetectable));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(not_zero, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2379,9 +2472,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
-  // used in a few functions in runtime.js which should not normally be hit by
-  // this compiler.
+  // TODO(3110205): Implement this.
+  // Until then, emit false, which is a safe choice.
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ jmp(if_false);
   context()->Plug(if_true, if_false);
 }
@@ -2402,6 +2495,7 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
   __ test(eax, Immediate(kSmiTagMask));
   __ j(zero, if_false);
   __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2423,6 +2517,7 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
   __ test(eax, Immediate(kSmiTagMask));
   __ j(equal, if_false);
   __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2444,6 +2539,7 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
   __ test(eax, Immediate(kSmiTagMask));
   __ j(equal, if_false);
   __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2475,6 +2571,7 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
   __ bind(&check_frame_marker);
   __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
          Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2497,6 +2594,7 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
 
   __ pop(ebx);
   __ cmp(eax, Operand(ebx));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2714,7 +2812,9 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
-  __ CallRuntime(Runtime::kMath_pow, 2);
+
+  MathPowStub stub;
+  __ CallStub(&stub);
   context()->Plug(eax);
 }
 
@@ -2956,11 +3056,13 @@ void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
 
 
 void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+  // Load the arguments on the stack and call the stub.
+  RegExpConstructResultStub stub;
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
-  __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+  __ CallStub(&stub);
   context()->Plug(eax);
 }
 
@@ -2970,7 +3072,64 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
+  Label done;
+  Label slow_case;
+  Register object = eax;
+  Register index_1 = ebx;
+  Register index_2 = ecx;
+  Register elements = edi;
+  Register temp = edx;
+  __ mov(object, Operand(esp, 2 * kPointerSize));
+  // Fetch the map and check whether the array is in the fast case.
+  // Check that the object doesn't require security checks and
+  // has no indexed interceptor.
+  __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, temp);
+  __ j(below, &slow_case);
+  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
+            KeyedLoadIC::kSlowCaseBitFieldMask);
+  __ j(not_zero, &slow_case);
+
+  // Check that the object's elements are in the fast case and writable.
+  __ mov(elements, FieldOperand(object, JSObject::kElementsOffset));
+  __ cmp(FieldOperand(elements, HeapObject::kMapOffset),
+         Immediate(Factory::fixed_array_map()));
+  __ j(not_equal, &slow_case);
+
+  // Check that both indices are smis.
+  __ mov(index_1, Operand(esp, 1 * kPointerSize));
+  __ mov(index_2, Operand(esp, 0));
+  __ mov(temp, index_1);
+  __ or_(temp, Operand(index_2));
+  __ test(temp, Immediate(kSmiTagMask));
+  __ j(not_zero, &slow_case);
+
+  // Bring the element addresses into index_1 and index_2.
+  __ lea(index_1, CodeGenerator::FixedArrayElementOperand(elements, index_1));
+  __ lea(index_2, CodeGenerator::FixedArrayElementOperand(elements, index_2));
+
+  // Swap elements.  Use object and temp as scratch registers.
+  __ mov(object, Operand(index_1, 0));
+  __ mov(temp,   Operand(index_2, 0));
+  __ mov(Operand(index_2, 0), object);
+  __ mov(Operand(index_1, 0), temp);
+
+  Label new_space;
+  __ InNewSpace(elements, temp, equal, &new_space);
+
+  __ mov(object, elements);
+  __ RecordWriteHelper(object, index_1, temp);
+  __ RecordWriteHelper(elements, index_2, temp);
+
+  __ bind(&new_space);
+  // We are done. Drop elements from the stack, and return undefined.
+  __ add(Operand(esp), Immediate(3 * kPointerSize));
+  __ mov(eax, Factory::undefined_value());
+  __ jmp(&done);
+
+  __ bind(&slow_case);
   __ CallRuntime(Runtime::kSwapElements, 3);
+
+  __ bind(&done);
   context()->Plug(eax);
 }
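
One detail in the hunk above deserves a note: or-ing the two indices and testing the smi tag mask once verifies that both values are smis with a single test-and-branch, because a tagged heap pointer would leave its tag bit set in the OR result. A hedged sketch of the trick, assuming V8's ia32 convention that smis carry a 0 in the low tag bit:

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kSmiTagMask = 1;  // low bit: 0 = smi, 1 = heap object

    bool BothAreSmis(uint32_t a, uint32_t b) {
      // If either value has its tag bit set, the OR has it set too.
      return ((a | b) & kSmiTagMask) == 0;
    }

    int main() {
      uint32_t smi_4 = 4u << 1, smi_7 = 7u << 1;  // smi: value shifted left by 1
      uint32_t heap_ptr = 0x08001235u;            // odd bit pattern: tagged pointer
      std::printf("%d\n", BothAreSmis(smi_4, smi_7));     // 1
      std::printf("%d\n", BothAreSmis(smi_4, heap_ptr));  // 0
    }
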
 
@@ -3078,6 +3237,7 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
 
   __ test(FieldOperand(eax, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(zero, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -3382,6 +3542,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
       // Notice that the labels are swapped.
       context()->PrepareTest(&materialize_true, &materialize_false,
                              &if_false, &if_true, &fall_through);
+      if (context()->IsTest()) ForwardBailoutToChild(expr);
       VisitForControl(expr->expression(), if_true, if_false, fall_through);
       context()->Plug(if_false, if_true);  // Labels swapped.
       break;
@@ -3498,14 +3659,24 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
       __ push(eax);
       EmitNamedPropertyLoad(prop);
     } else {
-      VisitForStackValue(prop->obj());
-      VisitForAccumulatorValue(prop->key());
+      if (prop->is_arguments_access()) {
+        VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
+        __ push(EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx));
+        __ mov(eax, Immediate(prop->key()->AsLiteral()->handle()));
+      } else {
+        VisitForStackValue(prop->obj());
+        VisitForAccumulatorValue(prop->key());
+      }
       __ mov(edx, Operand(esp, 0));
       __ push(eax);
       EmitKeyedPropertyLoad(prop);
     }
   }
 
+  // We need a second deoptimization point after loading the value
+  // in case evaluating the property load may have a side effect.
+  PrepareForBailout(expr->increment(), TOS_REG);
+
   // Call ToNumber only if operand is not a smi.
   NearLabel no_conversion;
   if (ShouldInlineSmiCase(expr->op())) {
@@ -3558,12 +3729,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
       __ add(Operand(eax), Immediate(Smi::FromInt(1)));
     }
   }
+
+  // Record position before stub call.
+  SetSourcePosition(expr->position());
+
   // Call stub for +1/-1.
-  GenericBinaryOpStub stub(expr->binary_op(),
-                           NO_OVERWRITE,
-                           NO_GENERIC_BINARY_FLAGS,
-                           TypeInfo::Unknown());
-  stub.GenerateCall(masm(), eax, Smi::FromInt(1));
+  __ mov(edx, eax);
+  __ mov(eax, Immediate(Smi::FromInt(1)));
+  TypeRecordingBinaryOpStub stub(expr->binary_op(),
+                                 NO_OVERWRITE);
+  __ CallStub(&stub);
   __ bind(&done);
 
   // Store the value returned in eax.
@@ -3632,6 +3807,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
     EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    PrepareForBailout(expr, TOS_REG);
     context()->Plug(eax);
   } else if (proxy != NULL &&
              proxy->var()->AsSlot() != NULL &&
@@ -3647,12 +3823,13 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
     __ push(esi);
     __ push(Immediate(proxy->name()));
     __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+    PrepareForBailout(expr, TOS_REG);
     __ bind(&done);
 
     context()->Plug(eax);
   } else {
     // This expression cannot throw a reference error at the top level.
-    Visit(expr);
+    context()->HandleExpression(expr);
   }
 }
 
@@ -3677,6 +3854,7 @@ bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
   { AccumulatorValueContext context(this);
     VisitForTypeofValue(left_unary->expression());
   }
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
 
   if (check->Equals(Heap::number_symbol())) {
     __ test(eax, Immediate(kSmiTagMask));
@@ -3772,6 +3950,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
       __ cmp(eax, Factory::true_value());
       Split(equal, if_true, if_false, fall_through);
       break;
@@ -3780,6 +3959,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
       VisitForStackValue(expr->right());
       InstanceofStub stub;
       __ CallStub(&stub);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ test(eax, Operand(eax));
       // The stub returns 0 for true.
       Split(zero, if_true, if_false, fall_through);
@@ -3836,11 +4016,11 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
         __ bind(&slow_case);
       }
 
-      CompareFlags flags = inline_smi_code
-          ? NO_SMI_COMPARE_IN_STUB
-          : NO_COMPARE_FLAGS;
-      CompareStub stub(cc, strict, flags);
-      __ CallStub(&stub);
+      // Record position and call the compare IC.
+      Handle<Code> ic = CompareIC::GetUninitialized(op);
+      SetSourcePosition(expr->position());
+      __ call(ic, RelocInfo::CODE_TARGET);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ test(eax, Operand(eax));
       Split(cc, if_true, if_false, fall_through);
     }
@@ -3861,6 +4041,8 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
                          &if_true, &if_false, &fall_through);
 
   VisitForAccumulatorValue(expr->expression());
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
   __ cmp(eax, Factory::null_value());
   if (expr->is_strict()) {
     Split(equal, if_true, if_false, fall_through);
@@ -3899,8 +4081,31 @@ Register FullCodeGenerator::context_register() {
 void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
   ASSERT(mode == RelocInfo::CODE_TARGET ||
          mode == RelocInfo::CODE_TARGET_CONTEXT);
+  switch (ic->kind()) {
+    case Code::LOAD_IC:
+      __ IncrementCounter(&Counters::named_load_full, 1);
+      break;
+    case Code::KEYED_LOAD_IC:
+      __ IncrementCounter(&Counters::keyed_load_full, 1);
+      break;
+    case Code::STORE_IC:
+      __ IncrementCounter(&Counters::named_store_full, 1);
+      break;
+    case Code::KEYED_STORE_IC:
+      __ IncrementCounter(&Counters::keyed_store_full, 1);
+      break;
+    default:
+      break;
+  }
+
   __ call(ic, mode);
 
+  // Crankshaft doesn't need patching of inlined loads and stores.
+  // When compiling the snapshot we need to produce code that works
+  // with and without Crankshaft.
+  if (V8::UseCrankshaft() && !Serializer::enabled()) {
+    return;
+  }
+
   // If we're calling a (keyed) load or store stub, we have to mark
   // the call as containing no inlined code so we will not attempt to
   // patch it.
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index a0bc086..b34179a 100644 (file)
--- a/src/ia32/ic-ia32.cc
@@ -710,7 +710,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
   char_at_generator.GenerateFast(masm);
   __ ret(0);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm, call_helper);
 
   __ bind(&miss);
@@ -1629,16 +1629,15 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
 }
 
 
-// One byte opcode for test eax,0xXXXXXXXX.
-static const byte kTestEaxByte = 0xA9;
-
 bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+  if (V8::UseCrankshaft()) return false;
+
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
   // If the instruction following the call is not a test eax, nothing
   // was inlined.
-  if (*test_instruction_address != kTestEaxByte) return false;
+  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
 
   Address delta_address = test_instruction_address + 1;
   // The delta to the start of the map check instruction.
@@ -1682,6 +1681,8 @@ bool LoadIC::PatchInlinedContextualLoad(Address address,
                                         Object* map,
                                         Object* cell,
                                         bool is_dont_delete) {
+  if (V8::UseCrankshaft()) return false;
+
   // The address of the instruction following the call.
   Address mov_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1713,13 +1714,15 @@ bool LoadIC::PatchInlinedContextualLoad(Address address,
 
 
 bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
+  if (V8::UseCrankshaft()) return false;
+
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
 
   // If the instruction following the call is not a test eax, nothing
   // was inlined.
-  if (*test_instruction_address != kTestEaxByte) return false;
+  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
 
   // Extract the encoded deltas from the test eax instruction.
   Address encoded_offsets_address = test_instruction_address + 1;
@@ -1759,11 +1762,13 @@ bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
 
 
 static bool PatchInlinedMapCheck(Address address, Object* map) {
+  if (V8::UseCrankshaft()) return false;
+
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
   // The keyed load has a fast inlined case if the IC call instruction
   // is immediately followed by a test instruction.
-  if (*test_instruction_address != kTestEaxByte) return false;
+  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
 
   // Fetch the offset from the test instruction to the map cmp
   // instruction.  This offset is stored in the last 4 bytes of the 5
@@ -1959,6 +1964,24 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
 }
 
 
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ pop(ebx);
+  __ push(edx);
+  __ push(ecx);
+  __ push(eax);
+  __ push(ebx);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+}
+
+
 // Defined in ic.cc.
 Object* KeyedStoreIC_Miss(Arguments args);
 
@@ -2000,9 +2023,59 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
   __ TailCallExternalReference(ref, 3, 1);
 }
 
+
 #undef __
 
 
+Condition CompareIC::ComputeCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return equal;
+    case Token::LT:
+      return less;
+    case Token::GT:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return less;
+    case Token::LTE:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return greater_equal;
+    case Token::GTE:
+      return greater_equal;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
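
The reversed conditions in ComputeCondition work because, numerically, `a > b` is exactly `b < a` and `a <= b` is exactly `b >= a` (including when either side is NaN, where all four are false); swapping the operands lets the IC reuse the `less` and `greater_equal` conditions while preserving ECMA-262's conversion order. A plain C++ check of the equivalence (not V8 code):

    #include <cassert>
    #include <cmath>

    bool GreaterViaLess(double a, double b) { return b < a; }             // a > b
    bool LessEqualViaGreaterEqual(double a, double b) { return b >= a; }  // a <= b

    int main() {
      double nan = std::nan("");
      assert(GreaterViaLess(3, 2) == (3 > 2));
      assert(GreaterViaLess(2, 3) == (2 > 3));
      assert(GreaterViaLess(nan, 2) == (nan > 2));             // both false
      assert(LessEqualViaGreaterEqual(2, 3) == (2 <= 3));
      assert(LessEqualViaGreaterEqual(nan, 3) == (nan <= 3));  // both false
    }
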
+
+
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+  HandleScope scope;
+  Handle<Code> rewritten;
+#ifdef DEBUG
+  State previous_state = GetState();
+#endif
+  State state = TargetState(x, y);
+  if (state == GENERIC) {
+    CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
+    rewritten = stub.GetCode();
+  } else {
+    ICCompareStub stub(op_, state);
+    rewritten = stub.GetCode();
+  }
+  set_target(*rewritten);
+
+#ifdef DEBUG
+  if (FLAG_trace_ic) {
+    PrintF("[CompareIC (%s->%s)#%s]\n",
+           GetStateName(previous_state),
+           GetStateName(state),
+           Token::Name(op_));
+  }
+#endif
+}
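
UpdateCaches is the compare IC's patching step: on a miss it computes a more informed state from the actual operands, fetches the matching stub, and rewrites the call target, falling back to the fully generic CompareStub once TargetState gives up. A toy version of the monotone state walk (state names and the transition rule are illustrative; V8's real TargetState logic is richer):

    #include <cstdio>
    #include <initializer_list>

    enum State { UNINITIALIZED, SMIS, HEAP_NUMBERS, OBJECTS, GENERIC };

    // Specialize once, then degrade to GENERIC as soon as the observed
    // operand kind disagrees with the cached state.
    State Transition(State current, State observed) {
      if (current == UNINITIALIZED) return observed;
      return current == observed ? current : GENERIC;
    }

    int main() {
      const char* names[] =
          {"UNINITIALIZED", "SMIS", "HEAP_NUMBERS", "OBJECTS", "GENERIC"};
      State s = UNINITIALIZED;
      for (State seen : {SMIS, SMIS, HEAP_NUMBERS}) {
        State next = Transition(s, seen);
        std::printf("[CompareIC (%s->%s)]\n", names[s], names[next]);
        s = next;
      }
    }
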
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
new file mode 100644 (file)
index 0000000..dc0f5e9
--- /dev/null
@@ -0,0 +1,3100 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "ia32/lithium-codegen-ia32.h"
+#include "code-stubs.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator : public PostCallGenerator {
+ public:
+  SafepointGenerator(LCodeGen* codegen,
+                     LPointerMap* pointers,
+                     int deoptimization_index)
+      : codegen_(codegen),
+        pointers_(pointers),
+        deoptimization_index_(deoptimization_index) { }
+  virtual ~SafepointGenerator() { }
+
+  virtual void Generate() {
+    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  int deoptimization_index_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  HPhase phase("Code generation", chunk());
+  ASSERT(is_unused());
+  status_ = GENERATING;
+  CpuFeatures::Scope scope(SSE2);
+  return GeneratePrologue() &&
+      GenerateBody() &&
+      GenerateDeferredCode() &&
+      GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  ASSERT(is_done());
+  code->set_stack_slots(StackSlotCount());
+  code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+  PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::Abort(const char* format, ...) {
+  if (FLAG_trace_bailout) {
+    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
+    PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name);
+    va_list arguments;
+    va_start(arguments, format);
+    OS::VPrint(format, arguments);
+    va_end(arguments);
+    PrintF("\n");
+  }
+  status_ = ABORTED;
+}
+
+
+void LCodeGen::Comment(const char* format, ...) {
+  if (!FLAG_code_comments) return;
+  char buffer[4 * KB];
+  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
+  va_list arguments;
+  va_start(arguments, format);
+  builder.AddFormattedList(format, arguments);
+  va_end(arguments);
+
+  // Copy the string before recording it in the assembler to avoid
+  // issues when the stack allocated buffer goes out of scope.
+  size_t length = builder.position();
+  Vector<char> copy = Vector<char>::New(length + 1);
+  memcpy(copy.start(), builder.Finalize(), copy.length());
+  masm()->RecordComment(copy.start());
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+  ASSERT(is_generating());
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+    __ int3();
+  }
+#endif
+
+  __ push(ebp);  // Caller's frame pointer.
+  __ mov(ebp, esp);
+  __ push(esi);  // Callee's context.
+  __ push(edi);  // Callee's JS function.
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = StackSlotCount();
+  if (slots > 0) {
+    if (FLAG_debug_code) {
+      __ mov(Operand(eax), Immediate(slots));
+      Label loop;
+      __ bind(&loop);
+      __ push(Immediate(kSlotsZapValue));
+      __ dec(eax);
+      __ j(not_zero, &loop);
+    } else {
+      __ sub(Operand(esp), Immediate(slots * kPointerSize));
+    }
+  }
+
+  // Trace the call.
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateBody() {
+  ASSERT(is_generating());
+  bool emit_instructions = true;
+  for (current_instruction_ = 0;
+       !is_aborted() && current_instruction_ < instructions_->length();
+       current_instruction_++) {
+    LInstruction* instr = instructions_->at(current_instruction_);
+    if (instr->IsLabel()) {
+      LLabel* label = LLabel::cast(instr);
+      emit_instructions = !label->HasReplacement();
+    }
+
+    if (emit_instructions) {
+      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+      instr->CompileToNative(this);
+    }
+  }
+  return !is_aborted();
+}
+
+
+LInstruction* LCodeGen::GetNextInstruction() {
+  if (current_instruction_ < instructions_->length() - 1) {
+    return instructions_->at(current_instruction_ + 1);
+  } else {
+    return NULL;
+  }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  ASSERT(is_generating());
+  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+    LDeferredCode* code = deferred_[i];
+    __ bind(code->entry());
+    code->Generate();
+    __ jmp(code->exit());
+  }
+
+  // Deferred code is the last part of the instruction sequence. Mark
+  // the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  ASSERT(is_done());
+  safepoints_.Emit(masm(), StackSlotCount());
+  return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+  return Register::FromAllocationIndex(index);
+}
+
+
+XMMRegister LCodeGen::ToDoubleRegister(int index) const {
+  return XMMRegister::FromAllocationIndex(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  ASSERT(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+
+XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  ASSERT(op->IsDoubleRegister());
+  return ToDoubleRegister(op->index());
+}
+
+
+int LCodeGen::ToInteger32(LConstantOperand* op) const {
+  Handle<Object> value = chunk_->LookupLiteral(op);
+  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
+  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
+      value->Number());
+  return static_cast<int32_t>(value->Number());
+}
+
+
+Immediate LCodeGen::ToImmediate(LOperand* op) {
+  LConstantOperand* const_op = LConstantOperand::cast(op);
+  Handle<Object> literal = chunk_->LookupLiteral(const_op);
+  Representation r = chunk_->LookupLiteralRepresentation(const_op);
+  if (r.IsInteger32()) {
+    ASSERT(literal->IsNumber());
+    return Immediate(static_cast<int32_t>(literal->Number()));
+  } else if (r.IsDouble()) {
+    Abort("unsupported double immediate");
+  }
+  ASSERT(r.IsTagged());
+  return Immediate(literal);
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) const {
+  if (op->IsRegister()) return Operand(ToRegister(op));
+  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
+  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+  int index = op->index();
+  if (index >= 0) {
+    // Local or spill slot. Skip the frame pointer, function, and
+    // context in the fixed part of the frame.
+    return Operand(ebp, -(index + 3) * kPointerSize);
+  } else {
+    // Incoming parameter. Skip the return address.
+    return Operand(ebp, -(index - 1) * kPointerSize);
+  }
+}
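
The arithmetic in ToOperand follows directly from the frame that GeneratePrologue lays down: saved ebp at ebp+0, return address at ebp+4, and the pushed context and function at ebp-4 and ebp-8, so spill slot 0 starts at ebp-12 and parameter index -1 (negative indices denote incoming parameters) resolves to ebp+8. A small sketch that reproduces the offsets, assuming that ia32 layout:

    #include <cstdio>

    constexpr int kPointerSize = 4;  // ia32

    int SlotOffsetFromEbp(int index) {
      if (index >= 0) {
        // Spill slot: skip saved ebp plus the pushed context and function,
        // i.e. three words below the frame pointer.
        return -(index + 3) * kPointerSize;
      }
      // Incoming parameter (negative index): skip the return address.
      return -(index - 1) * kPointerSize;
    }

    int main() {
      std::printf("spill 0  -> ebp%+d\n", SlotOffsetFromEbp(0));   // ebp-12
      std::printf("spill 1  -> ebp%+d\n", SlotOffsetFromEbp(1));   // ebp-16
      std::printf("param -1 -> ebp%+d\n", SlotOffsetFromEbp(-1));  // ebp+8
      std::printf("param -2 -> ebp%+d\n", SlotOffsetFromEbp(-2));  // ebp+12
    }
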
+
+
+void LCodeGen::AddToTranslation(Translation* translation,
+                                LOperand* op,
+                                bool is_tagged) {
+  if (op == NULL) {
+    // TODO(twuerthinger): Introduce marker operands to indicate that this
+    // value is not present and must be reconstructed by the deoptimizer.
+    // Currently this is only used for the arguments object.
+    translation->StoreArgumentsObject();
+  } else if (op->IsStackSlot()) {
+    if (is_tagged) {
+      translation->StoreStackSlot(op->index());
+    } else {
+      translation->StoreInt32StackSlot(op->index());
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    translation->StoreDoubleStackSlot(op->index());
+  } else if (op->IsArgument()) {
+    ASSERT(is_tagged);
+    int src_index = StackSlotCount() + op->index();
+    translation->StoreStackSlot(src_index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    XMMRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(literal);
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  if (instr != NULL) {
+    LPointerMap* pointers = instr->pointer_map();
+    RecordPosition(pointers->position());
+    __ call(code, mode);
+    RegisterLazyDeoptimization(instr);
+  } else {
+    LPointerMap no_pointers(0);
+    RecordPosition(no_pointers.position());
+    __ call(code, mode);
+    RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
+  }
+}
+
+
+void LCodeGen::CallRuntime(Runtime::Function* function,
+                           int num_arguments,
+                           LInstruction* instr) {
+  ASSERT(instr != NULL);
+  LPointerMap* pointers = instr->pointer_map();
+  ASSERT(pointers != NULL);
+  RecordPosition(pointers->position());
+
+  __ CallRuntime(function, num_arguments);
+  // Runtime calls to Throw are not supposed to ever return at the
+  // call site, so don't register lazy deoptimization for these. We do
+  // however have to record a safepoint since throwing exceptions can
+  // cause garbage collections.
+  // BUG(3243555): register a lazy deoptimization point at throw. We need
+  // it to be able to inline functions containing a throw statement.
+  if (!instr->IsThrow()) {
+    RegisterLazyDeoptimization(instr);
+  } else {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kNoDeoptimizationIndex);
+  }
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+  // Create the environment to bail out to. If the call has side effects,
+  // execution has to continue after the call; otherwise execution could
+  // resume at a previous bailout point and repeat the call.
+  LEnvironment* deoptimization_environment;
+  if (instr->HasDeoptimizationEnvironment()) {
+    deoptimization_environment = instr->deoptimization_environment();
+  } else {
+    deoptimization_environment = instr->environment();
+  }
+
+  RegisterEnvironmentForDeoptimization(deoptimization_environment);
+  RecordSafepoint(instr->pointer_map(),
+                  deoptimization_environment->deoptimization_index());
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
+  if (!environment->HasBeenRegistered()) {
+    // Physical stack frame layout:
+    // -x ............. -4  0 ..................................... y
+    // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+    // Layout of the environment:
+    // 0 ..................................................... size-1
+    // [parameters] [locals] [expression stack including arguments]
+
+    // Layout of the translation:
+    // 0 ........................................................ size - 1 + 4
+    // [expression stack including arguments] [locals] [4 words] [parameters]
+    // |>------------  translation_size ------------<|
+
+    int frame_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+    }
+    Translation translation(&translations_, frame_count);
+    environment->WriteTranslation(this, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    environment->Register(deoptimization_index, translation.index());
+    deoptimizations_.Add(environment);
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
+  RegisterEnvironmentForDeoptimization(environment);
+  ASSERT(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+  ASSERT(entry != NULL);
+  if (entry == NULL) {
+    Abort("bailout was not prepared");
+    return;
+  }
+
+  if (FLAG_deopt_every_n_times != 0) {
+    Handle<SharedFunctionInfo> shared(info_->shared_info());
+    Label no_deopt;
+    __ pushfd();
+    __ push(eax);
+    __ push(ebx);
+    __ mov(ebx, shared);
+    __ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
+    __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+    __ j(not_zero, &no_deopt);
+    if (FLAG_trap_on_deopt) __ int3();
+    __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
+    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
+    __ pop(ebx);
+    __ pop(eax);
+    __ popfd();
+    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+
+    __ bind(&no_deopt);
+    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
+    __ pop(ebx);
+    __ pop(eax);
+    __ popfd();
+  }
+
+  if (cc == no_condition) {
+    if (FLAG_trap_on_deopt) __ int3();
+    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+  } else {
+    if (FLAG_trap_on_deopt) {
+      NearLabel done;
+      __ j(NegateCondition(cc), &done);
+      __ int3();
+      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+      __ bind(&done);
+    } else {
+      __ j(cc, entry, RelocInfo::RUNTIME_ENTRY, not_taken);
+    }
+  }
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+  int length = deoptimizations_.length();
+  if (length == 0) return;
+  ASSERT(FLAG_deopt);
+  Handle<DeoptimizationInputData> data =
+      Factory::NewDeoptimizationInputData(length, TENURED);
+
+  data->SetTranslationByteArray(*translations_.CreateByteArray());
+  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+
+  Handle<FixedArray> literals =
+      Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
+  for (int i = 0; i < deoptimization_literals_.length(); i++) {
+    literals->set(i, *deoptimization_literals_[i]);
+  }
+  data->SetLiteralArray(*literals);
+
+  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+  // Populate the deoptimization entries.
+  for (int i = 0; i < length; i++) {
+    LEnvironment* env = deoptimizations_[i];
+    data->SetAstId(i, Smi::FromInt(env->ast_id()));
+    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+    data->SetArgumentsStackHeight(i,
+                                  Smi::FromInt(env->arguments_stack_height()));
+  }
+  code->set_deoptimization_data(*data);
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+  int result = deoptimization_literals_.length();
+  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+  }
+  deoptimization_literals_.Add(literal);
+  return result;
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+  ASSERT(deoptimization_literals_.length() == 0);
+
+  const ZoneList<Handle<JSFunction> >* inlined_closures =
+      chunk()->inlined_closures();
+
+  for (int i = 0, length = inlined_closures->length();
+       i < length;
+       i++) {
+    DefineDeoptimizationLiteral(inlined_closures->at(i));
+  }
+
+  inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               int deoptimization_index) {
+  const ZoneList<LOperand*>* operands = pointers->operands();
+  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+                                                    deoptimization_index);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index());
+    }
+  }
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            int deoptimization_index) {
+  const ZoneList<LOperand*>* operands = pointers->operands();
+  Safepoint safepoint =
+      safepoints_.DefineSafepointWithRegisters(
+          masm(), arguments, deoptimization_index);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index());
+    } else if (pointer->IsRegister()) {
+      safepoint.DefinePointerRegister(ToRegister(pointer));
+    }
+  }
+  // Register esi always contains a pointer to the context.
+  safepoint.DefinePointerRegister(esi);
+}
+
+
+void LCodeGen::RecordPosition(int position) {
+  if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  if (label->is_loop_header()) {
+    Comment(";;; B%d - LOOP entry", label->block_id());
+  } else {
+    Comment(";;; B%d", label->block_id());
+  }
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  LCodeGen::DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+  // xmm0 must always be a scratch register.
+  XMMRegister xmm_scratch = xmm0;
+  LUnallocated marker_operand(LUnallocated::NONE);
+
+  Register cpu_scratch = esi;
+  bool destroys_cpu_scratch = false;
+
+  LGapResolver resolver(move->move_operands(), &marker_operand);
+  const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder();
+  for (int i = moves->length() - 1; i >= 0; --i) {
+    LMoveOperands move = moves->at(i);
+    LOperand* from = move.from();
+    LOperand* to = move.to();
+    ASSERT(!from->IsDoubleRegister() ||
+           !ToDoubleRegister(from).is(xmm_scratch));
+    ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
+    ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
+    ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
+    if (from->IsConstantOperand()) {
+      __ mov(ToOperand(to), ToImmediate(from));
+    } else if (from == &marker_operand) {
+      if (to->IsRegister() || to->IsStackSlot()) {
+        __ mov(ToOperand(to), cpu_scratch);
+        ASSERT(destroys_cpu_scratch);
+      } else {
+        ASSERT(to->IsDoubleRegister() || to->IsDoubleStackSlot());
+        __ movdbl(ToOperand(to), xmm_scratch);
+      }
+    } else if (to == &marker_operand) {
+      if (from->IsRegister() || from->IsStackSlot()) {
+        __ mov(cpu_scratch, ToOperand(from));
+        destroys_cpu_scratch = true;
+      } else {
+        ASSERT(from->IsDoubleRegister() || from->IsDoubleStackSlot());
+        __ movdbl(xmm_scratch, ToOperand(from));
+      }
+    } else if (from->IsRegister()) {
+      __ mov(ToOperand(to), ToRegister(from));
+    } else if (to->IsRegister()) {
+      __ mov(ToRegister(to), ToOperand(from));
+    } else if (from->IsStackSlot()) {
+      ASSERT(to->IsStackSlot());
+      __ push(eax);
+      __ mov(eax, ToOperand(from));
+      __ mov(ToOperand(to), eax);
+      __ pop(eax);
+    } else if (from->IsDoubleRegister()) {
+      __ movdbl(ToOperand(to), ToDoubleRegister(from));
+    } else if (to->IsDoubleRegister()) {
+      __ movdbl(ToDoubleRegister(to), ToOperand(from));
+    } else {
+      ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
+      __ movdbl(xmm_scratch, ToOperand(from));
+      __ movdbl(ToOperand(to), xmm_scratch);
+    }
+  }
+
+  if (destroys_cpu_scratch) {
+    __ mov(cpu_scratch, Operand(ebp, -kPointerSize));
+  }
+}
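
A gap's parallel move has to behave as if every move reads its source before any destination is written, which is why DoParallelMove routes cycles through dedicated scratch locations (xmm0 for doubles, esi for GP values, with esi restored from the frame afterwards). The core cycle-breaking idea, over plain integers and assuming the moves form a permutation (a sketch, not the LGapResolver algorithm):

    #include <cstdio>
    #include <vector>

    // Perform reg[i] = old reg[src[i]] for all i "at once", using one
    // scratch value per cycle.
    void ParallelMove(std::vector<int>& reg, const std::vector<int>& src) {
      int n = static_cast<int>(reg.size());
      std::vector<bool> done(n, false);
      for (int i = 0; i < n; ++i) {
        if (done[i] || src[i] == i) { done[i] = true; continue; }
        int scratch = reg[i];  // save the value the cycle overwrites last
        int j = i;
        while (src[j] != i) {  // shift values around the cycle
          reg[j] = reg[src[j]];
          done[j] = true;
          j = src[j];
        }
        reg[j] = scratch;      // close the cycle from the scratch copy
        done[j] = true;
      }
    }

    int main() {
      std::vector<int> reg = {10, 20, 30};
      ParallelMove(reg, {1, 2, 0});  // r0<-r1, r1<-r2, r2<-r0 simultaneously
      std::printf("%d %d %d\n", reg[0], reg[1], reg[2]);  // 20 30 10
    }
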
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+
+  LInstruction* next = GetNextInstruction();
+  if (next != NULL && next->IsLazyBailout()) {
+    int pc = masm()->pc_offset();
+    safepoints_.SetPcAfterGap(pc);
+  }
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+  switch (instr->hydrogen()->major_key()) {
+    case CodeStub::RegExpConstructResult: {
+      RegExpConstructResultStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::RegExpExec: {
+      RegExpExecStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::SubString: {
+      SubStringStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::StringCharAt: {
+      StringCharAtStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::MathPow: {
+      MathPowStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::NumberToString: {
+      NumberToStringStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::StringAdd: {
+      StringAddStub stub(NO_STRING_ADD_FLAGS);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::StringCompare: {
+      StringCompareStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::TranscendentalCache: {
+      TranscendentalCacheStub stub(instr->transcendental_type());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  LOperand* right = instr->right();
+  ASSERT(ToRegister(instr->result()).is(edx));
+  ASSERT(ToRegister(instr->left()).is(eax));
+  ASSERT(!ToRegister(instr->right()).is(eax));
+  ASSERT(!ToRegister(instr->right()).is(edx));
+
+  Register right_reg = ToRegister(right);
+
+  // Check for x % 0.
+  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ test(right_reg, ToOperand(right));
+    DeoptimizeIf(zero, instr->environment());
+  }
+
+  // Sign extend to edx.
+  __ cdq();
+
+  // Check for (0 % -x) that will produce negative zero.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    NearLabel positive_left;
+    NearLabel done;
+    __ test(eax, Operand(eax));
+    __ j(not_sign, &positive_left);
+    __ idiv(right_reg);
+
+    // Test the remainder for 0; if it is zero, the result would be -0.
+    __ test(edx, Operand(edx));
+    __ j(not_zero, &done);
+
+    DeoptimizeIf(no_condition, instr->environment());
+    __ bind(&positive_left);
+    __ idiv(right_reg);
+    __ bind(&done);
+  } else {
+    __ idiv(right_reg);
+  }
+}
+
+
+void LCodeGen::DoDivI(LDivI* instr) {
+  LOperand* right = instr->right();
+  ASSERT(ToRegister(instr->result()).is(eax));
+  ASSERT(ToRegister(instr->left()).is(eax));
+  ASSERT(!ToRegister(instr->right()).is(eax));
+  ASSERT(!ToRegister(instr->right()).is(edx));
+
+  Register left_reg = eax;
+
+  // Check for x / 0.
+  Register right_reg = ToRegister(right);
+  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ test(right_reg, ToOperand(right));
+    DeoptimizeIf(zero, instr->environment());
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    NearLabel left_not_zero;
+    __ test(left_reg, Operand(left_reg));
+    __ j(not_zero, &left_not_zero);
+    __ test(right_reg, ToOperand(right));
+    DeoptimizeIf(sign, instr->environment());
+    __ bind(&left_not_zero);
+  }
+
+  // Check for (-kMinInt / -1).
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    NearLabel left_not_min_int;
+    __ cmp(left_reg, kMinInt);
+    __ j(not_zero, &left_not_min_int);
+    __ cmp(right_reg, -1);
+    DeoptimizeIf(zero, instr->environment());
+    __ bind(&left_not_min_int);
+  }
+
+  // Sign extend to edx.
+  __ cdq();
+  __ idiv(right_reg);
+
+  // Deoptimize if remainder is not 0.
+  __ test(edx, Operand(edx));
+  DeoptimizeIf(not_zero, instr->environment());
+}
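
Between them, DoModI and DoDivI enumerate every input on which a raw idiv cannot stand in for JavaScript number semantics: a zero divisor, a zero result whose sign should be -0, kMinInt divided by -1 (which faults idiv), and, for division, any non-zero remainder, since the optimized result must be an int32. The same bailout predicate in plain C++ (a sketch of the conditions, not the emitted code):

    #include <climits>
    #include <cstdio>

    // Returns true and sets *result when int32 division gives exactly the
    // JS result; returns false where the lithium code deoptimizes instead.
    bool TryFastDiv(int left, int right, int* result) {
      if (right == 0) return false;                      // x / 0
      if (left == 0 && right < 0) return false;          // 0 / -x would be -0.0
      if (left == INT_MIN && right == -1) return false;  // overflows idiv
      if (left % right != 0) return false;               // result not an int32
      *result = left / right;
      return true;
    }

    int main() {
      int r;
      std::printf("%s\n", TryFastDiv(6, 3, &r) ? "ok" : "deopt");   // ok, r == 2
      std::printf("%s\n", TryFastDiv(7, 3, &r) ? "ok" : "deopt");   // deopt: remainder
      std::printf("%s\n", TryFastDiv(0, -5, &r) ? "ok" : "deopt");  // deopt: -0.0
    }
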
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register left = ToRegister(instr->left());
+  LOperand* right = instr->right();
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ mov(ToRegister(instr->temp()), left);
+  }
+
+  if (right->IsConstantOperand()) {
+    __ imul(left, left, ToInteger32(LConstantOperand::cast(right)));
+  } else {
+    __ imul(left, ToOperand(right));
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(overflow, instr->environment());
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Bail out if the result is supposed to be negative zero.
+    NearLabel done;
+    __ test(left, Operand(left));
+    __ j(not_zero, &done);
+    if (right->IsConstantOperand()) {
+      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
+        DeoptimizeIf(no_condition, instr->environment());
+      }
+    } else {
+      // Test the non-zero operand for negative sign.
+      __ or_(ToRegister(instr->temp()), ToOperand(right));
+      DeoptimizeIf(sign, instr->environment());
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  ASSERT(left->Equals(instr->result()));
+  ASSERT(left->IsRegister());
+
+  if (right->IsConstantOperand()) {
+    int right_operand = ToInteger32(LConstantOperand::cast(right));
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        __ and_(ToRegister(left), right_operand);
+        break;
+      case Token::BIT_OR:
+        __ or_(ToRegister(left), right_operand);
+        break;
+      case Token::BIT_XOR:
+        __ xor_(ToRegister(left), right_operand);
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        __ and_(ToRegister(left), ToOperand(right));
+        break;
+      case Token::BIT_OR:
+        __ or_(ToRegister(left), ToOperand(right));
+        break;
+      case Token::BIT_XOR:
+        __ xor_(ToRegister(left), ToOperand(right));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  ASSERT(left->Equals(instr->result()));
+  ASSERT(left->IsRegister());
+  if (right->IsRegister()) {
+    ASSERT(ToRegister(right).is(ecx));
+
+    switch (instr->op()) {
+      case Token::SAR:
+        __ sar_cl(ToRegister(left));
+        break;
+      case Token::SHR:
+        __ shr_cl(ToRegister(left));
+        if (instr->can_deopt()) {
+          __ test(ToRegister(left), Immediate(0x80000000));
+          DeoptimizeIf(not_zero, instr->environment());
+        }
+        break;
+      case Token::SHL:
+        __ shl_cl(ToRegister(left));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    int value = ToInteger32(LConstantOperand::cast(right));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+    switch (instr->op()) {
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ sar(ToRegister(left), shift_count);
+        }
+        break;
+      case Token::SHR:
+        if (shift_count == 0 && instr->can_deopt()) {
+          __ test(ToRegister(left), Immediate(0x80000000));
+          DeoptimizeIf(not_zero, instr->environment());
+        } else {
+          __ shr(ToRegister(left), shift_count);
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+          __ shl(ToRegister(left), shift_count);
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  ASSERT(left->Equals(instr->result()));
+
+  if (right->IsConstantOperand()) {
+    __ sub(ToOperand(left), ToImmediate(right));
+  } else {
+    __ sub(ToRegister(left), ToOperand(right));
+  }
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(overflow, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  ASSERT(instr->result()->IsRegister());
+  __ mov(ToRegister(instr->result()), instr->value());
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  ASSERT(instr->result()->IsDoubleRegister());
+  XMMRegister res = ToDoubleRegister(instr->result());
+  double v = instr->value();
+  // Use xor to produce +0.0 in a fast and compact way, but avoid
+  // doing so if the constant is -0.0.
+  if (BitCast<uint64_t, double>(v) == 0) {
+    __ xorpd(res, res);
+  } else {
+    int32_t v_int32 = static_cast<int32_t>(v);
+    if (static_cast<double>(v_int32) == v) {
+      __ push_imm32(v_int32);
+      __ cvtsi2sd(res, Operand(esp, 0));
+      __ add(Operand(esp), Immediate(kPointerSize));
+    } else {
+      uint64_t int_val = BitCast<uint64_t, double>(v);
+      int32_t lower = static_cast<int32_t>(int_val);
+      int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+      __ push_imm32(upper);
+      __ push_imm32(lower);
+      __ movdbl(res, Operand(esp, 0));
+      __ add(Operand(esp), Immediate(2 * kPointerSize));
+    }
+  }
+}
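
DoConstantD chooses between three ways to materialize a double: xorpd when the bit pattern is exactly zero (+0.0 only; -0.0 has the sign bit set), cvtsi2sd when the value round-trips through int32, and pushing the raw 64-bit pattern otherwise. The two tests can be checked in ordinary C++ (a sketch of the decision, not the emitted code):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    uint64_t BitsOf(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);  // same idea as BitCast<uint64_t, double>
      return bits;
    }

    const char* Strategy(double v) {
      if (BitsOf(v) == 0) return "xorpd (exactly +0.0)";
      if (v >= -2147483648.0 && v <= 2147483647.0) {  // guard the int32 cast
        int32_t as_int = static_cast<int32_t>(v);
        if (static_cast<double>(as_int) == v) return "cvtsi2sd from int32";
      }
      return "push 64-bit pattern, movdbl";
    }

    int main() {
      std::printf("0.0  -> %s\n", Strategy(0.0));
      std::printf("-0.0 -> %s\n", Strategy(-0.0));  // sign bit set, not xorpd
      std::printf("42.0 -> %s\n", Strategy(42.0));
      std::printf("0.5  -> %s\n", Strategy(0.5));
    }
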
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  ASSERT(instr->result()->IsRegister());
+  __ mov(ToRegister(instr->result()), Immediate(instr->value()));
+}
+
+
+void LCodeGen::DoArrayLength(LArrayLength* instr) {
+  Register result = ToRegister(instr->result());
+
+  if (instr->hydrogen()->value()->IsLoadElements()) {
+    // We load the length directly from the elements array.
+    Register elements = ToRegister(instr->input());
+    __ mov(result, FieldOperand(elements, FixedArray::kLengthOffset));
+  } else {
+    // Check that the receiver really is an array.
+    Register array = ToRegister(instr->input());
+    Register temporary = ToRegister(instr->temporary());
+    __ CmpObjectType(array, JS_ARRAY_TYPE, temporary);
+    DeoptimizeIf(not_equal, instr->environment());
+
+    // Load length directly from the array.
+    __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
+  }
+}
+
+
+void LCodeGen::DoValueOf(LValueOf* instr) {
+  Register input = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+  Register map = ToRegister(instr->temporary());
+  ASSERT(input.is(result));
+  NearLabel done;
+  // If the object is a smi return the object.
+  __ test(input, Immediate(kSmiTagMask));
+  __ j(zero, &done);
+
+  // If the object is not a value type, return the object.
+  __ CmpObjectType(input, JS_VALUE_TYPE, map);
+  __ j(not_equal, &done);
+  __ mov(result, FieldOperand(input, JSValue::kValueOffset));
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoBitNotI(LBitNotI* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->Equals(instr->result()));
+  __ not_(ToRegister(input));
+}
+
+
+void LCodeGen::DoThrow(LThrow* instr) {
+  __ push(ToOperand(instr->input()));
+  CallRuntime(Runtime::kThrow, 1, instr);
+
+  if (FLAG_debug_code) {
+    Comment("Unreachable code.");
+    __ int3();
+  }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  ASSERT(left->Equals(instr->result()));
+
+  if (right->IsConstantOperand()) {
+    __ add(ToOperand(left), ToImmediate(right));
+  } else {
+    __ add(ToRegister(left), ToOperand(right));
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(overflow, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  // Modulo uses a fixed result register.
+  ASSERT(instr->op() == Token::MOD || left->Equals(instr->result()));
+  switch (instr->op()) {
+    case Token::ADD:
+      __ addsd(ToDoubleRegister(left), ToDoubleRegister(right));
+      break;
+    case Token::SUB:
+       __ subsd(ToDoubleRegister(left), ToDoubleRegister(right));
+       break;
+    case Token::MUL:
+      __ mulsd(ToDoubleRegister(left), ToDoubleRegister(right));
+      break;
+    case Token::DIV:
+      __ divsd(ToDoubleRegister(left), ToDoubleRegister(right));
+      break;
+    case Token::MOD: {
+      // Pass two doubles as arguments on the stack.
+      __ PrepareCallCFunction(4, eax);
+      __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
+      __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
+      __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);
+
+      // Return value is in st(0) on ia32.
+      // Store it into the (fixed) result register.
+      __ sub(Operand(esp), Immediate(kDoubleSize));
+      __ fstp_d(Operand(esp, 0));
+      __ movdbl(ToDoubleRegister(instr->result()), Operand(esp, 0));
+      __ add(Operand(esp), Immediate(kDoubleSize));
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  ASSERT(ToRegister(instr->left()).is(edx));
+  ASSERT(ToRegister(instr->right()).is(eax));
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+int LCodeGen::GetNextEmittedBlock(int block) {
+  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
+    LLabel* label = chunk_->GetLabel(i);
+    if (!label->HasReplacement()) return i;
+  }
+  return -1;
+}
+
+
+void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
+  int next_block = GetNextEmittedBlock(current_block_);
+  right_block = chunk_->LookupDestination(right_block);
+  left_block = chunk_->LookupDestination(left_block);
+
+  if (right_block == left_block) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
+  } else if (right_block == next_block) {
+    __ j(cc, chunk_->GetAssemblyLabel(left_block));
+  } else {
+    __ j(cc, chunk_->GetAssemblyLabel(left_block));
+    __ jmp(chunk_->GetAssemblyLabel(right_block));
+  }
+}
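
EmitBranch exploits block layout: if both targets are the same block the branch collapses to a goto, and if either target is the block that will be emitted next, only the jump for the other target is needed and the common case falls through. A sketch of the selection logic, with string output standing in for emitted jumps:

    #include <cstdio>
    #include <string>

    std::string EmitBranch(int left, int right, int next, const std::string& cc) {
      if (left == right)  // degenerate branch: both arms go the same way
        return left == next ? std::string("(fall through)")
                            : "jmp B" + std::to_string(left);
      if (left == next)   // true target falls through; jump only on false
        return "j!" + cc + " B" + std::to_string(right);
      if (right == next)  // false target falls through; jump only on true
        return "j" + cc + " B" + std::to_string(left);
      return "j" + cc + " B" + std::to_string(left) +
             "; jmp B" + std::to_string(right);
    }

    int main() {
      std::printf("%s\n", EmitBranch(2, 3, 2, "nz").c_str());  // j!nz B3
      std::printf("%s\n", EmitBranch(2, 3, 3, "nz").c_str());  // jnz B2
      std::printf("%s\n", EmitBranch(2, 3, 5, "nz").c_str());  // jnz B2; jmp B3
    }
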
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Representation r = instr->hydrogen()->representation();
+  if (r.IsInteger32()) {
+    Register reg = ToRegister(instr->input());
+    __ test(reg, Operand(reg));
+    EmitBranch(true_block, false_block, not_zero);
+  } else if (r.IsDouble()) {
+    XMMRegister reg = ToDoubleRegister(instr->input());
+    __ xorpd(xmm0, xmm0);
+    __ ucomisd(reg, xmm0);
+    EmitBranch(true_block, false_block, not_equal);
+  } else {
+    ASSERT(r.IsTagged());
+    Register reg = ToRegister(instr->input());
+    if (instr->hydrogen()->type().IsBoolean()) {
+      __ cmp(reg, Factory::true_value());
+      EmitBranch(true_block, false_block, equal);
+    } else {
+      Label* true_label = chunk_->GetAssemblyLabel(true_block);
+      Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+      __ cmp(reg, Factory::undefined_value());
+      __ j(equal, false_label);
+      __ cmp(reg, Factory::true_value());
+      __ j(equal, true_label);
+      __ cmp(reg, Factory::false_value());
+      __ j(equal, false_label);
+      __ test(reg, Operand(reg));
+      __ j(equal, false_label);
+      __ test(reg, Immediate(kSmiTagMask));
+      __ j(zero, true_label);
+
+      // Test for double values. Zero is false.
+      NearLabel call_stub;
+      __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+             Factory::heap_number_map());
+      __ j(not_equal, &call_stub);
+      __ fldz();
+      __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
+      __ FCmp();
+      __ j(zero, false_label);
+      __ jmp(true_label);
+
+      // The conversion stub doesn't cause garbage collections so it's
+      // safe to not record a safepoint after the call.
+      __ bind(&call_stub);
+      ToBooleanStub stub;
+      __ pushad();
+      __ push(reg);
+      __ CallStub(&stub);
+      __ test(eax, Operand(eax));
+      __ popad();
+      EmitBranch(true_block, false_block, not_zero);
+    }
+  }
+}
+
+
+void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+  block = chunk_->LookupDestination(block);
+  int next_block = GetNextEmittedBlock(current_block_);
+  if (block != next_block) {
+    // Perform stack overflow check if this goto needs it before jumping.
+    if (deferred_stack_check != NULL) {
+      ExternalReference stack_limit =
+          ExternalReference::address_of_stack_limit();
+      __ cmp(esp, Operand::StaticVariable(stack_limit));
+      __ j(above_equal, chunk_->GetAssemblyLabel(block));
+      __ jmp(deferred_stack_check->entry());
+      deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
+    } else {
+      __ jmp(chunk_->GetAssemblyLabel(block));
+    }
+  }
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
+  __ pushad();
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  __ popad();
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+  class DeferredStackCheck: public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+   private:
+    LGoto* instr_;
+  };
+
+  DeferredStackCheck* deferred = NULL;
+  if (instr->include_stack_check()) {
+    deferred = new DeferredStackCheck(this, instr);
+  }
+  EmitGoto(instr->block_id(), deferred);
+}
+
+
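+// Returns the condition to branch on for the given comparison token.
+// Callers pass is_unsigned == true for double comparisons as well, since
+// ucomisd sets the flags the same way as an unsigned integer compare.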
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = no_condition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = equal;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? below : less;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? above : greater;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? below_equal : less_equal;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ? above_equal : greater_equal;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
+  if (right->IsConstantOperand()) {
+    __ cmp(ToOperand(left), ToImmediate(right));
+  } else {
+    __ cmp(ToRegister(left), ToOperand(right));
+  }
+}
+
+
+void LCodeGen::DoCmpID(LCmpID* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+
+  NearLabel unordered;
+  if (instr->is_double()) {
+    // Don't base result on EFLAGS when a NaN is involved. Instead
+    // jump to the unordered case, which produces a false value.
+    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+    __ j(parity_even, &unordered, not_taken);
+  } else {
+    EmitCmpI(left, right);
+  }
+
+  NearLabel done;
+  Condition cc = TokenToCondition(instr->op(), instr->is_double());
+  __ mov(ToRegister(result), Handle<Object>(Heap::true_value()));
+  __ j(cc, &done);
+
+  __ bind(&unordered);
+  __ mov(ToRegister(result), Handle<Object>(Heap::false_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  if (instr->is_double()) {
+    // Don't base result on EFLAGS when a NaN is involved. Instead
+    // jump to the false block.
+    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+    __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+  } else {
+    EmitCmpI(left, right);
+  }
+
+  Condition cc = TokenToCondition(instr->op(), instr->is_double());
+  EmitBranch(true_block, false_block, cc);
+}
+
+
+void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+  Register result = ToRegister(instr->result());
+
+  __ cmp(left, Operand(right));
+  __ mov(result, Handle<Object>(Heap::true_value()));
+  NearLabel done;
+  __ j(equal, &done);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  __ cmp(left, Operand(right));
+  EmitBranch(true_block, false_block, equal);
+}
+
+
+void LCodeGen::DoIsNull(LIsNull* instr) {
+  Register reg = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+
+  // TODO(fsc): If the expression is known to be a smi, then it's
+  // definitely not null. Materialize false.
+
+  __ cmp(reg, Factory::null_value());
+  if (instr->is_strict()) {
+    __ mov(result, Handle<Object>(Heap::true_value()));
+    NearLabel done;
+    __ j(equal, &done);
+    __ mov(result, Handle<Object>(Heap::false_value()));
+    __ bind(&done);
+  } else {
+    NearLabel true_value, false_value, done;
+    __ j(equal, &true_value);
+    __ cmp(reg, Factory::undefined_value());
+    __ j(equal, &true_value);
+    __ test(reg, Immediate(kSmiTagMask));
+    __ j(zero, &false_value);
+    // Check for undetectable objects by looking in the bit field in
+    // the map. The object has already been smi checked.
+    Register scratch = result;
+    __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+    __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
+    __ test(scratch, Immediate(1 << Map::kIsUndetectable));
+    __ j(not_zero, &true_value);
+    __ bind(&false_value);
+    __ mov(result, Handle<Object>(Heap::false_value()));
+    __ jmp(&done);
+    __ bind(&true_value);
+    __ mov(result, Handle<Object>(Heap::true_value()));
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+  Register reg = ToRegister(instr->input());
+
+  // TODO(fsc): If the expression is known to be a smi, then it's
+  // definitely not null. Jump to the false block.
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ cmp(reg, Factory::null_value());
+  if (instr->is_strict()) {
+    EmitBranch(true_block, false_block, equal);
+  } else {
+    Label* true_label = chunk_->GetAssemblyLabel(true_block);
+    Label* false_label = chunk_->GetAssemblyLabel(false_block);
+    __ j(equal, true_label);
+    __ cmp(reg, Factory::undefined_value());
+    __ j(equal, true_label);
+    __ test(reg, Immediate(kSmiTagMask));
+    __ j(zero, false_label);
+    // Check for undetectable objects by looking in the bit field in
+    // the map. The object has already been smi checked.
+    Register scratch = ToRegister(instr->temp());
+    __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+    __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
+    __ test(scratch, Immediate(1 << Map::kIsUndetectable));
+    EmitBranch(true_block, false_block, not_zero);
+  }
+}
+
+
+void LCodeGen::DoIsSmi(LIsSmi* instr) {
+  Operand input = ToOperand(instr->input());
+  Register result = ToRegister(instr->result());
+
+  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+  __ test(input, Immediate(kSmiTagMask));
+  __ mov(result, Handle<Object>(Heap::true_value()));
+  NearLabel done;
+  __ j(zero, &done);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  Operand input = ToOperand(instr->input());
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ test(input, Immediate(kSmiTagMask));
+  EmitBranch(true_block, false_block, zero);
+}
+
+
+InstanceType LHasInstanceType::TestType() {
+  InstanceType from = hydrogen()->from();
+  InstanceType to = hydrogen()->to();
+  if (from == FIRST_TYPE) return to;
+  ASSERT(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
+Condition LHasInstanceType::BranchCondition() {
+  InstanceType from = hydrogen()->from();
+  InstanceType to = hydrogen()->to();
+  if (from == to) return equal;
+  if (to == LAST_TYPE) return above_equal;
+  if (from == FIRST_TYPE) return below_equal;
+  UNREACHABLE();
+  return equal;
+}
+
+
+void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
+  Register input = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+
+  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+  __ test(input, Immediate(kSmiTagMask));
+  NearLabel done, is_false;
+  __ j(zero, &is_false);
+  __ CmpObjectType(input, instr->TestType(), result);
+  __ j(NegateCondition(instr->BranchCondition()), &is_false);
+  __ mov(result, Handle<Object>(Heap::true_value()));
+  __ jmp(&done);
+  __ bind(&is_false);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register input = ToRegister(instr->input());
+  Register temp = ToRegister(instr->temp());
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  __ test(input, Immediate(kSmiTagMask));
+  __ j(zero, false_label);
+
+  __ CmpObjectType(input, instr->TestType(), temp);
+  EmitBranch(true_block, false_block, instr->BranchCondition());
+}
+
+
+void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
+  Register input = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+
+  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+  __ mov(result, Handle<Object>(Heap::true_value()));
+  __ test(FieldOperand(input, String::kHashFieldOffset),
+          Immediate(String::kContainsCachedArrayIndexMask));
+  NearLabel done;
+  __ j(zero, &done);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Register input = ToRegister(instr->input());
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ test(FieldOperand(input, String::kHashFieldOffset),
+          Immediate(String::kContainsCachedArrayIndexMask));
+  EmitBranch(true_block, false_block, equal);
+}
+
+
+// Branches to a label or falls through with the answer in the z flag.  Trashes
+// the temp registers, but not the input.  Only input and temp2 may alias.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+                               Label* is_false,
+                               Handle<String> class_name,
+                               Register input,
+                               Register temp,
+                               Register temp2) {
+  ASSERT(!input.is(temp));
+  ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
+  __ test(input, Immediate(kSmiTagMask));
+  __ j(zero, is_false);
+  __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
+  __ j(below, is_false);
+
+  // Map is now in temp.
+  // Functions have class 'Function'.
+  __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
+  if (class_name->IsEqualTo(CStrVector("Function"))) {
+    __ j(equal, is_true);
+  } else {
+    __ j(equal, is_false);
+  }
+
+  // Check if the constructor in the map is a function.
+  __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
+
+  // As long as JS_FUNCTION_TYPE is the last instance type and it is
+  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+  // LAST_JS_OBJECT_TYPE.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
+  if (class_name->IsEqualTo(CStrVector("Object"))) {
+    __ j(not_equal, is_true);
+  } else {
+    __ j(not_equal, is_false);
+  }
+
+  // temp now contains the constructor function. Grab the
+  // instance class name from there.
+  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(temp, FieldOperand(temp,
+                            SharedFunctionInfo::kInstanceClassNameOffset));
+  // The class name we are testing against is a symbol because it's a literal.
+  // The name in the constructor is a symbol because of the way the context is
+  // booted.  This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax.  Since both sides are symbols it is sufficient to use an identity
+  // comparison.
+  __ cmp(temp, class_name);
+  // End with the answer in the z flag.
+}
+
+
+void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
+  Register input = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+  ASSERT(input.is(result));
+  Register temp = ToRegister(instr->temporary());
+  Handle<String> class_name = instr->hydrogen()->class_name();
+  NearLabel done;
+  Label is_true, is_false;
+
+  EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input);
+
+  __ j(not_equal, &is_false);
+
+  __ bind(&is_true);
+  __ mov(result, Handle<Object>(Heap::true_value()));
+  __ jmp(&done);
+
+  __ bind(&is_false);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Register input = ToRegister(instr->input());
+  Register temp = ToRegister(instr->temporary());
+  Register temp2 = ToRegister(instr->temporary2());
+  if (input.is(temp)) {
+    // Swap.
+    Register swapper = temp;
+    temp = temp2;
+    temp2 = swapper;
+  }
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
+
+  EmitBranch(true_block, false_block, equal);
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Register reg = ToRegister(instr->input());
+  int true_block = instr->true_block_id();
+  int false_block = instr->false_block_id();
+
+  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
+  EmitBranch(true_block, false_block, equal);
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  InstanceofStub stub;
+  __ push(ToOperand(instr->left()));
+  __ push(ToOperand(instr->right()));
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+
+  NearLabel true_value, done;
+  __ test(eax, Operand(eax));
+  __ j(zero, &true_value);
+  __ mov(ToRegister(instr->result()), Factory::false_value());
+  __ jmp(&done);
+  __ bind(&true_value);
+  __ mov(ToRegister(instr->result()), Factory::true_value());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  InstanceofStub stub;
+  __ push(ToOperand(instr->left()));
+  __ push(ToOperand(instr->right()));
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ test(eax, Operand(eax));
+  EmitBranch(true_block, false_block, zero);
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return equal;
+    case Token::LT:
+      return less;
+    case Token::GT:
+      return greater;
+    case Token::LTE:
+      return less_equal;
+    case Token::GTE:
+      return greater_equal;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  Token::Value op = instr->op();
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
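+  // The compare stub expects compare condition and the input operands
+  // reversed for GT and LTE.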
+  Condition condition = ComputeCompareCondition(op);
+  if (op == Token::GT || op == Token::LTE) {
+    condition = ReverseCondition(condition);
+  }
+  NearLabel true_value, done;
+  __ test(eax, Operand(eax));
+  __ j(condition, &true_value);
+  __ mov(ToRegister(instr->result()), Factory::false_value());
+  __ jmp(&done);
+  __ bind(&true_value);
+  __ mov(ToRegister(instr->result()), Factory::true_value());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
+  Token::Value op = instr->op();
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  // The compare stub expects compare condition and the input operands
+  // reversed for GT and LTE.
+  Condition condition = ComputeCompareCondition(op);
+  if (op == Token::GT || op == Token::LTE) {
+    condition = ReverseCondition(condition);
+  }
+  __ test(eax, Operand(eax));
+  EmitBranch(true_block, false_block, condition);
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace) {
+    // Preserve the return value on the stack and rely on the runtime
+    // call to return the value in the same register.
+    __ push(eax);
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
+  __ mov(esp, ebp);
+  __ pop(ebp);
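+  // The +1 pops the receiver slot along with the parameters.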
+  __ ret((ParameterCount() + 1) * kPointerSize);
+}
+
+
+void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
+  if (instr->hydrogen()->check_hole_value()) {
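+    // A cell holding the hole means the global property was deleted;
+    // deoptimize and let generic code handle the access.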
+    __ cmp(result, Factory::the_hole_value());
+    DeoptimizeIf(equal, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
+  Register value = ToRegister(instr->input());
+  __ mov(Operand::Cell(instr->hydrogen()->cell()), value);
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  Register object = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+  if (instr->hydrogen()->is_in_object()) {
+    __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
+  } else {
+    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
+    __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
+  }
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(eax));
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  __ mov(ecx, instr->name());
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadElements(LLoadElements* instr) {
+  ASSERT(instr->result()->Equals(instr->input()));
+  Register reg = ToRegister(instr->input());
+  __ mov(reg, FieldOperand(reg, JSObject::kElementsOffset));
+  if (FLAG_debug_code) {
+    NearLabel done;
+    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+           Immediate(Factory::fixed_array_map()));
+    __ j(equal, &done);
+    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+           Immediate(Factory::fixed_cow_array_map()));
+    __ Check(equal, "Check for fast elements failed.");
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Register arguments = ToRegister(instr->arguments());
+  Register length = ToRegister(instr->length());
+  Operand index = ToOperand(instr->index());
+  Register result = ToRegister(instr->result());
+
+  __ sub(length, index);
+  DeoptimizeIf(below_equal, instr->environment());
+
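+  // There are two words between the frame pointer and the last argument.
+  // Subtracting index from length accounts for one of them; the
+  // kPointerSize displacement adds the other.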
+  __ mov(result, Operand(arguments, length, times_4, kPointerSize));
+}
+
+
+void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+  Register elements = ToRegister(instr->elements());
+  Register key = ToRegister(instr->key());
+  Register result;
+  if (instr->load_result() != NULL) {
+    result = ToRegister(instr->load_result());
+  } else {
+    result = ToRegister(instr->result());
+    ASSERT(result.is(elements));
+  }
+
+  // Load the result.
+  __ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
+
+  Representation r = instr->hydrogen()->representation();
+  if (r.IsInteger32()) {
+    // Untag and check for smi.
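+    // SmiUntag shifts the tag bit into the carry flag, so carry set means
+    // the element was a heap object, not a smi.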
+    __ SmiUntag(result);
+    DeoptimizeIf(carry, instr->environment());
+  } else if (r.IsDouble()) {
+    EmitNumberUntagD(result,
+                     ToDoubleRegister(instr->result()),
+                     instr->environment());
+  } else {
+    // Check for the hole value.
+    ASSERT(r.IsTagged());
+    __ cmp(result, Factory::the_hole_value());
+    DeoptimizeIf(equal, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(edx));
+  ASSERT(ToRegister(instr->key()).is(eax));
+
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Register result = ToRegister(instr->result());
+
+  // Check for arguments adapter frame.
+  Label done, adapted;
+  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
+  __ cmp(Operand(result),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(equal, &adapted);
+
+  // No arguments adaptor frame.
+  __ mov(result, Operand(ebp));
+  __ jmp(&done);
+
+  // Arguments adaptor frame present.
+  __ bind(&adapted);
+  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+  // Done. Pointer to topmost argument is in result.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Operand elem = ToOperand(instr->input());
+  Register result = ToRegister(instr->result());
+
+  Label done;
+
+  // No arguments adaptor frame. Number of arguments is fixed.
+  __ cmp(ebp, elem);
+  __ mov(result, Immediate(scope()->num_parameters()));
+  __ j(equal, &done);
+
+  // Arguments adaptor frame present. Get argument length from there.
+  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(result, Operand(result,
+                         ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(result);
+
+  // Done. Argument length is in result register.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  ASSERT(ToRegister(instr->function()).is(edi));
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  // If the receiver is null or undefined, we have to pass the
+  // global object as a receiver.
+  NearLabel global_receiver, receiver_ok;
+  __ cmp(receiver, Factory::null_value());
+  __ j(equal, &global_receiver);
+  __ cmp(receiver, Factory::undefined_value());
+  __ j(not_equal, &receiver_ok);
+  __ bind(&global_receiver);
+  __ mov(receiver, GlobalObjectOperand());
+  __ bind(&receiver_ok);
+
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+
+  Label invoke;
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
+  __ cmp(length, kArgumentsLimit);
+  DeoptimizeIf(above, instr->environment());
+
+  __ push(receiver);
+  __ mov(receiver, length);
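+  // Keep the argument count in the receiver's register (eax) while the
+  // loop below consumes length as its counter.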
+
+  // Loop through the arguments pushing them onto the execution
+  // stack.
+  Label loop;
+  // length is a small non-negative integer, due to the test above.
+  __ test(length, Operand(length));
+  __ j(zero, &invoke);
+  __ bind(&loop);
+  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
+  __ dec(length);
+  __ j(not_zero, &loop);
+
+  // Invoke the function.
+  __ bind(&invoke);
+  ASSERT(receiver.is(eax));
+  v8::internal::ParameterCount actual(eax);
+  SafepointGenerator safepoint_generator(this,
+                                         instr->pointer_map(),
+                                         Safepoint::kNoDeoptimizationIndex);
+  __ InvokeFunction(edi, actual, CALL_FUNCTION, &safepoint_generator);
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  LOperand* argument = instr->input();
+  if (argument->IsConstantOperand()) {
+    __ push(ToImmediate(argument));
+  } else {
+    __ push(ToOperand(argument));
+  }
+}
+
+
+void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+}
+
+
+void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ mov(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int arity,
+                                 LInstruction* instr) {
+  // Change context if needed.
+  bool change_context =
+      (graph()->info()->closure()->context() != function->context()) ||
+      scope()->contains_with() ||
+      (scope()->num_heap_slots() > 0);
+  if (change_context) {
+    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+  }
+
+  // Set eax to the argument count if adaptation is not needed. Assumes that
+  // eax is available to write to at this point.
+  if (!function->NeedsArgumentsAdaption()) {
+    __ mov(eax, arity);
+  }
+
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+
+  // Invoke function.
+  if (*function == *graph()->info()->closure()) {
+    __ CallSelf();
+  } else {
+    __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+  }
+
+  // Set up deoptimization.
+  RegisterLazyDeoptimization(instr);
+
+  // Restore context.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+  __ mov(edi, instr->function());
+  CallKnownFunction(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+  Register input_reg = ToRegister(instr->input());
+  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+         Factory::heap_number_map());
+  DeoptimizeIf(not_equal, instr->environment());
+
+  Label done;
+  Register tmp = input_reg.is(eax) ? ecx : eax;
+  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
+
+  // Preserve the value of all registers.
+  __ PushSafepointRegisters();
+
+  Label negative;
+  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive,
+  // just return it.
+  __ test(tmp, Immediate(HeapNumber::kSignMask));
+  __ j(not_zero, &negative);
+  __ mov(tmp, input_reg);
+  __ jmp(&done);
+
+  __ bind(&negative);
+
+  Label allocated, slow;
+  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
+  __ jmp(&allocated);
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  // Set the pointer to the new heap number in tmp.
+  if (!tmp.is(eax)) __ mov(tmp, eax);
+
+  // Restore input_reg after call to runtime.
+  __ mov(input_reg, Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize));
+
+  __ bind(&allocated);
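+  // Copy the input heap number into the new one, clearing the sign bit
+  // of the exponent word to produce the absolute value.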
+  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+  __ and_(tmp2, ~HeapNumber::kSignMask);
+  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
+  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
+
+  __ bind(&done);
+  __ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp);
+
+  __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+                                    LUnaryMathOperation* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+   private:
+    LUnaryMathOperation* instr_;
+  };
+
+  ASSERT(instr->input()->Equals(instr->result()));
+  Representation r = instr->hydrogen()->value()->representation();
+
+  if (r.IsDouble()) {
+    XMMRegister scratch = xmm0;
+    XMMRegister input_reg = ToDoubleRegister(instr->input());
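+    // x and -x differ only in the sign bit, so their bitwise AND clears
+    // the sign bit and yields |x|.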
+    __ pxor(scratch, scratch);
+    __ subsd(scratch, input_reg);
+    __ pand(input_reg, scratch);
+  } else if (r.IsInteger32()) {
+    Register input_reg = ToRegister(instr->input());
+    __ test(input_reg, Operand(input_reg));
+    Label is_positive;
+    __ j(not_sign, &is_positive);
+    __ neg(input_reg);
+    __ test(input_reg, Operand(input_reg));
+    DeoptimizeIf(negative, instr->environment());
+    __ bind(&is_positive);
+  } else {  // Tagged case.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new DeferredMathAbsTaggedHeapNumber(this, instr);
+    Label not_smi;
+    Register input_reg = ToRegister(instr->input());
+    // Smi check.
+    __ test(input_reg, Immediate(kSmiTagMask));
+    __ j(not_zero, deferred->entry());
+    __ test(input_reg, Operand(input_reg));
+    Label is_positive;
+    __ j(not_sign, &is_positive);
+    __ neg(input_reg);
+
+    __ test(input_reg, Operand(input_reg));
+    DeoptimizeIf(negative, instr->environment());
+
+    __ bind(&is_positive);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+  XMMRegister xmm_scratch = xmm0;
+  Register output_reg = ToRegister(instr->result());
+  XMMRegister input_reg = ToDoubleRegister(instr->input());
+  __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
+  __ ucomisd(input_reg, xmm_scratch);
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(below_equal, instr->environment());
+  } else {
+    DeoptimizeIf(below, instr->environment());
+  }
+
+  // Use truncating instruction (OK because input is positive).
+  __ cvttsd2si(output_reg, Operand(input_reg));
+
+  // Overflow is signalled with minint.
+  __ cmp(output_reg, 0x80000000u);
+  DeoptimizeIf(equal, instr->environment());
+}
+
+
+void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+  XMMRegister xmm_scratch = xmm0;
+  Register output_reg = ToRegister(instr->result());
+  XMMRegister input_reg = ToDoubleRegister(instr->input());
+
+  // xmm_scratch = 0.5
+  ExternalReference one_half = ExternalReference::address_of_one_half();
+  __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
+
+  // input = input + 0.5
+  __ addsd(input_reg, xmm_scratch);
+
+  // We need to return -0 for the input range [-0.5, 0), otherwise
+  // compute Math.floor(value + 0.5).
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ ucomisd(input_reg, xmm_scratch);
+    DeoptimizeIf(below_equal, instr->environment());
+  } else {
+    // If we don't need to bailout on -0, we check only bailout
+    // on negative inputs.
+    __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
+    __ ucomisd(input_reg, xmm_scratch);
+    DeoptimizeIf(below, instr->environment());
+  }
+
+  // Compute Math.floor(value + 0.5).
+  // Use truncating instruction (OK because input is positive).
+  __ cvttsd2si(output_reg, Operand(input_reg));
+
+  // Overflow is signalled with minint.
+  __ cmp(output_reg, 0x80000000u);
+  DeoptimizeIf(equal, instr->environment());
+}
+
+
+void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+  XMMRegister input_reg = ToDoubleRegister(instr->input());
+  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+  __ sqrtsd(input_reg, input_reg);
+}
+
+
+void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathAbs:
+      DoMathAbs(instr);
+      break;
+    case kMathFloor:
+      DoMathFloor(instr);
+      break;
+    case kMathRound:
+      DoMathRound(instr);
+      break;
+    case kMathSqrt:
+      DoMathSqrt(instr);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  int arity = instr->arity();
+  Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallNamed(LCallNamed* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  int arity = instr->arity();
+  Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+  __ mov(ecx, instr->name());
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  int arity = instr->arity();
+  CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ Drop(1);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  int arity = instr->arity();
+  Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+  __ mov(ecx, instr->name());
+  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+  __ mov(edi, instr->target());
+  CallKnownFunction(instr->target(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+  ASSERT(ToRegister(instr->input()).is(edi));
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
+  __ Set(eax, Immediate(instr->arity()));
+  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  Register object = ToRegister(instr->object());
+  Register value = ToRegister(instr->value());
+  int offset = instr->offset();
+
+  if (!instr->transition().is_null()) {
+    __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
+  }
+
+  // Do the store.
+  if (instr->is_in_object()) {
+    __ mov(FieldOperand(object, offset), value);
+    if (instr->needs_write_barrier()) {
+      Register temp = ToRegister(instr->temp());
+      // Update the write barrier for the object for in-object properties.
+      __ RecordWrite(object, offset, value, temp);
+    }
+  } else {
+    Register temp = ToRegister(instr->temp());
+    __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
+    __ mov(FieldOperand(temp, offset), value);
+    if (instr->needs_write_barrier()) {
+      // Update the write barrier for the properties array.
+      // object is used as a scratch register.
+      __ RecordWrite(temp, offset, value, object);
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(edx));
+  ASSERT(ToRegister(instr->value()).is(eax));
+
+  __ mov(ecx, instr->name());
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+  __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
+  DeoptimizeIf(above_equal, instr->environment());
+}
+
+
+void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
+  Register value = ToRegister(instr->value());
+  Register elements = ToRegister(instr->object());
+  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+
+  // Do the store.
+  if (instr->key()->IsConstantOperand()) {
+    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    int offset =
+        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+    __ mov(FieldOperand(elements, offset), value);
+  } else {
+    __ mov(FieldOperand(elements, key, times_4, FixedArray::kHeaderSize),
+           value);
+  }
+
+  // Update the write barrier unless we're certain that we're storing a smi.
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    // Compute address of modified element and store it into key register.
+    __ lea(key, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
+    __ RecordWrite(elements, key, value);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(edx));
+  ASSERT(ToRegister(instr->key()).is(ecx));
+  ASSERT(ToRegister(instr->value()).is(eax));
+
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister() || input->IsStackSlot());
+  LOperand* output = instr->result();
+  ASSERT(output->IsDoubleRegister());
+  __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+  class DeferredNumberTagI: public LDeferredCode {
+   public:
+    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+   private:
+    LNumberTagI* instr_;
+  };
+
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  Register reg = ToRegister(input);
+
+  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
+  __ SmiTag(reg);
+  __ j(overflow, deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
+  Label slow;
+  Register reg = ToRegister(instr->input());
+  Register tmp = reg.is(eax) ? ecx : eax;
+
+  // Preserve the value of all registers.
+  __ PushSafepointRegisters();
+
+  // There was overflow, so bits 30 and 31 of the original integer
+  // disagree. Try to allocate a heap number in new space and store
+  // the value in there. If that fails, call the runtime system.
+  NearLabel done;
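+  // SmiUntag's arithmetic shift restores bits 0..30 of the original value;
+  // the overflowing tag inverted bit 31, so flipping the sign bit recovers
+  // the full integer.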
+  __ SmiUntag(reg);
+  __ xor_(reg, 0x80000000);
+  __ cvtsi2sd(xmm0, Operand(reg));
+  if (FLAG_inline_new) {
+    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
+    __ jmp(&done);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+
+  // TODO(3095996): Put a valid pointer value in the stack slot where the result
+  // register is stored, as this register is in the pointer map, but contains an
+  // integer value.
+  __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), Immediate(0));
+
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  if (!reg.is(eax)) __ mov(reg, eax);
+
+  // Done. Put the value in xmm0 into the value of the allocated heap
+  // number.
+  __ bind(&done);
+  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+  __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), reg);
+  __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD: public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+   private:
+    LNumberTagD* instr_;
+  };
+
+  XMMRegister input_reg = ToDoubleRegister(instr->input());
+  Register reg = ToRegister(instr->result());
+  Register tmp = ToRegister(instr->temp());
+
+  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+  if (FLAG_inline_new) {
+    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
+  } else {
+    __ jmp(deferred->entry());
+  }
+  __ bind(deferred->exit());
+  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ Set(reg, Immediate(0));
+
+  __ PushSafepointRegisters();
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), eax);
+  __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
+  __ SmiTag(ToRegister(input));
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  if (instr->needs_check()) {
+    __ test(ToRegister(input), Immediate(kSmiTagMask));
+    DeoptimizeIf(not_zero, instr->environment());
+  }
+  __ SmiUntag(ToRegister(input));
+}
+
+
+void LCodeGen::EmitNumberUntagD(Register input_reg,
+                                XMMRegister result_reg,
+                                LEnvironment* env) {
+  NearLabel load_smi, heap_number, done;
+
+  // Smi check.
+  __ test(input_reg, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi, not_taken);
+
+  // Heap number map check.
+  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+         Factory::heap_number_map());
+  __ j(equal, &heap_number);
+
+  __ cmp(input_reg, Factory::undefined_value());
+  DeoptimizeIf(not_equal, env);
+
+  // Convert undefined to NaN.
+  __ push(input_reg);
+  __ mov(input_reg, Factory::nan_value());
+  __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  __ pop(input_reg);
+  __ jmp(&done);
+
+  // Heap number to XMM conversion.
+  __ bind(&heap_number);
+  __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  // Smi to XMM conversion.
+  __ bind(&load_smi);
+  __ SmiUntag(input_reg);  // Untag smi before converting to float.
+  __ cvtsi2sd(result_reg, Operand(input_reg));
+  __ SmiTag(input_reg);  // Retag smi.
+  __ bind(&done);
+}
+
+
+class DeferredTaggedToI: public LDeferredCode {
+ public:
+  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+      : LDeferredCode(codegen), instr_(instr) { }
+  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ private:
+  LTaggedToI* instr_;
+};
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+  NearLabel done, heap_number;
+  Register input_reg = ToRegister(instr->input());
+
+  // Heap number map check.
+  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+         Factory::heap_number_map());
+
+  if (instr->truncating()) {
+    __ j(equal, &heap_number);
+    // Check for undefined. Undefined is converted to zero for truncating
+    // conversions.
+    __ cmp(input_reg, Factory::undefined_value());
+    DeoptimizeIf(not_equal, instr->environment());
+    __ mov(input_reg, 0);
+    __ jmp(&done);
+
+    __ bind(&heap_number);
+    if (CpuFeatures::IsSupported(SSE3)) {
+      CpuFeatures::Scope scope(SSE3);
+      NearLabel convert;
+      // Use more powerful conversion when sse3 is available.
+      // Load x87 register with heap number.
+      __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+      // Get exponent alone and check for too-big exponent.
+      __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+      __ and_(input_reg, HeapNumber::kExponentMask);
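+      // Unbiased exponents of 63 or more do not fit in a signed 64-bit
+      // integer, which is the widest conversion fisttp can do.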
+      const uint32_t kTooBigExponent =
+          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+      __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
+      __ j(less, &convert);
+      // Pop FPU stack before deoptimizing.
+      __ ffree(0);
+      __ fincstp();
+      DeoptimizeIf(no_condition, instr->environment());
+
+      // Reserve space for the 64-bit answer.
+      __ bind(&convert);
+      __ sub(Operand(esp), Immediate(kDoubleSize));
+      // Do conversion, which cannot fail because we checked the exponent.
+      __ fisttp_d(Operand(esp, 0));
+      __ mov(input_reg, Operand(esp, 0));  // Low word of answer is the result.
+      __ add(Operand(esp), Immediate(kDoubleSize));
+    } else {
+      NearLabel deopt;
+      XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
+      __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+      __ cvttsd2si(input_reg, Operand(xmm0));
+      __ cmp(input_reg, 0x80000000u);
+      __ j(not_equal, &done);
+      // Check if the input was 0x80000000 (kMinInt).
+      // If not, the conversion overflowed and we deoptimize.
+      ExternalReference min_int = ExternalReference::address_of_min_int();
+      __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
+      __ ucomisd(xmm_temp, xmm0);
+      DeoptimizeIf(not_equal, instr->environment());
+      DeoptimizeIf(parity_even, instr->environment());  // NaN.
+    }
+  } else {
+    // Deoptimize if we don't have a heap number.
+    DeoptimizeIf(not_equal, instr->environment());
+
+    XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
+    __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+    __ cvttsd2si(input_reg, Operand(xmm0));
+    __ cvtsi2sd(xmm_temp, Operand(input_reg));
+    __ ucomisd(xmm0, xmm_temp);
+    DeoptimizeIf(not_equal, instr->environment());
+    DeoptimizeIf(parity_even, instr->environment());  // NaN.
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ test(input_reg, Operand(input_reg));
+      __ j(not_zero, &done);
+      __ movmskpd(input_reg, xmm0);
+      __ and_(input_reg, 1);
+      DeoptimizeIf(not_zero, instr->environment());
+    }
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  ASSERT(input->Equals(instr->result()));
+
+  Register input_reg = ToRegister(input);
+
+  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+
+  // Smi check.
+  __ test(input_reg, Immediate(kSmiTagMask));
+  __ j(not_zero, deferred->entry());
+
+  // Smi to int32 conversion.
+  __ SmiUntag(input_reg);  // Untag smi.
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  LOperand* result = instr->result();
+  ASSERT(result->IsDoubleRegister());
+
+  Register input_reg = ToRegister(input);
+  XMMRegister result_reg = ToDoubleRegister(result);
+
+  EmitNumberUntagD(input_reg, result_reg, instr->environment());
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsDoubleRegister());
+  LOperand* result = instr->result();
+  ASSERT(result->IsRegister());
+
+  XMMRegister input_reg = ToDoubleRegister(input);
+  Register result_reg = ToRegister(result);
+
+  if (instr->truncating()) {
+    // Performs a truncating conversion of a floating point number as used by
+    // the JS bitwise operations.
+    __ cvttsd2si(result_reg, Operand(input_reg));
+    __ cmp(result_reg, 0x80000000u);
+    if (CpuFeatures::IsSupported(SSE3)) {
+      // This will deoptimize if the exponent of the input is out of range.
+      CpuFeatures::Scope scope(SSE3);
+      NearLabel convert, done;
+      __ j(not_equal, &done);
+      __ sub(Operand(esp), Immediate(kDoubleSize));
+      __ movdbl(Operand(esp, 0), input_reg);
+      // Get exponent alone and check for too-big exponent.
+      __ mov(result_reg, Operand(esp, sizeof(int32_t)));
+      __ and_(result_reg, HeapNumber::kExponentMask);
+      const uint32_t kTooBigExponent =
+          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+      __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
+      __ j(less, &convert);
+      __ add(Operand(esp), Immediate(kDoubleSize));
+      DeoptimizeIf(no_condition, instr->environment());
+      __ bind(&convert);
+      // Do conversion, which cannot fail because we checked the exponent.
+      __ fld_d(Operand(esp, 0));
+      __ fisttp_d(Operand(esp, 0));
+      __ mov(result_reg, Operand(esp, 0));  // Low word of answer is the result.
+      __ add(Operand(esp), Immediate(kDoubleSize));
+      __ bind(&done);
+    } else {
+      // This will bail out if the input was not in the int32 range (or,
+      // unfortunately, if the input was 0x80000000).
+      DeoptimizeIf(equal, instr->environment());
+    }
+  } else {
+    NearLabel done;
+    __ cvttsd2si(result_reg, Operand(input_reg));
+    __ cvtsi2sd(xmm0, Operand(result_reg));
+    __ ucomisd(xmm0, input_reg);
+    DeoptimizeIf(not_equal, instr->environment());
+    DeoptimizeIf(parity_even, instr->environment());  // NaN.
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      // The integer converted back is equal to the original. We
+      // only have to test if we got -0 as an input.
+      __ test(result_reg, Operand(result_reg));
+      __ j(not_zero, &done);
+      __ movmskpd(result_reg, input_reg);
+      // Bit 0 contains the sign of the double in input_reg.
+      // If input was positive, we are ok and return 0, otherwise
+      // deoptimize.
+      __ and_(result_reg, 1);
+      DeoptimizeIf(not_zero, instr->environment());
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  __ test(ToRegister(input), Immediate(kSmiTagMask));
+  DeoptimizeIf(instr->condition(), instr->environment());
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Register input = ToRegister(instr->input());
+  Register temp = ToRegister(instr->temp());
+  InstanceType first = instr->hydrogen()->first();
+  InstanceType last = instr->hydrogen()->last();
+
+  __ test(input, Immediate(kSmiTagMask));
+  DeoptimizeIf(zero, instr->environment());
+
+  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+  __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+          static_cast<int8_t>(first));
+
+  // If there is only one type in the interval check for equality.
+  if (first == last) {
+    DeoptimizeIf(not_equal, instr->environment());
+  } else {
+    DeoptimizeIf(below, instr->environment());
+    // Omit check for the last type.
+    if (last != LAST_TYPE) {
+      __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+              static_cast<int8_t>(last));
+      DeoptimizeIf(above, instr->environment());
+    }
+  }
+}
+
+
+void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+  ASSERT(instr->input()->IsRegister());
+  Register reg = ToRegister(instr->input());
+  __ cmp(reg, instr->hydrogen()->target());
+  DeoptimizeIf(not_equal, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMap(LCheckMap* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  Register reg = ToRegister(input);
+  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+         instr->hydrogen()->map());
+  DeoptimizeIf(not_equal, instr->environment());
+}
+
+
+void LCodeGen::LoadPrototype(Register result, Handle<JSObject> prototype) {
+  if (Heap::InNewSpace(*prototype)) {
+    Handle<JSGlobalPropertyCell> cell =
+        Factory::NewJSGlobalPropertyCell(prototype);
+    __ mov(result, Operand::Cell(cell));
+  } else {
+    __ mov(result, prototype);
+  }
+}
+
+
+void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+  Register reg = ToRegister(instr->temp());
+
+  Handle<JSObject> holder = instr->holder();
+  Handle<Map> receiver_map = instr->receiver_map();
+  Handle<JSObject> current_prototype(JSObject::cast(receiver_map->prototype()));
+
+  // Load prototype object.
+  LoadPrototype(reg, current_prototype);
+
+  // Check prototype maps up to the holder.
+  while (!current_prototype.is_identical_to(holder)) {
+    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+           Handle<Map>(current_prototype->map()));
+    DeoptimizeIf(not_equal, instr->environment());
+    current_prototype =
+        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
+    // Load next prototype object.
+    LoadPrototype(reg, current_prototype);
+  }
+
+  // Check the holder map.
+  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+         Handle<Map>(current_prototype->map()));
+  DeoptimizeIf(not_equal, instr->environment());
+}
+
+
+void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+  // Set up the parameters to the stub/runtime call.
+  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
+  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ push(Immediate(instr->hydrogen()->constant_elements()));
+
+  // Pick the right runtime function or stub to call.
+  int length = instr->hydrogen()->length();
+  if (instr->hydrogen()->IsCopyOnWrite()) {
+    ASSERT(instr->hydrogen()->depth() == 1);
+    FastCloneShallowArrayStub::Mode mode =
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else if (instr->hydrogen()->depth() > 1) {
+    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
+  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
+  } else {
+    FastCloneShallowArrayStub::Mode mode =
+        FastCloneShallowArrayStub::CLONE_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+  // Set up the parameters to the stub/runtime call.
+  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
+  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ push(Immediate(instr->hydrogen()->constant_properties()));
+  __ push(Immediate(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
+
+  // Pick the right runtime function or stub to call.
+  if (instr->hydrogen()->depth() > 1) {
+    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
+  } else {
+    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+  }
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+  NearLabel materialized;
+  // Registers will be used as follows:
+  // edi = JS function.
+  // ecx = literals array.
+  // ebx = regexp literal.
+  // eax = regexp literal clone.
+  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+  int literal_offset = FixedArray::kHeaderSize +
+      instr->hydrogen()->literal_index() * kPointerSize;
+  __ mov(ebx, FieldOperand(ecx, literal_offset));
+  __ cmp(ebx, Factory::undefined_value());
+  __ j(not_equal, &materialized);
+
+  // Create the regexp literal using the runtime function.
+  // The result will be in eax.
+  __ push(ecx);
+  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ push(Immediate(instr->hydrogen()->pattern()));
+  __ push(Immediate(instr->hydrogen()->flags()));
+  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+  __ mov(ebx, eax);
+
+  __ bind(&materialized);
+  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+  Label allocated, runtime_allocate;
+  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ push(ebx);
+  __ push(Immediate(Smi::FromInt(size)));
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+  __ pop(ebx);
+
+  __ bind(&allocated);
+  // Copy the content into the newly allocated memory.
+  // (Unroll copy loop once for better throughput).
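+  // Each iteration copies two pointer-sized words; an odd trailing word,
+  // if any, is copied after the loop.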
+  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
+    __ mov(edx, FieldOperand(ebx, i));
+    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
+    __ mov(FieldOperand(eax, i), edx);
+    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
+  }
+  if ((size % (2 * kPointerSize)) != 0) {
+    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
+    __ mov(FieldOperand(eax, size - kPointerSize), edx);
+  }
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+  // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literal cloning.
+  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
+  bool pretenure = instr->hydrogen()->pretenure();
+  if (shared_info->num_literals() == 0 && !pretenure) {
+    FastNewClosureStub stub;
+    __ push(Immediate(shared_info));
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else {
+    __ push(esi);
+    __ push(Immediate(shared_info));
+    __ push(Immediate(pretenure
+                      ? Factory::true_value()
+                      : Factory::false_value()));
+    CallRuntime(Runtime::kNewClosure, 3, instr);
+  }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  LOperand* input = instr->input();
+  if (input->IsConstantOperand()) {
+    __ push(ToImmediate(input));
+  } else {
+    __ push(ToOperand(input));
+  }
+  CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
+  Register input = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+  Label true_label;
+  Label false_label;
+  NearLabel done;
+
+  Condition final_branch_condition = EmitTypeofIs(&true_label,
+                                                  &false_label,
+                                                  input,
+                                                  instr->type_literal());
+  __ j(final_branch_condition, &true_label);
+  __ bind(&false_label);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ jmp(&done);
+
+  __ bind(&true_label);
+  __ mov(result, Handle<Object>(Heap::true_value()));
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Register input = ToRegister(instr->input());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition final_branch_condition = EmitTypeofIs(true_label,
+                                                  false_label,
+                                                  input,
+                                                  instr->type_literal());
+
+  EmitBranch(true_block, false_block, final_branch_condition);
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label,
+                                 Label* false_label,
+                                 Register input,
+                                 Handle<String> type_name) {
+  Condition final_branch_condition = no_condition;
+  if (type_name->Equals(Heap::number_symbol())) {
+    __ test(input, Immediate(kSmiTagMask));
+    __ j(zero, true_label);
+    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
+           Factory::heap_number_map());
+    final_branch_condition = equal;
+
+  } else if (type_name->Equals(Heap::string_symbol())) {
+    __ test(input, Immediate(kSmiTagMask));
+    __ j(zero, false_label);
+    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
+    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    __ j(not_zero, false_label);
+    __ CmpInstanceType(input, FIRST_NONSTRING_TYPE);
+    final_branch_condition = below;
+
+  } else if (type_name->Equals(Heap::boolean_symbol())) {
+    __ cmp(input, Handle<Object>(Heap::true_value()));
+    __ j(equal, true_label);
+    __ cmp(input, Handle<Object>(Heap::false_value()));
+    final_branch_condition = equal;
+
+  } else if (type_name->Equals(Heap::undefined_symbol())) {
+    __ cmp(input, Factory::undefined_value());
+    __ j(equal, true_label);
+    __ test(input, Immediate(kSmiTagMask));
+    __ j(zero, false_label);
+    // Check for undetectable objects => true.
+    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
+    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    final_branch_condition = not_zero;
+
+  } else if (type_name->Equals(Heap::function_symbol())) {
+    __ test(input, Immediate(kSmiTagMask));
+    __ j(zero, false_label);
+    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
+    __ j(equal, true_label);
+    // Regular expressions => 'function' (they are callable).
+    __ CmpInstanceType(input, JS_REGEXP_TYPE);
+    final_branch_condition = equal;
+
+  } else if (type_name->Equals(Heap::object_symbol())) {
+    __ test(input, Immediate(kSmiTagMask));
+    __ j(zero, false_label);
+    __ cmp(input, Factory::null_value());
+    __ j(equal, true_label);
+    // Regular expressions => 'function', not 'object'.
+    __ CmpObjectType(input, JS_REGEXP_TYPE, input);
+    __ j(equal, false_label);
+    // Check for undetectable objects => false.
+    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    __ j(not_zero, false_label);
+    // Check for JS objects => true.
+    __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE);
+    __ j(below, false_label);
+    __ CmpInstanceType(input, LAST_JS_OBJECT_TYPE);
+    final_branch_condition = below_equal;
+
+  } else {
+    final_branch_condition = not_equal;
+    __ jmp(false_label);
+    // A dead branch instruction will be generated after this point.
+  }
+
+  return final_branch_condition;
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  // No code is emitted for the lazy bailout instruction; it only
+  // captures the environment after a call so that the safepoint data
+  // can be populated with deoptimization information.
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  DeoptimizeIf(no_condition, instr->environment());
+}
+
+
+void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
+  LOperand* obj = instr->object();
+  LOperand* key = instr->key();
+  __ push(ToOperand(obj));
+  if (key->IsConstantOperand()) {
+    __ push(ToImmediate(key));
+  } else {
+    __ push(ToOperand(key));
+  }
+  RecordPosition(instr->pointer_map()->position());
+  SafepointGenerator safepoint_generator(this,
+                                         instr->pointer_map(),
+                                         Safepoint::kNoDeoptimizationIndex);
+  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  // Perform stack overflow check.
+  NearLabel done;
+  ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
+  __ cmp(esp, Operand::StaticVariable(stack_limit));
+  __ j(above_equal, &done);
+
+  StackCheckStub stub;
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+  LEnvironment* environment = instr->environment();
+  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
+                                   instr->SpilledDoubleRegisterArray());
+
+  // If the environment were already registered, we would have no way of
+  // backpatching it with the spill slot operands.
+  ASSERT(!environment->HasBeenRegistered());
+  RegisterEnvironmentForDeoptimization(environment);
+  ASSERT(osr_pc_offset_ == -1);
+  osr_pc_offset_ = masm()->pc_offset();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
new file mode 100644 (file)
index 0000000..91b3fab
--- /dev/null
@@ -0,0 +1,252 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_LITHIUM_CODEGEN_IA32_H_
+#define V8_IA32_LITHIUM_CODEGEN_IA32_H_
+
+#include "ia32/lithium-ia32.h"
+
+#include "checks.h"
+#include "deoptimizer.h"
+#include "safepoint-table.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+
+
+class LCodeGen BASE_EMBEDDED {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : chunk_(chunk),
+        masm_(assembler),
+        info_(info),
+        current_block_(-1),
+        current_instruction_(-1),
+        instructions_(chunk->instructions()),
+        deoptimizations_(4),
+        deoptimization_literals_(8),
+        inlined_function_count_(0),
+        scope_(chunk->graph()->info()->scope()),
+        status_(UNUSED),
+        deferred_(8),
+        osr_pc_offset_(-1) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode();
+
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code);
+
+  // Deferred code support.
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+  void DoDeferredNumberTagI(LNumberTagI* instr);
+  void DoDeferredTaggedToI(LTaggedToI* instr);
+  void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+  void DoDeferredStackCheck(LGoto* instr);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  enum Status {
+    UNUSED,
+    GENERATING,
+    DONE,
+    ABORTED
+  };
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_generating() const { return status_ == GENERATING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  LChunk* chunk() const { return chunk_; }
+  Scope* scope() const { return scope_; }
+  HGraph* graph() const { return chunk_->graph(); }
+  MacroAssembler* masm() const { return masm_; }
+
+  int GetNextEmittedBlock(int block);
+  LInstruction* GetNextInstruction();
+
+  void EmitClassOfTest(Label* if_true,
+                       Label* if_false,
+                       Handle<String> class_name,
+                       Register input,
+                       Register temporary,
+                       Register temporary2);
+
+  int StackSlotCount() const { return chunk()->spill_slot_count(); }
+  int ParameterCount() const { return scope()->num_parameters(); }
+
+  void Abort(const char* format, ...);
+  void Comment(const char* format, ...);
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+
+  // Code generation passes.  Returns true if code generation should
+  // continue.
+  bool GeneratePrologue();
+  bool GenerateBody();
+  bool GenerateDeferredCode();
+  bool GenerateSafepointTable();
+
+  void CallCode(Handle<Code> code,
+                RelocInfo::Mode mode,
+                LInstruction* instr);
+  void CallRuntime(Runtime::Function* function,
+                   int num_arguments,
+                   LInstruction* instr);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   LInstruction* instr) {
+    Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, num_arguments, instr);
+  }
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in edi.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int arity,
+                         LInstruction* instr);
+
+  void LoadPrototype(Register result, Handle<JSObject> prototype);
+
+  void RegisterLazyDeoptimization(LInstruction* instr);
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
+  void DeoptimizeIf(Condition cc, LEnvironment* environment);
+
+  void AddToTranslation(Translation* translation,
+                        LOperand* op,
+                        bool is_tagged);
+  void PopulateDeoptimizationData(Handle<Code> code);
+  int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+  Register ToRegister(int index) const;
+  XMMRegister ToDoubleRegister(int index) const;
+  Register ToRegister(LOperand* op) const;
+  XMMRegister ToDoubleRegister(LOperand* op) const;
+  int ToInteger32(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op) const;
+  Immediate ToImmediate(LOperand* op);
+
+  // Specific math operations - used from DoUnaryMathOperation.
+  void DoMathAbs(LUnaryMathOperation* instr);
+  void DoMathFloor(LUnaryMathOperation* instr);
+  void DoMathRound(LUnaryMathOperation* instr);
+  void DoMathSqrt(LUnaryMathOperation* instr);
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
+  void RecordSafepointWithRegisters(LPointerMap* pointers,
+                                    int arguments,
+                                    int deoptimization_index);
+  void RecordPosition(int position);
+
+  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+  void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+  void EmitBranch(int left_block, int right_block, Condition cc);
+  void EmitCmpI(LOperand* left, LOperand* right);
+  void EmitNumberUntagD(Register input, XMMRegister result, LEnvironment* env);
+
+  // Emits optimized code for typeof x == "y".  Modifies the input
+  // register.  Returns the condition on which the final split into the
+  // true and false labels should be made, to optimize fallthrough.
+  Condition EmitTypeofIs(Label* true_label, Label* false_label,
+                         Register input, Handle<String> type_name);
+
+  LChunk* const chunk_;
+  MacroAssembler* const masm_;
+  CompilationInfo* const info_;
+
+  int current_block_;
+  int current_instruction_;
+  const ZoneList<LInstruction*>* instructions_;
+  ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<Handle<Object> > deoptimization_literals_;
+  int inlined_function_count_;
+  Scope* const scope_;
+  Status status_;
+  TranslationBuffer translations_;
+  ZoneList<LDeferredCode*> deferred_;
+  int osr_pc_offset_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
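+// Deferred code is emitted out of line, after the main body of the
+// generated code. Inline code jumps to entry() and control resumes at
+// exit(), which can be redirected with SetExit().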
+class LDeferredCode: public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen), external_exit_(NULL) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() { }
+  virtual void Generate() = 0;
+
+  void SetExit(Label *exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_LITHIUM_CODEGEN_IA32_H_
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
new file mode 100644 (file)
index 0000000..c39df9a
--- /dev/null
@@ -0,0 +1,2096 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "ia32/lithium-ia32.h"
+#include "ia32/lithium-codegen-ia32.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                            \
+  void L##type::CompileToNative(LCodeGen* generator) {  \
+    generator->Do##type(this);                          \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+LOsrEntry::LOsrEntry() {
+  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+    register_spills_[i] = NULL;
+  }
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+    double_register_spills_[i] = NULL;
+  }
+}
+
+
+void LOsrEntry::MarkSpilledRegister(int allocation_index,
+                                    LOperand* spill_operand) {
+  ASSERT(spill_operand->IsStackSlot());
+  ASSERT(register_spills_[allocation_index] == NULL);
+  register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
+                                          LOperand* spill_operand) {
+  ASSERT(spill_operand->IsDoubleStackSlot());
+  ASSERT(double_register_spills_[allocation_index] == NULL);
+  double_register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) const {
+  stream->Add("%s ", this->Mnemonic());
+  if (HasResult()) {
+    result()->PrintTo(stream);
+    stream->Add(" ");
+  }
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) const {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+bool LParallelMove::IsRedundant() const {
+  for (int i = 0; i < move_operands_.length(); ++i) {
+    if (!move_operands_[i].IsRedundant()) return false;
+  }
+  return true;
+}
+
+
+void LParallelMove::PrintDataTo(StringStream* stream) const {
+  for (int i = move_operands_.length() - 1; i >= 0; --i) {
+    if (!move_operands_[i].IsEliminated()) {
+      LOperand* from = move_operands_[i].from();
+      LOperand* to = move_operands_[i].to();
+      if (from->Equals(to)) {
+        to->PrintTo(stream);
+      } else {
+        to->PrintTo(stream);
+        stream->Add(" = ");
+        from->PrintTo(stream);
+      }
+      stream->Add("; ");
+    }
+  }
+}
+
+
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) const {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-d";
+    case Token::SUB: return "sub-d";
+    case Token::MUL: return "mul-d";
+    case Token::DIV: return "div-d";
+    case Token::MOD: return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-t";
+    case Token::SUB: return "sub-t";
+    case Token::MUL: return "mul-t";
+    case Token::MOD: return "mod-t";
+    case Token::DIV: return "div-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+void LBinaryOperation::PrintDataTo(StringStream* stream) const {
+  stream->Add("= ");
+  left()->PrintTo(stream);
+  stream->Add(" ");
+  right()->PrintTo(stream);
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) const {
+  stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  input()->PrintTo(stream);
+}
+
+
+void LCmpIDAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if ");
+  left()->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  right()->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if ");
+  input()->PrintTo(stream);
+  stream->Add(is_strict() ? " === null" : " == null");
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if is_smi(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if has_instance_type(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if has_cached_array_index(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if class_of_test(");
+  input()->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d",
+              *hydrogen()->class_name(),
+              true_block_id(),
+              false_block_id());
+}
+
+
+void LTypeofIs::PrintDataTo(StringStream* stream) const {
+  input()->PrintTo(stream);
+  stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if typeof ");
+  input()->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              *hydrogen()->type_literal()->ToCString(),
+              true_block_id(), false_block_id());
+}
+
+
+void LCallConstantFunction::PrintDataTo(StringStream* stream) const {
+  stream->Add("#%d / ", arity());
+}
+
+
+void LUnaryMathOperation::PrintDataTo(StringStream* stream) const {
+  stream->Add("/%s ", hydrogen()->OpName());
+  input()->PrintTo(stream);
+}
+
+
+void LCallKeyed::PrintDataTo(StringStream* stream) const {
+  stream->Add("[ecx] #%d / ", arity());
+}
+
+
+void LCallNamed::PrintDataTo(StringStream* stream) const {
+  SmartPointer<char> name_string = name()->ToCString();
+  stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallGlobal::PrintDataTo(StringStream* stream) const {
+  SmartPointer<char> name_string = name()->ToCString();
+  stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallKnownGlobal::PrintDataTo(StringStream* stream) const {
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) const {
+  LUnaryOperation::PrintDataTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LClassOfTest::PrintDataTo(StringStream* stream) const {
+  stream->Add("= class_of_test(");
+  input()->PrintTo(stream);
+  stream->Add(", \"%o\")", *hydrogen()->class_name());
+}
+
+
+void LUnaryOperation::PrintDataTo(StringStream* stream) const {
+  stream->Add("= ");
+  input()->PrintTo(stream);
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
+  arguments()->PrintTo(stream);
+
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+LChunk::LChunk(HGraph* graph)
+    : spill_slot_count_(0),
+      graph_(graph),
+      instructions_(32),
+      pointer_maps_(8),
+      inlined_closures_(1) {
+}
+
+
+void LChunk::Verify() const {
+  // TODO(twuerthinger): Implement verification for chunk.
+}
+
+
+int LChunk::GetNextSpillIndex(bool is_double) {
+  // A double-width value occupies two consecutive slots; skip one so
+  // that the index of the second slot is returned.
+  if (is_double) spill_slot_count_++;
+  return spill_slot_count_++;
+}
+
+
+LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+  int index = GetNextSpillIndex(is_double);
+  if (is_double) {
+    return LDoubleStackSlot::Create(index);
+  } else {
+    return LStackSlot::Create(index);
+  }
+}
+
+
+void LChunk::MarkEmptyBlocks() {
+  HPhase phase("Mark empty blocks", this);
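+  // A block whose label is redundant and whose body contains only
+  // redundant gaps before a plain goto can be eliminated by redirecting
+  // its label to the goto target.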
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    int first = block->first_instruction_index();
+    int last = block->last_instruction_index();
+    LInstruction* first_instr = instructions()->at(first);
+    LInstruction* last_instr = instructions()->at(last);
+
+    LLabel* label = LLabel::cast(first_instr);
+    if (last_instr->IsGoto()) {
+      LGoto* goto_instr = LGoto::cast(last_instr);
+      if (!goto_instr->include_stack_check() &&
+          label->IsRedundant() &&
+          !label->is_loop_header()) {
+        bool can_eliminate = true;
+        for (int j = first + 1; j < last && can_eliminate; ++j) {
+          LInstruction* cur = instructions()->at(j);
+          if (cur->IsGap()) {
+            LGap* gap = LGap::cast(cur);
+            if (!gap->IsRedundant()) {
+              can_eliminate = false;
+            }
+          } else {
+            can_eliminate = false;
+          }
+        }
+
+        if (can_eliminate) {
+          label->set_replacement(GetLabel(goto_instr->block_id()));
+        }
+      }
+    }
+  }
+}
+
+
+void LStoreNamed::PrintDataTo(StringStream* stream) const {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) const {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+  LGap* gap = new LGap(block);
+  int index = -1;
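+  // Every instruction is paired with a gap: control instructions are
+  // preceded by theirs, all other instructions are followed by one.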
+  if (instr->IsControl()) {
+    instructions_.Add(gap);
+    index = instructions_.length();
+    instructions_.Add(instr);
+  } else {
+    index = instructions_.length();
+    instructions_.Add(instr);
+    instructions_.Add(gap);
+  }
+  if (instr->HasPointerMap()) {
+    pointer_maps_.Add(instr->pointer_map());
+    instr->pointer_map()->set_lithium_position(index);
+  }
+  return index;
+}
+
+
+LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
+  return LConstantOperand::Create(constant->id());
+}
+
+
+int LChunk::GetParameterStackSlot(int index) const {
+  // The receiver is at index 0, the first parameter at index 1, so we
+  // shift all parameter indexes down by the number of parameters, and
+  // make sure they end up negative so they are distinguishable from
+  // spill slots.
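+  // For example, with two parameters the receiver (index 0) maps to
+  // slot -3, the first parameter (index 1) to -2, and the second
+  // parameter (index 2) to -1.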
+  int result = index - graph()->info()->scope()->num_parameters() - 1;
+  ASSERT(result < 0);
+  return result;
+}
+
+
+// A parameter relative to ebp in the arguments stub.
+int LChunk::ParameterAt(int index) {
+  ASSERT(-1 <= index);  // -1 is the receiver.
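+  // For example, with two parameters and kPointerSize == 4, the second
+  // parameter is at ebp + 8, the first at ebp + 12, and the receiver
+  // at ebp + 16.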
+  return (1 + graph()->info()->scope()->num_parameters() - index) *
+      kPointerSize;
+}
+
+
+LGap* LChunk::GetGapAt(int index) const {
+  return LGap::cast(instructions_[index]);
+}
+
+
+bool LChunk::IsGapAt(int index) const {
+  return instructions_[index]->IsGap();
+}
+
+
+int LChunk::NearestGapPos(int index) const {
+  while (!IsGapAt(index)) index--;
+  return index;
+}
+
+
+int LChunk::NearestNextGapPos(int index) const {
+  while (!IsGapAt(index)) index++;
+  return index;
+}
+
+
+void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
+  GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
+}
+
+
+class LGapNode: public ZoneObject {
+ public:
+  explicit LGapNode(LOperand* operand)
+      : operand_(operand), resolved_(false), visited_id_(-1) { }
+
+  LOperand* operand() const { return operand_; }
+  bool IsResolved() const { return !IsAssigned() || resolved_; }
+  void MarkResolved() {
+    ASSERT(!IsResolved());
+    resolved_ = true;
+  }
+  int visited_id() const { return visited_id_; }
+  void set_visited_id(int id) {
+    ASSERT(id > visited_id_);
+    visited_id_ = id;
+  }
+
+  bool IsAssigned() const { return assigned_from_.is_set(); }
+  LGapNode* assigned_from() const { return assigned_from_.get(); }
+  void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
+
+ private:
+  LOperand* operand_;
+  SetOncePointer<LGapNode> assigned_from_;
+  bool resolved_;
+  int visited_id_;
+};
+
+
+LGapResolver::LGapResolver(const ZoneList<LMoveOperands>* moves,
+                           LOperand* marker_operand)
+    : nodes_(4),
+      identified_cycles_(4),
+      result_(4),
+      marker_operand_(marker_operand),
+      next_visited_id_(0) {
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) RegisterMove(move);
+  }
+}
+
+
+const ZoneList<LMoveOperands>* LGapResolver::ResolveInReverseOrder() {
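+  // Moves that belong to cycles are added to the result first; every
+  // other move is added once its source node has been resolved.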
+  for (int i = 0; i < identified_cycles_.length(); ++i) {
+    ResolveCycle(identified_cycles_[i]);
+  }
+
+  int unresolved_nodes;
+  do {
+    unresolved_nodes = 0;
+    for (int j = 0; j < nodes_.length(); j++) {
+      LGapNode* node = nodes_[j];
+      if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
+        AddResultMove(node->assigned_from(), node);
+        node->MarkResolved();
+      }
+      if (!node->IsResolved()) ++unresolved_nodes;
+    }
+  } while (unresolved_nodes > 0);
+  return &result_;
+}
+
+
+void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
+  AddResultMove(from->operand(), to->operand());
+}
+
+
+void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
+  result_.Add(LMoveOperands(from, to));
+}
+
+
+void LGapResolver::ResolveCycle(LGapNode* start) {
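+  // Break the cycle by routing it through the marker operand: collect
+  // the operands along the cycle, bracketed by the marker on both ends,
+  // and add a move between each adjacent pair.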
+  ZoneList<LOperand*> circle_operands(8);
+  circle_operands.Add(marker_operand_);
+  LGapNode* cur = start;
+  do {
+    cur->MarkResolved();
+    circle_operands.Add(cur->operand());
+    cur = cur->assigned_from();
+  } while (cur != start);
+  circle_operands.Add(marker_operand_);
+
+  for (int i = circle_operands.length() - 1; i > 0; --i) {
+    LOperand* from = circle_operands[i];
+    LOperand* to = circle_operands[i - 1];
+    AddResultMove(from, to);
+  }
+}
+
+
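+// Returns true if node b can be reached from node a by following the
+// chain of assignment sources. visited_id marks nodes already seen so
+// the walk terminates on cyclic chains.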
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
+  ASSERT(a != b);
+  LGapNode* cur = a;
+  while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
+    cur->set_visited_id(visited_id);
+    cur = cur->assigned_from();
+  }
+
+  return cur == b;
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
+  ASSERT(a != b);
+  return CanReach(a, b, next_visited_id_++);
+}
+
+
+void LGapResolver::RegisterMove(LMoveOperands move) {
+  if (move.from()->IsConstantOperand()) {
+    // Constant moves should be last in the machine code. Therefore add them
+    // first to the result set.
+    AddResultMove(move.from(), move.to());
+  } else {
+    LGapNode* from = LookupNode(move.from());
+    LGapNode* to = LookupNode(move.to());
+    if (to->IsAssigned() && to->assigned_from() == from) {
+      move.Eliminate();
+      return;
+    }
+    ASSERT(!to->IsAssigned());
+    if (CanReach(from, to)) {
+      // This move introduces a cycle. Record it for cycle resolution.
+      identified_cycles_.Add(from);
+    }
+    to->set_assigned_from(from);
+  }
+}
+
+
+LGapNode* LGapResolver::LookupNode(LOperand* operand) {
+  for (int i = 0; i < nodes_.length(); ++i) {
+    if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
+  }
+
+  // No node found => create a new one.
+  LGapNode* result = new LGapNode(operand);
+  nodes_.Add(result);
+  return result;
+}
+
+
+Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
+  return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
+}
+
+
+Representation LChunk::LookupLiteralRepresentation(
+    LConstantOperand* operand) const {
+  return graph_->LookupValue(operand->index())->representation();
+}
+
+
+LChunk* LChunkBuilder::Build() {
+  ASSERT(is_unused());
+  chunk_ = new LChunk(graph());
+  HPhase phase("Building chunk", chunk_);
+  status_ = BUILDING;
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* next = NULL;
+    if (i < blocks->length() - 1) next = blocks->at(i + 1);
+    DoBasicBlock(blocks->at(i), next);
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+void LChunkBuilder::Abort(const char* format, ...) {
+  if (FLAG_trace_bailout) {
+    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
+    PrintF("Aborting LChunk building in @\"%s\": ", *debug_name);
+    va_list arguments;
+    va_start(arguments, format);
+    OS::VPrint(format, arguments);
+    va_end(arguments);
+    PrintF("\n");
+  }
+  status_ = ABORTED;
+}
+
+
+LRegister* LChunkBuilder::ToOperand(Register reg) {
+  return LRegister::Create(Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new LUnallocated(LUnallocated::FIXED_REGISTER,
+                          Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
+  return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                          XMMRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
+  return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value,
+             new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                              LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::NONE,
+                                     LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  allocator_->RecordUse(value, operand);
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::Define(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::NONE));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(LInstruction* instr, int index) {
+  return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsAny(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_ANY_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(LInstruction* instr, Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(LInstruction* instr,
+                                               XMMRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  instr->set_environment(CreateEnvironment(hydrogen_env));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
+    LInstruction* instr, int ast_id) {
+  ASSERT(instructions_pending_deoptimization_environment_ == NULL);
+  ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+  instructions_pending_deoptimization_environment_ = instr;
+  pending_deoptimization_ast_id_ = ast_id;
+  return instr;
+}
+
+
+void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
+  instructions_pending_deoptimization_environment_ = NULL;
+  pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+  allocator_->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  if (hinstr->HasSideEffects()) {
+    ASSERT(hinstr->next()->IsSimulate());
+    HSimulate* sim = HSimulate::cast(hinstr->next());
+    instr = SetInstructionPendingDeoptimizationEnvironment(
+        instr, sim->ast_id());
+  }
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
+  bool needs_environment =
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  ASSERT(!instr->HasPointerMap());
+  instr->set_pointer_map(new LPointerMap(position_));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::Define(LInstruction* instr, LUnallocated* result) {
+  allocator_->RecordDefinition(current_instruction_, result);
+  instr->set_result(result);
+  return instr;
+}
+
+
+LOperand* LChunkBuilder::Temp() {
+  LUnallocated* operand = new LUnallocated(LUnallocated::NONE);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  HBasicBlock* deopt_predecessor = instr->block()->deopt_predecessor();
+  if (deopt_predecessor != NULL &&
+      deopt_predecessor->inverted()) {
+    HEnvironment* env = current_block_->last_environment();
+    HValue* value = env->Pop();
+    ASSERT(value->IsConstant());
+    Handle<Object> obj = HConstant::cast(value)->handle();
+    ASSERT(*obj == *Factory::true_value() || *obj == *Factory::false_value());
+    env->Push(*obj == *Factory::true_value()
+              ? current_block_->graph()->GetConstantFalse()
+              : current_block_->graph()->GetConstantTrue());
+  }
+
+  return new LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoBit(Token::Value op,
+                                   HBitwiseBinaryOperation* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().IsInteger32());
+  ASSERT(instr->right()->representation().IsInteger32());
+
+  LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+  LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+  return DefineSameAsFirst(new LBitI(op, left, right));
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->OperandAt(0)->representation().IsInteger32());
+  ASSERT(instr->OperandAt(1)->representation().IsInteger32());
+  LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
+
+  HValue* right_value = instr->OperandAt(1);
+  LOperand* right = NULL;
+  int constant_value = 0;
+  if (right_value->IsConstant()) {
+    HConstant* constant = HConstant::cast(right_value);
+    right = chunk_->DefineConstantOperand(constant);
+    constant_value = constant->Integer32Value() & 0x1f;
+  } else {
+    right = UseFixed(right_value, ecx);
+  }
+
+  // Shift operations can only deoptimize if we do a logical shift
+  // by 0 and the result cannot be truncated to int32.
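+  // (For example, -1 >>> 0 evaluates to 4294967295, which cannot be
+  // represented as a signed 32-bit integer.)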
+  bool can_deopt = (op == Token::SHR && constant_value == 0);
+  if (can_deopt) {
+    bool can_truncate = true;
+    for (int i = 0; i < instr->uses()->length(); i++) {
+      if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
+        can_truncate = false;
+        break;
+      }
+    }
+    can_deopt = !can_truncate;
+  }
+
+  LInstruction* result =
+      DefineSameAsFirst(new LShiftI(op, left, right, can_deopt));
+  if (can_deopt) AssignEnvironment(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->left()->representation().IsDouble());
+  ASSERT(instr->right()->representation().IsDouble());
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LArithmeticD* result = new LArithmeticD(op, left, right);
+  return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  ASSERT(op == Token::ADD ||
+         op == Token::DIV ||
+         op == Token::MOD ||
+         op == Token::MUL ||
+         op == Token::SUB);
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+  ASSERT(left->representation().IsTagged());
+  ASSERT(right->representation().IsTagged());
+  LOperand* left_operand = UseFixed(left, edx);
+  LOperand* right_operand = UseFixed(right, eax);
+  LInstruction* result = new LArithmeticT(op, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  ASSERT(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    ASSERT(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    ASSERT(last_environment != NULL);
+    // Only copy the environment if it is used again later, i.e. if some
+    // successor of the predecessor is laid out after this block.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      ASSERT(pred->end()->FirstSuccessor() == block);
+    } else {
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    ASSERT(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment, it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      last_environment->SetValueAt(phi->merged_index(), phi);
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                   graph_->GetConstantUndefined());
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    if (FLAG_trace_environment) {
+      PrintF("Process instruction %d\n", current->id());
+    }
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+  allocator_->BeginInstruction();
+  if (current->has_position()) position_ = current->position();
+  LInstruction* instr = current->CompileToLithium(this);
+
+  if (instr != NULL) {
+    if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+      instr = AssignPointerMap(instr);
+    }
+    if (FLAG_stress_environments && !instr->HasEnvironment()) {
+      instr = AssignEnvironment(instr);
+    }
+    if (current->IsBranch()) {
+      instr->set_hydrogen_value(HBranch::cast(current)->value());
+    } else {
+      instr->set_hydrogen_value(current);
+    }
+
+    int index = chunk_->AddInstruction(instr, current_block_);
+    allocator_->SummarizeInstruction(index);
+  } else {
+    // This instruction should be omitted.
+    allocator_->OmitInstruction();
+  }
+  current_instruction_ = old_current;
+}
+
+
+void LEnvironment::WriteTranslation(LCodeGen* cgen,
+                                    Translation* translation) const {
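+  // The recursion through outer environments terminates at a NULL
+  // receiver. (Note that testing 'this' against NULL is formally
+  // undefined behavior in C++.)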
+  if (this == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = values()->length();
+  // The output frame height does not include the parameters.
+  int height = translation_size - parameter_count();
+
+  outer()->WriteTranslation(cgen, translation);
+  int closure_id = cgen->DefineDeoptimizationLiteral(closure());
+  translation->BeginFrame(ast_id(), closure_id, height);
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = values()->at(i);
+    // spilled_registers_ and spilled_double_registers_ are either
+    // both NULL or both set.
+    if (spilled_registers_ != NULL && value != NULL) {
+      if (value->IsRegister() &&
+          spilled_registers_[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        cgen->AddToTranslation(translation,
+                               spilled_registers_[value->index()],
+                               HasTaggedValueAt(i));
+      } else if (value->IsDoubleRegister() &&
+                 spilled_double_registers_[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        cgen->AddToTranslation(translation,
+                               spilled_double_registers_[value->index()],
+                               false);
+      }
+    }
+
+    cgen->AddToTranslation(translation, value, HasTaggedValueAt(i));
+  }
+}
+
+
+void LEnvironment::PrintTo(StringStream* stream) const {
+  stream->Add("[id=%d|", ast_id());
+  stream->Add("[parameters=%d|", parameter_count());
+  stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
+  for (int i = 0; i < values_.length(); ++i) {
+    if (i != 0) stream->Add(";");
+    if (values_[i] == NULL) {
+      stream->Add("[hole]");
+    } else {
+      values_[i]->PrintTo(stream);
+    }
+  }
+  stream->Add("]");
+}
+
+
+LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+  if (hydrogen_env == NULL) return NULL;
+
+  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+  int ast_id = hydrogen_env->ast_id();
+  ASSERT(ast_id != AstNode::kNoNumber);
+  int value_count = hydrogen_env->values()->length();
+  LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
+                                          ast_id,
+                                          hydrogen_env->parameter_count(),
+                                          argument_count_,
+                                          value_count,
+                                          outer);
+  int argument_index = 0;
+  for (int i = 0; i < value_count; ++i) {
+    HValue* value = hydrogen_env->values()->at(i);
+    LOperand* op = NULL;
+    if (value->IsArgumentsObject()) {
+      op = NULL;
+    } else if (value->IsPushArgument()) {
+      op = new LArgument(argument_index++);
+    } else {
+      op = UseOrConstant(value);
+      if (op->IsUnallocated()) {
+        LUnallocated* unalloc = LUnallocated::cast(op);
+        unalloc->set_policy(LUnallocated::ANY);
+      }
+    }
+    result->AddValue(op, value->representation());
+  }
+
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  LInstruction* result = new LGoto(instr->FirstSuccessor()->block_id(),
+                                   instr->include_stack_check());
+  if (instr->include_stack_check()) result = AssignPointerMap(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* v = instr->value();
+  HBasicBlock* first = instr->FirstSuccessor();
+  HBasicBlock* second = instr->SecondSuccessor();
+  ASSERT(first != NULL && second != NULL);
+  int first_id = first->block_id();
+  int second_id = second->block_id();
+
+  if (v->EmitAtUses()) {
+    if (v->IsClassOfTest()) {
+      HClassOfTest* compare = HClassOfTest::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
+                                       TempRegister(),
+                                       TempRegister(),
+                                       first_id,
+                                       second_id);
+    } else if (v->IsCompare()) {
+      HCompare* compare = HCompare::cast(v);
+      Token::Value op = compare->token();
+      HValue* left = compare->left();
+      HValue* right = compare->right();
+      if (left->representation().IsInteger32()) {
+        ASSERT(right->representation().IsInteger32());
+        return new LCmpIDAndBranch(op,
+                                   UseRegisterAtStart(left),
+                                   UseOrConstantAtStart(right),
+                                   first_id,
+                                   second_id,
+                                   false);
+      } else if (left->representation().IsDouble()) {
+        ASSERT(right->representation().IsDouble());
+        return new LCmpIDAndBranch(op,
+                                   UseRegisterAtStart(left),
+                                   UseRegisterAtStart(right),
+                                   first_id,
+                                   second_id,
+                                   true);
+      } else {
+        ASSERT(left->representation().IsTagged());
+        ASSERT(right->representation().IsTagged());
+        bool reversed = op == Token::GT || op == Token::LTE;
+        LOperand* left_operand = UseFixed(left, reversed ? eax : edx);
+        LOperand* right_operand = UseFixed(right, reversed ? edx : eax);
+        LInstruction* result = new LCmpTAndBranch(left_operand,
+                                                  right_operand,
+                                                  first_id,
+                                                  second_id);
+        return MarkAsCall(result, instr);
+      }
+    } else if (v->IsIsSmi()) {
+      HIsSmi* compare = HIsSmi::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LIsSmiAndBranch(Use(compare->value()),
+                                 first_id,
+                                 second_id);
+    } else if (v->IsHasInstanceType()) {
+      HHasInstanceType* compare = HHasInstanceType::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
+                                           TempRegister(),
+                                           first_id,
+                                           second_id);
+    } else if (v->IsHasCachedArrayIndex()) {
+      HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LHasCachedArrayIndexAndBranch(
+          UseRegisterAtStart(compare->value()), first_id, second_id);
+    } else if (v->IsIsNull()) {
+      HIsNull* compare = HIsNull::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      // We only need a temp register for non-strict compare.
+      LOperand* temp = compare->is_strict() ? NULL : TempRegister();
+      return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
+                                  compare->is_strict(),
+                                  temp,
+                                  first_id,
+                                  second_id);
+    } else if (v->IsCompareJSObjectEq()) {
+      HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
+      return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
+                                         UseRegisterAtStart(compare->right()),
+                                         first_id,
+                                         second_id);
+    } else if (v->IsInstanceOf()) {
+      HInstanceOf* instance_of = HInstanceOf::cast(v);
+      LInstruction* result =
+          new LInstanceOfAndBranch(Use(instance_of->left()),
+                                   Use(instance_of->right()),
+                                   first_id,
+                                   second_id);
+      return MarkAsCall(result, instr);
+    } else if (v->IsTypeofIs()) {
+      HTypeofIs* typeof_is = HTypeofIs::cast(v);
+      return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()),
+                                    first_id,
+                                    second_id);
+    } else {
+      if (v->IsConstant()) {
+        if (HConstant::cast(v)->handle()->IsTrue()) {
+          return new LGoto(first_id);
+        } else if (HConstant::cast(v)->handle()->IsFalse()) {
+          return new LGoto(second_id);
+        }
+      }
+      Abort("Undefined compare before branch");
+      return NULL;
+    }
+  }
+  return new LBranch(UseRegisterAtStart(v), first_id, second_id);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMapAndBranch(
+    HCompareMapAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  HBasicBlock* first = instr->FirstSuccessor();
+  HBasicBlock* second = instr->SecondSuccessor();
+  return new LCmpMapAndBranch(value,
+                              instr->map(),
+                              first->block_id(),
+                              second->block_id());
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+  return DefineAsRegister(new LArgumentsLength(Use(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  return DefineAsRegister(new LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LInstruction* result =
+      new LInstanceOf(Use(instr->left()), Use(instr->right()));
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), edi);
+  LOperand* receiver = UseFixed(instr->receiver(), eax);
+  LOperand* length = UseRegisterAtStart(instr->length());
+  LOperand* elements = UseRegisterAtStart(instr->elements());
+  LInstruction* result = new LApplyArguments(function,
+                                             receiver,
+                                             length,
+                                             elements);
+  return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
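+  // Pushed arguments are consumed by a later call instruction; the builder
+  // tracks how many are outstanding in argument_count_ (the call builders
+  // subtract their argument counts again).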
+  ++argument_count_;
+  LOperand* argument = Use(instr->argument());
+  return new LPushArgument(argument);
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
+  return DefineAsRegister(new LGlobalObject);
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
+  return DefineAsRegister(new LGlobalReceiver);
+}
+
+
+LInstruction* LChunkBuilder::DoCallConstantFunction(
+    HCallConstantFunction* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallConstantFunction, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  MathFunctionId op = instr->op();
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LInstruction* result = new LUnaryMathOperation(input);
+  switch (op) {
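+    // abs can deoptimize (e.g. for the most negative integer) and may
+    // allocate a new heap number, hence the pointer map; floor and round
+    // can only deoptimize, and sqrt cannot fail.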
+    case kMathAbs:
+      return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+    case kMathFloor:
+      return AssignEnvironment(DefineAsRegister(result));
+    case kMathRound:
+      return AssignEnvironment(DefineAsRegister(result));
+    case kMathSqrt:
+      return DefineSameAsFirst(result);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
+  ASSERT(instr->key()->representation().IsTagged());
+  argument_count_ -= instr->argument_count();
+  UseFixed(instr->key(), ecx);
+  return MarkAsCall(DefineFixed(new LCallKeyed, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallNamed, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallGlobal, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallKnownGlobal, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+  LOperand* constructor = UseFixed(instr->constructor(), edi);
+  argument_count_ -= instr->argument_count();
+  LInstruction* result = new LCallNew(constructor);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallFunction, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallRuntime, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
+  return DoBit(Token::BIT_AND, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
+  ASSERT(instr->value()->representation().IsInteger32());
+  ASSERT(instr->representation().IsInteger32());
+  return DefineSameAsFirst(new LBitNotI(UseRegisterAtStart(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
+  return DoBit(Token::BIT_OR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
+  return DoBit(Token::BIT_XOR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else if (instr->representation().IsInteger32()) {
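+    // On ia32, idiv takes its dividend in edx:eax and leaves the quotient
+    // in eax and the remainder in edx, hence the fixed registers below.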
+    // The temporary operand is necessary to ensure that right is not allocated
+    // into edx.
+    FixedTemp(edx);
+    LOperand* value = UseFixed(instr->left(), eax);
+    LOperand* divisor = UseRegister(instr->right());
+    return AssignEnvironment(DefineFixed(new LDivI(value, divisor), eax));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    // The temporary operand is necessary to ensure that right is not allocated
+    // into edx.
+    FixedTemp(edx);
+    LOperand* value = UseFixed(instr->left(), eax);
+    LOperand* divisor = UseRegister(instr->right());
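+    // idiv leaves the remainder in edx, so the result is fixed to edx.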
+    LInstruction* result = DefineFixed(new LModI(value, divisor), edx);
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+        instr->CheckFlag(HValue::kCanBeDivByZero)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsTagged()) {
+    return DoArithmeticT(Token::MOD, instr);
+  } else {
+    ASSERT(instr->representation().IsDouble());
+    // We call a C function for the double modulo; it can't trigger a GC.
+    // We need to use a fixed result register for the call.
+    // TODO(fschneider): Allow any registers as inputs.
+    LOperand* left = UseFixedDouble(instr->left(), xmm1);
+    LOperand* right = UseFixedDouble(instr->right(), xmm2);
+    LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+    return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstant(instr->MostConstantOperand());
+    LOperand* temp = NULL;
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      temp = TempRegister();
+    }
+    LMulI* mul = new LMulI(left, right, temp);
+    return AssignEnvironment(DefineSameAsFirst(mul));
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MUL, instr);
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    LSubI* sub = new LSubI(left, right);
+    LInstruction* result = DefineSameAsFirst(sub);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    LAddI* add = new LAddI(left, right);
+    LInstruction* result = DefineSameAsFirst(add);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::ADD, instr);
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
+  Token::Value op = instr->token();
+  if (instr->left()->representation().IsInteger32()) {
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    return DefineAsRegister(new LCmpID(op, left, right, false));
+  } else if (instr->left()->representation().IsDouble()) {
+    ASSERT(instr->right()->representation().IsDouble());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return DefineAsRegister(new LCmpID(op, left, right, true));
+  } else {
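+    // A tagged comparison calls a stub that expects its arguments in fixed
+    // registers; GT and LTE are handled by swapping the operands.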
+    bool reversed = (op == Token::GT || op == Token::LTE);
+    LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
+    LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
+    LInstruction* result = new LCmpT(left, right);
+    return MarkAsCall(DefineFixed(result, eax), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareJSObjectEq(
+    HCompareJSObjectEq* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LInstruction* result = new LCmpJSObjectEq(left, right);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new LIsNull(value,
+                                      instr->is_strict()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseAtStart(instr->value());
+
+  return DefineAsRegister(new LIsSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new LHasInstanceType(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
+    HHasCachedArrayIndex* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegister(instr->value());
+
+  return DefineAsRegister(new LHasCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseTempRegister(instr->value());
+
+  return DefineSameAsFirst(new LClassOfTest(value, TempRegister()));
+}
+
+
+LInstruction* LChunkBuilder::DoArrayLength(HArrayLength* instr) {
+  LOperand* array = NULL;
+  LOperand* temporary = NULL;
+
+  if (instr->value()->IsLoadElements()) {
+    array = UseRegisterAtStart(instr->value());
+  } else {
+    array = UseRegister(instr->value());
+    temporary = TempRegister();
+  }
+
+  LInstruction* result = new LArrayLength(array, temporary);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
+  LOperand* object = UseRegister(instr->value());
+  LInstruction* result = new LValueOf(object, TempRegister());
+  return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
+                                            Use(instr->length())));
+}
+
+
+LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+  LOperand* value = UseFixed(instr->value(), eax);
+  return MarkAsCall(new LThrow(value), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
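+  // Dispatch on the (from, to) representation pair. Conversions that can
+  // fail get an environment to deoptimize with; number tagging, which may
+  // allocate, also gets a pointer map.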
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(instr->value());
+      LInstruction* res = new LNumberUntagD(value);
+      return AssignEnvironment(DefineAsRegister(res));
+    } else {
+      ASSERT(to.IsInteger32());
+      LOperand* value = UseRegister(instr->value());
+      bool needs_check = !instr->value()->type().IsSmi();
+      if (needs_check) {
+        LOperand* xmm_temp =
+            (instr->CanTruncateToInt32() && !CpuFeatures::IsSupported(SSE3))
+            ? NULL
+            : FixedTemp(xmm1);
+        LInstruction* res = new LTaggedToI(value, xmm_temp);
+        return AssignEnvironment(DefineSameAsFirst(res));
+      } else {
+        return DefineSameAsFirst(new LSmiUntag(value, needs_check));
+      }
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(instr->value());
+      LOperand* temp = TempRegister();
+
+      // Make sure that temp and result_temp are different registers.
+      LUnallocated* result_temp = TempRegister();
+      LInstruction* result = new LNumberTagD(value, temp);
+      return AssignPointerMap(Define(result, result_temp));
+    } else {
+      ASSERT(to.IsInteger32());
+      LOperand* value = UseRegister(instr->value());
+      return AssignEnvironment(DefineAsRegister(new LDoubleToI(value)));
+    }
+  } else if (from.IsInteger32()) {
+    if (to.IsTagged()) {
+      HValue* val = instr->value();
+      LOperand* value = UseRegister(val);
+      if (val->HasRange() && val->range()->IsInSmiRange()) {
+        return DefineSameAsFirst(new LSmiTag(value));
+      } else {
+        LInstruction* result = new LNumberTagI(value);
+        return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+      }
+    } else {
+      ASSERT(to.IsDouble());
+      return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckSmi(value, zero));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  LInstruction* result = new LCheckInstanceType(value, temp);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
+  LOperand* temp = TempRegister();
+  LInstruction* result =
+      new LCheckPrototypeMaps(temp,
+                              instr->holder(),
+                              instr->receiver_map());
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckSmi(value, not_zero));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckFunction(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new LCheckMap(value);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  return new LReturn(UseFixed(instr->value(), eax));
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsInteger32()) {
+    int32_t value = instr->Integer32Value();
+    return DefineAsRegister(new LConstantI(value));
+  } else if (r.IsDouble()) {
+    double value = instr->DoubleValue();
+    return DefineAsRegister(new LConstantD(value));
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new LConstantT(instr->handle()));
+  } else {
+    Abort("unsupported constant representation");
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
+  LInstruction* result = new LLoadGlobal;
+  return instr->check_hole_value()
+      ? AssignEnvironment(DefineAsRegister(result))
+      : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
+  return new LStoreGlobal(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  return DefineAsRegister(
+      new LLoadNamedField(UseRegisterAtStart(instr->object())));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* object = UseFixed(instr->object(), eax);
+  LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), eax);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return DefineSameAsFirst(new LLoadElements(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
+    HLoadKeyedFastElement* instr) {
+  Representation r = instr->representation();
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* key = UseRegisterAtStart(instr->key());
+  LOperand* load_result = NULL;
+  // A double result needs an extra temp register because the loaded heap
+  // number has to be converted into a double register.
+  if (r.IsDouble()) load_result = TempRegister();
+  LInstruction* result = new LLoadKeyedFastElement(obj,
+                                                   key,
+                                                   load_result);
+  if (r.IsDouble()) {
+    result = DefineAsRegister(result);
+  } else {
+    result = DefineSameAsFirst(result);
+  }
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* object = UseFixed(instr->object(), edx);
+  LOperand* key = UseFixed(instr->key(), eax);
+
+  LInstruction* result =
+      DefineFixed(new LLoadKeyedGeneric(object, key), eax);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
+    HStoreKeyedFastElement* instr) {
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  ASSERT(instr->value()->representation().IsTagged());
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+
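+  // The write barrier clobbers its inputs, so the object is used as a temp
+  // register, and the value and key get temp registers too when a write
+  // barrier is needed.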
+  LOperand* obj = UseTempRegister(instr->object());
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegisterAtStart(instr->value());
+  LOperand* key = needs_write_barrier
+      ? UseTempRegister(instr->key())
+      : UseRegisterOrConstantAtStart(instr->key());
+
+  return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* obj = UseFixed(instr->object(), edx);
+  LOperand* key = UseFixed(instr->key(), ecx);
+  LOperand* val = UseFixed(instr->value(), eax);
+
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsTagged());
+  ASSERT(instr->value()->representation().IsTagged());
+
+  return MarkAsCall(new LStoreKeyedGeneric(obj, key, val), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
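+  // Only heap pointers need the write barrier; a smi value never does.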
+  bool needs_write_barrier = !instr->value()->type().IsSmi();
+
+  LOperand* obj = needs_write_barrier
+      ? UseTempRegister(instr->object())
+      : UseRegisterAtStart(instr->object());
+
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegister(instr->value());
+
+  // We only need a scratch register if we have a write barrier or if we
+  // store into the properties array (as opposed to an in-object property).
+  LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
+      ? TempRegister() : NULL;
+
+  return new LStoreNamedField(obj,
+                              instr->name(),
+                              val,
+                              instr->is_in_object(),
+                              instr->offset(),
+                              temp,
+                              needs_write_barrier,
+                              instr->transition());
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* obj = UseFixed(instr->object(), edx);
+  LOperand* val = UseFixed(instr->value(), eax);
+
+  LInstruction* result = new LStoreNamedGeneric(obj, instr->name(), val);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LArrayLiteral, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LObjectLiteral, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LRegExpLiteral, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LFunctionLiteral, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
+  LInstruction* result = new LDeleteProperty(Use(instr->object()),
+                                             UseOrConstant(instr->key()));
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  int spill_index = chunk()->GetParameterStackSlot(instr->index());
+  return DefineAsSpilled(new LParameter, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  int spill_index = chunk()->GetNextSpillIndex(false);  // Not double-width.
+  return DefineAsSpilled(new LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallStub, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object to compile here; any
+  // function with real uses bails out of optimization instead.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  LOperand* arguments = UseRegister(instr->arguments());
+  LOperand* length = UseTempRegister(instr->length());
+  LOperand* index = Use(instr->index());
+  LInstruction* result = new LAccessArgumentsAt(arguments, length, index);
+  return DefineAsRegister(AssignEnvironment(result));
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LInstruction* result = new LTypeof(Use(instr->value()));
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
+  return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  HEnvironment* env = current_block_->last_environment();
+  ASSERT(env != NULL);
+
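+  // An HSimulate captures the environment state (expression stack and local
+  // bindings) at a safe point; replay its pops, binds, and pushes on the
+  // block's last environment so deoptimization sees the right state.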
+  env->set_ast_id(instr->ast_id());
+
+  env->Drop(instr->pop_count());
+  for (int i = 0; i < instr->values()->length(); ++i) {
+    HValue* value = instr->values()->at(i);
+    if (instr->HasAssignedIndexAt(i)) {
+      env->Bind(instr->GetAssignedIndexAt(i), value);
+    } else {
+      env->Push(value);
+    }
+  }
+
+  if (FLAG_trace_environment) {
+    PrintF("Reconstructed environment ast_id=%d, instr_id=%d\n",
+           instr->ast_id(),
+           instr->id());
+    env->PrintToStd();
+  }
+  ASSERT(env->values()->length() == instr->environment_height());
+
+  // If there is an instruction pending a deoptimization environment, create
+  // a lazy bailout instruction to capture the environment.
+  if (pending_deoptimization_ast_id_ == instr->ast_id()) {
+    LInstruction* result = new LLazyBailout;
+    result = AssignEnvironment(result);
+    instructions_pending_deoptimization_environment_->
+        set_deoptimization_environment(result->environment());
+    ClearInstructionPendingDeoptimizationEnvironment();
+    return result;
+  }
+
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  return MarkAsCall(new LStackCheck, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
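+  // Entering an inlined function emits no code; it only pushes a fresh
+  // environment for the inlined body onto the environment chain.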
+  HEnvironment* outer = current_block_->last_environment();
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->function(),
+                                               false,
+                                               undefined);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedClosure(instr->closure());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment()->outer();
+  current_block_->UpdateEnvironment(outer);
+  return NULL;
+}
+
+
+void LPointerMap::RecordPointer(LOperand* op) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  pointer_operands_.Add(op);
+}
+
+
+void LPointerMap::PrintTo(StringStream* stream) const {
+  stream->Add("{");
+  for (int i = 0; i < pointer_operands_.length(); ++i) {
+    if (i != 0) stream->Add(";");
+    pointer_operands_[i]->PrintTo(stream);
+  }
+  stream->Add("} @%d", position());
+}
+
+} }  // namespace v8::internal
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
new file mode 100644
index 0000000..af0d560
--- /dev/null
+++ b/src/ia32/lithium-ia32.h
@@ -0,0 +1,2071 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_LITHIUM_IA32_H_
+#define V8_IA32_LITHIUM_IA32_H_
+
+#include "hydrogen.h"
+#include "lithium-allocator.h"
+#include "safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+class LEnvironment;
+class Translation;
+class LGapNode;
+
+
+// Type hierarchy:
+//
+// LInstruction
+//   LAccessArgumentsAt
+//   LArgumentsElements
+//   LArgumentsLength
+//   LBinaryOperation
+//     LAddI
+//     LApplyArguments
+//     LArithmeticD
+//     LArithmeticT
+//     LBitI
+//     LBoundsCheck
+//     LCmpID
+//     LCmpIDAndBranch
+//     LCmpJSObjectEq
+//     LCmpJSObjectEqAndBranch
+//     LCmpT
+//     LDivI
+//     LInstanceOf
+//     LInstanceOfAndBranch
+//     LLoadKeyedFastElement
+//     LLoadKeyedGeneric
+//     LModI
+//     LMulI
+//     LShiftI
+//     LSubI
+//   LCallConstantFunction
+//   LCallFunction
+//   LCallGlobal
+//   LCallKeyed
+//   LCallKnownGlobal
+//   LCallNamed
+//   LCallRuntime
+//   LCallStub
+//   LConstant
+//     LConstantD
+//     LConstantI
+//     LConstantT
+//   LDeoptimize
+//   LFunctionLiteral
+//   LGlobalObject
+//   LGlobalReceiver
+//   LLabel
+//   LLazyBailout
+//   LLoadGlobal
+//   LMaterializedLiteral
+//     LArrayLiteral
+//     LObjectLiteral
+//     LRegExpLiteral
+//   LOsrEntry
+//   LParameter
+//   LRegExpConstructResult
+//   LStackCheck
+//   LStoreKeyed
+//     LStoreKeyedFastElement
+//     LStoreKeyedGeneric
+//   LStoreNamed
+//     LStoreNamedField
+//     LStoreNamedGeneric
+//   LUnaryOperation
+//     LArrayLength
+//     LBitNotI
+//     LBranch
+//     LCallNew
+//     LCheckFunction
+//     LCheckInstanceType
+//     LCheckMap
+//     LCheckPrototypeMaps
+//     LCheckSmi
+//     LClassOfTest
+//     LClassOfTestAndBranch
+//     LDeleteProperty
+//     LDoubleToI
+//     LHasCachedArrayIndex
+//     LHasCachedArrayIndexAndBranch
+//     LHasInstanceType
+//     LHasInstanceTypeAndBranch
+//     LInteger32ToDouble
+//     LIsNull
+//     LIsNullAndBranch
+//     LIsSmi
+//     LIsSmiAndBranch
+//     LLoadNamedField
+//     LLoadNamedGeneric
+//     LNumberTagD
+//     LNumberTagI
+//     LPushArgument
+//     LReturn
+//     LSmiTag
+//     LStoreGlobal
+//     LTaggedToI
+//     LThrow
+//     LTypeof
+//     LTypeofIs
+//     LTypeofIsAndBranch
+//     LUnaryMathOperation
+//     LValueOf
+//   LUnknownOSRValue
+
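+// The instruction lists below follow the "X macro" pattern: each list is a
+// macro that applies its argument V to every instruction name, so a single
+// list can stamp out forward declarations, type testers (see DECLARE_DO in
+// LInstruction below), and other per-instruction boilerplate.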
+#define LITHIUM_ALL_INSTRUCTION_LIST(V)         \
+  V(BinaryOperation)                            \
+  V(Constant)                                   \
+  V(Call)                                       \
+  V(MaterializedLiteral)                        \
+  V(StoreKeyed)                                 \
+  V(StoreNamed)                                 \
+  V(UnaryOperation)                             \
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
+
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
+  V(AccessArgumentsAt)                          \
+  V(AddI)                                       \
+  V(ApplyArguments)                             \
+  V(ArgumentsElements)                          \
+  V(ArgumentsLength)                            \
+  V(ArithmeticD)                                \
+  V(ArithmeticT)                                \
+  V(ArrayLength)                                \
+  V(ArrayLiteral)                               \
+  V(BitI)                                       \
+  V(BitNotI)                                    \
+  V(BoundsCheck)                                \
+  V(Branch)                                     \
+  V(CallConstantFunction)                       \
+  V(CallFunction)                               \
+  V(CallGlobal)                                 \
+  V(CallKeyed)                                  \
+  V(CallKnownGlobal)                            \
+  V(CallNamed)                                  \
+  V(CallNew)                                    \
+  V(CallRuntime)                                \
+  V(CallStub)                                   \
+  V(CheckFunction)                              \
+  V(CheckInstanceType)                          \
+  V(CheckMap)                                   \
+  V(CheckPrototypeMaps)                         \
+  V(CheckSmi)                                   \
+  V(CmpID)                                      \
+  V(CmpIDAndBranch)                             \
+  V(CmpJSObjectEq)                              \
+  V(CmpJSObjectEqAndBranch)                     \
+  V(CmpMapAndBranch)                            \
+  V(CmpT)                                       \
+  V(CmpTAndBranch)                              \
+  V(ConstantD)                                  \
+  V(ConstantI)                                  \
+  V(ConstantT)                                  \
+  V(DeleteProperty)                             \
+  V(Deoptimize)                                 \
+  V(DivI)                                       \
+  V(DoubleToI)                                  \
+  V(FunctionLiteral)                            \
+  V(Gap)                                        \
+  V(GlobalObject)                               \
+  V(GlobalReceiver)                             \
+  V(Goto)                                       \
+  V(InstanceOf)                                 \
+  V(InstanceOfAndBranch)                        \
+  V(Integer32ToDouble)                          \
+  V(IsNull)                                     \
+  V(IsNullAndBranch)                            \
+  V(IsSmi)                                      \
+  V(IsSmiAndBranch)                             \
+  V(HasInstanceType)                            \
+  V(HasInstanceTypeAndBranch)                   \
+  V(HasCachedArrayIndex)                        \
+  V(HasCachedArrayIndexAndBranch)               \
+  V(ClassOfTest)                                \
+  V(ClassOfTestAndBranch)                       \
+  V(Label)                                      \
+  V(LazyBailout)                                \
+  V(LoadElements)                               \
+  V(LoadGlobal)                                 \
+  V(LoadKeyedFastElement)                       \
+  V(LoadKeyedGeneric)                           \
+  V(LoadNamedField)                             \
+  V(LoadNamedGeneric)                           \
+  V(ModI)                                       \
+  V(MulI)                                       \
+  V(NumberTagD)                                 \
+  V(NumberTagI)                                 \
+  V(NumberUntagD)                               \
+  V(ObjectLiteral)                              \
+  V(OsrEntry)                                   \
+  V(Parameter)                                  \
+  V(PushArgument)                               \
+  V(RegExpLiteral)                              \
+  V(Return)                                     \
+  V(ShiftI)                                     \
+  V(SmiTag)                                     \
+  V(SmiUntag)                                   \
+  V(StackCheck)                                 \
+  V(StoreGlobal)                                \
+  V(StoreKeyedFastElement)                      \
+  V(StoreKeyedGeneric)                          \
+  V(StoreNamedField)                            \
+  V(StoreNamedGeneric)                          \
+  V(SubI)                                       \
+  V(TaggedToI)                                  \
+  V(Throw)                                      \
+  V(Typeof)                                     \
+  V(TypeofIs)                                   \
+  V(TypeofIsAndBranch)                          \
+  V(UnaryMathOperation)                         \
+  V(UnknownOSRValue)                            \
+  V(ValueOf)
+
+
+#define DECLARE_INSTRUCTION(type)                \
+  virtual bool Is##type() const { return true; } \
+  static L##type* cast(LInstruction* instr) {    \
+    ASSERT(instr->Is##type());                   \
+    return reinterpret_cast<L##type*>(instr);    \
+  }
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)        \
+  virtual void CompileToNative(LCodeGen* generator);        \
+  virtual const char* Mnemonic() const { return mnemonic; } \
+  DECLARE_INSTRUCTION(type)
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type)     \
+  H##type* hydrogen() const {               \
+    return H##type::cast(hydrogen_value()); \
+  }
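+
+// As an illustration (a sketch of the expansion, not literal code),
+// DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") inside class LGoto declares:
+//   virtual void CompileToNative(LCodeGen* generator);
+//   virtual const char* Mnemonic() const { return "goto"; }
+//   virtual bool IsGoto() const { return true; }
+//   static LGoto* cast(LInstruction* instr);  // checked downcast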
+
+
+class LInstruction: public ZoneObject {
+ public:
+  LInstruction()
+      : hydrogen_value_(NULL) { }
+  virtual ~LInstruction() { }
+
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream) const { }
+
+  // Declare virtual type testers.
+#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
+  LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+  virtual bool IsControl() const { return false; }
+
+  void set_environment(LEnvironment* env) { environment_.set(env); }
+  LEnvironment* environment() const { return environment_.get(); }
+  bool HasEnvironment() const { return environment_.is_set(); }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_result(LOperand* operand) { result_.set(operand); }
+  LOperand* result() const { return result_.get(); }
+  bool HasResult() const { return result_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  void set_deoptimization_environment(LEnvironment* env) {
+    deoptimization_environment_.set(env);
+  }
+  LEnvironment* deoptimization_environment() const {
+    return deoptimization_environment_.get();
+  }
+  bool HasDeoptimizationEnvironment() const {
+    return deoptimization_environment_.is_set();
+  }
+
+ private:
+  SetOncePointer<LEnvironment> environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  SetOncePointer<LOperand> result_;
+  HValue* hydrogen_value_;
+  SetOncePointer<LEnvironment> deoptimization_environment_;
+};
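+
+// Typical builder flow (a sketch, mirroring LChunkBuilder::DoAdd in
+// lithium-ia32.cc): create the instruction, pin its result operand, then
+// attach deoptimization support where needed:
+//   LInstruction* result = DefineSameAsFirst(new LAddI(left, right));
+//   if (instr->CheckFlag(HValue::kCanOverflow)) {
+//     result = AssignEnvironment(result);
+//   }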
+
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+  LGapResolver(const ZoneList<LMoveOperands>* moves, LOperand* marker_operand);
+  const ZoneList<LMoveOperands>* ResolveInReverseOrder();
+
+ private:
+  LGapNode* LookupNode(LOperand* operand);
+  bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
+  bool CanReach(LGapNode* a, LGapNode* b);
+  void RegisterMove(LMoveOperands move);
+  void AddResultMove(LOperand* from, LOperand* to);
+  void AddResultMove(LGapNode* from, LGapNode* to);
+  void ResolveCycle(LGapNode* start);
+
+  ZoneList<LGapNode*> nodes_;
+  ZoneList<LGapNode*> identified_cycles_;
+  ZoneList<LMoveOperands> result_;
+  LOperand* marker_operand_;
+  int next_visited_id_;
+  int bailout_after_ast_id_;
+};
+
+
+class LParallelMove : public ZoneObject {
+ public:
+  LParallelMove() : move_operands_(4) { }
+
+  void AddMove(LOperand* from, LOperand* to) {
+    move_operands_.Add(LMoveOperands(from, to));
+  }
+
+  bool IsRedundant() const;
+
+  const ZoneList<LMoveOperands>* move_operands() const {
+    return &move_operands_;
+  }
+
+  void PrintDataTo(StringStream* stream) const;
+
+ private:
+  ZoneList<LMoveOperands> move_operands_;
+};
+
+
+class LGap: public LInstruction {
+ public:
+  explicit LGap(HBasicBlock* block)
+      : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
+    if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+    return parallel_moves_[pos];
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
+
+
+class LGoto: public LInstruction {
+ public:
+  LGoto(int block_id, bool include_stack_check = false)
+      : block_id_(block_id), include_stack_check_(include_stack_check) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int block_id() const { return block_id_; }
+  bool include_stack_check() const { return include_stack_check_; }
+
+ private:
+  int block_id_;
+  bool include_stack_check_;
+};
+
+
+class LLazyBailout: public LInstruction {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+
+class LDeoptimize: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+};
+
+
+class LLabel: public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block)
+      : LGap(block), replacement_(NULL) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
+};
+
+
+class LParameter: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+
+  TranscendentalCache::Type transcendental_type() {
+    return hydrogen()->transcendental_type();
+  }
+};
+
+
+class LUnknownOSRValue: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+class LUnaryOperation: public LInstruction {
+ public:
+  explicit LUnaryOperation(LOperand* input) : input_(input) { }
+
+  DECLARE_INSTRUCTION(UnaryOperation)
+
+  LOperand* input() const { return input_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+ private:
+  LOperand* input_;
+};
+
+
+class LBinaryOperation: public LInstruction {
+ public:
+  LBinaryOperation(LOperand* left, LOperand* right)
+      : left_(left), right_(right) { }
+
+  DECLARE_INSTRUCTION(BinaryOperation)
+
+  LOperand* left() const { return left_; }
+  LOperand* right() const { return right_; }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+ private:
+  LOperand* left_;
+  LOperand* right_;
+};
+
+
+class LApplyArguments: public LBinaryOperation {
+ public:
+  LApplyArguments(LOperand* function,
+                  LOperand* receiver,
+                  LOperand* length,
+                  LOperand* elements)
+      : LBinaryOperation(function, receiver),
+        length_(length),
+        elements_(elements) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+  LOperand* function() const { return left(); }
+  LOperand* receiver() const { return right(); }
+  LOperand* length() const { return length_; }
+  LOperand* elements() const { return elements_; }
+
+ private:
+  LOperand* length_;
+  LOperand* elements_;
+};
+
+
+class LAccessArgumentsAt: public LInstruction {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index)
+      : arguments_(arguments), length_(length), index_(index) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  LOperand* arguments() const { return arguments_; }
+  LOperand* length() const { return length_; }
+  LOperand* index() const { return index_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+ private:
+  LOperand* arguments_;
+  LOperand* length_;
+  LOperand* index_;
+};
+
+
+class LArgumentsLength: public LUnaryOperation {
+ public:
+  explicit LArgumentsLength(LOperand* elements) : LUnaryOperation(elements) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements: public LInstruction {
+ public:
+  LArgumentsElements() { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+};
+
+
+class LModI: public LBinaryOperation {
+ public:
+  LModI(LOperand* left, LOperand* right) : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivI: public LBinaryOperation {
+ public:
+  LDivI(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+};
+
+
+class LMulI: public LBinaryOperation {
+ public:
+  LMulI(LOperand* left, LOperand* right, LOperand* temp)
+      : LBinaryOperation(left, right), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LCmpID: public LBinaryOperation {
+ public:
+  LCmpID(Token::Value op, LOperand* left, LOperand* right, bool is_double)
+      : LBinaryOperation(left, right), op_(op), is_double_(is_double) { }
+
+  Token::Value op() const { return op_; }
+  bool is_double() const { return is_double_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
+
+ private:
+  Token::Value op_;
+  bool is_double_;
+};
+
+
+class LCmpIDAndBranch: public LCmpID {
+ public:
+  LCmpIDAndBranch(Token::Value op,
+                  LOperand* left,
+                  LOperand* right,
+                  int true_block_id,
+                  int false_block_id,
+                  bool is_double)
+      : LCmpID(op, left, right, is_double),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LUnaryMathOperation: public LUnaryOperation {
+ public:
+  explicit LUnaryMathOperation(LOperand* value)
+      : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  MathFunctionId op() const { return hydrogen()->op(); }
+};
+
+
+class LCmpJSObjectEq: public LBinaryOperation {
+ public:
+  LCmpJSObjectEq(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+};
+
+
+class LCmpJSObjectEqAndBranch: public LCmpJSObjectEq {
+ public:
+  LCmpJSObjectEqAndBranch(LOperand* left,
+                          LOperand* right,
+                          int true_block_id,
+                          int false_block_id)
+      : LCmpJSObjectEq(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
+                               "cmp-jsobject-eq-and-branch")
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LIsNull: public LUnaryOperation {
+ public:
+  LIsNull(LOperand* value, bool is_strict)
+      : LUnaryOperation(value), is_strict_(is_strict) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
+
+  bool is_strict() const { return is_strict_; }
+
+ private:
+  bool is_strict_;
+};
+
+
+class LIsNullAndBranch: public LIsNull {
+ public:
+  LIsNullAndBranch(LOperand* value,
+                   bool is_strict,
+                   LOperand* temp,
+                   int true_block_id,
+                   int false_block_id)
+      : LIsNull(value, is_strict),
+        temp_(temp),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LIsSmi: public LUnaryOperation {
+ public:
+  explicit LIsSmi(LOperand* value) : LUnaryOperation(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmi)
+};
+
+
+class LIsSmiAndBranch: public LIsSmi {
+ public:
+  LIsSmiAndBranch(LOperand* value,
+                  int true_block_id,
+                  int false_block_id)
+      : LIsSmi(value),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LHasInstanceType: public LUnaryOperation {
+ public:
+  explicit LHasInstanceType(LOperand* value)
+      : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+
+  InstanceType TestType();  // The type to test against when generating code.
+  Condition BranchCondition();  // The branch condition for 'true'.
+};
+
+
+class LHasInstanceTypeAndBranch: public LHasInstanceType {
+ public:
+  LHasInstanceTypeAndBranch(LOperand* value,
+                            LOperand* temporary,
+                            int true_block_id,
+                            int false_block_id)
+      : LHasInstanceType(value),
+        temp_(temporary),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+  LOperand* temp() { return temp_; }
+
+ private:
+  LOperand* temp_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LHasCachedArrayIndex: public LUnaryOperation {
+ public:
+  explicit LHasCachedArrayIndex(LOperand* value) : LUnaryOperation(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch: public LHasCachedArrayIndex {
+ public:
+  LHasCachedArrayIndexAndBranch(LOperand* value,
+                                int true_block_id,
+                                int false_block_id)
+      : LHasCachedArrayIndex(value),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LClassOfTest: public LUnaryOperation {
+ public:
+  LClassOfTest(LOperand* value, LOperand* temp)
+      : LUnaryOperation(value), temporary_(temp) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  LOperand* temporary() { return temporary_; }
+
+ private:
+  LOperand* temporary_;
+};
+
+
+class LClassOfTestAndBranch: public LClassOfTest {
+ public:
+  LClassOfTestAndBranch(LOperand* value,
+                        LOperand* temporary,
+                        LOperand* temporary2,
+                        int true_block_id,
+                        int false_block_id)
+      : LClassOfTest(value, temporary),
+        temporary2_(temporary2),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+                               "class-of-test-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+  LOperand* temporary2() { return temporary2_; }
+
+ private:
+  LOperand* temporary2_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LCmpT: public LBinaryOperation {
+ public:
+  LCmpT(LOperand* left, LOperand* right) : LBinaryOperation(left, right) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(Compare)
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LCmpTAndBranch: public LCmpT {
+ public:
+  LCmpTAndBranch(LOperand* left,
+                 LOperand* right,
+                 int true_block_id,
+                 int false_block_id)
+      : LCmpT(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LInstanceOf: public LBinaryOperation {
+ public:
+  LInstanceOf(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfAndBranch: public LInstanceOf {
+ public:
+  LInstanceOfAndBranch(LOperand* left,
+                       LOperand* right,
+                       int true_block_id,
+                       int false_block_id)
+      : LInstanceOf(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
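+// Checks that the index operand is within the bounds given by the length
+// operand; code generation deoptimizes when the check fails.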
+class LBoundsCheck: public LBinaryOperation {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length)
+      : LBinaryOperation(index, length) { }
+
+  LOperand* index() const { return left(); }
+  LOperand* length() const { return right(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+};
+
+
+class LBitI: public LBinaryOperation {
+ public:
+  LBitI(Token::Value op, LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right), op_(op) { }
+
+  Token::Value op() const { return op_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+
+ private:
+  Token::Value op_;
+};
+
+
+class LShiftI: public LBinaryOperation {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : LBinaryOperation(left, right), op_(op), can_deopt_(can_deopt) { }
+
+  Token::Value op() const { return op_; }
+
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LSubI: public LBinaryOperation {
+ public:
+  LSubI(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstant: public LInstruction {
+  DECLARE_INSTRUCTION(Constant)
+};
+
+
+class LConstantI: public LConstant {
+ public:
+  explicit LConstantI(int32_t value) : value_(value) { }
+  int32_t value() const { return value_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+
+ private:
+  int32_t value_;
+};
+
+
+class LConstantD: public LConstant {
+ public:
+  explicit LConstantD(double value) : value_(value) { }
+  double value() const { return value_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+
+ private:
+  double value_;
+};
+
+
+class LConstantT: public LConstant {
+ public:
+  explicit LConstantT(Handle<Object> value) : value_(value) { }
+  Handle<Object> value() const { return value_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+
+ private:
+  Handle<Object> value_;
+};
+
+
+class LBranch: public LUnaryOperation {
+ public:
+  LBranch(LOperand* input, int true_block_id, int false_block_id)
+      : LUnaryOperation(input),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Value)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LCmpMapAndBranch: public LUnaryOperation {
+ public:
+  LCmpMapAndBranch(LOperand* value,
+                   Handle<Map> map,
+                   int true_block_id,
+                   int false_block_id)
+      : LUnaryOperation(value),
+        map_(map),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+
+  virtual bool IsControl() const { return true; }
+
+  Handle<Map> map() const { return map_; }
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  Handle<Map> map_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LArrayLength: public LUnaryOperation {
+ public:
+  LArrayLength(LOperand* input, LOperand* temporary)
+      : LUnaryOperation(input), temporary_(temporary) { }
+
+  LOperand* temporary() const { return temporary_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLength, "array-length")
+  DECLARE_HYDROGEN_ACCESSOR(ArrayLength)
+
+ private:
+  LOperand* temporary_;
+};
+
+
+class LValueOf: public LUnaryOperation {
+ public:
+  LValueOf(LOperand* input, LOperand* temporary)
+      : LUnaryOperation(input), temporary_(temporary) { }
+
+  LOperand* temporary() const { return temporary_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
+  DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+
+ private:
+  LOperand* temporary_;
+};
+
+
+class LThrow: public LUnaryOperation {
+ public:
+  explicit LThrow(LOperand* value) : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+};
+
+
+class LBitNotI: public LUnaryOperation {
+ public:
+  explicit LBitNotI(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+};
+
+
+class LAddI: public LBinaryOperation {
+ public:
+  LAddI(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LArithmeticD: public LBinaryOperation {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right), op_(op) { }
+
+  Token::Value op() const { return op_; }
+
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT: public LBinaryOperation {
+ public:
+  LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right), op_(op) { }
+
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const;
+
+  Token::Value op() const { return op_; }
+
+ private:
+  Token::Value op_;
+};
+
+
+class LReturn: public LUnaryOperation {
+ public:
+  explicit LReturn(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField: public LUnaryOperation {
+ public:
+  explicit LLoadNamedField(LOperand* object) : LUnaryOperation(object) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric: public LUnaryOperation {
+ public:
+  explicit LLoadNamedGeneric(LOperand* object) : LUnaryOperation(object) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  LOperand* object() const { return input(); }
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadElements: public LUnaryOperation {
+ public:
+  explicit LLoadElements(LOperand* obj) : LUnaryOperation(obj) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+};
+
+
+class LLoadKeyedFastElement: public LBinaryOperation {
+ public:
+  LLoadKeyedFastElement(LOperand* elements,
+                        LOperand* key,
+                        LOperand* load_result)
+      : LBinaryOperation(elements, key),
+        load_result_(load_result) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
+
+  LOperand* elements() const { return left(); }
+  LOperand* key() const { return right(); }
+  LOperand* load_result() const { return load_result_; }
+
+ private:
+  LOperand* load_result_;
+};
+
+
+class LLoadKeyedGeneric: public LBinaryOperation {
+ public:
+  LLoadKeyedGeneric(LOperand* obj, LOperand* key)
+      : LBinaryOperation(obj, key) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+
+  LOperand* object() const { return left(); }
+  LOperand* key() const { return right(); }
+};
+
+
+class LLoadGlobal: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+};
+
+
+class LStoreGlobal: public LUnaryOperation {
+ public:
+  explicit LStoreGlobal(LOperand* value) : LUnaryOperation(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
+};
+
+
+class LPushArgument: public LUnaryOperation {
+ public:
+  explicit LPushArgument(LOperand* argument) : LUnaryOperation(argument) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LGlobalObject: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+};
+
+
+class LGlobalReceiver: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+};
+
+
+class LCallConstantFunction: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<JSFunction> function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKeyed: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNamed: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
+  DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<String> name() const { return hydrogen()->name(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  int arity() const { return hydrogen()->argument_count() - 2; }
+};
+
+
+class LCallGlobal: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
+  DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<String> name() const { return hydrogen()->name(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKnownGlobal: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
+  DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<JSFunction> target() const { return hydrogen()->target(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew: public LUnaryOperation {
+ public:
+  explicit LCallNew(LOperand* constructor) : LUnaryOperation(constructor) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+  DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+};
+
+
+class LInteger32ToDouble: public LUnaryOperation {
+ public:
+  explicit LInteger32ToDouble(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LNumberTagI: public LUnaryOperation {
+ public:
+  explicit LNumberTagI(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagD: public LUnaryOperation {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp)
+      : LUnaryOperation(value), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+// Sometimes truncating conversion from a double value to an int32.
+class LDoubleToI: public LUnaryOperation {
+ public:
+  explicit LDoubleToI(LOperand* value) : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Sometimes truncating conversion from a tagged value to an int32.
+class LTaggedToI: public LUnaryOperation {
+ public:
+  LTaggedToI(LOperand* value, LOperand* temp)
+      : LUnaryOperation(value), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LSmiTag: public LUnaryOperation {
+ public:
+  explicit LSmiTag(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+};
+
+
+class LNumberUntagD: public LUnaryOperation {
+ public:
+  explicit LNumberUntagD(LOperand* value) : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+};
+
+
+class LSmiUntag: public LUnaryOperation {
+ public:
+  LSmiUntag(LOperand* use, bool needs_check)
+      : LUnaryOperation(use), needs_check_(needs_check) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+  bool needs_check() const { return needs_check_; }
+
+ private:
+  bool needs_check_;
+};
+
+
+class LStoreNamed: public LInstruction {
+ public:
+  LStoreNamed(LOperand* obj, Handle<Object> name, LOperand* val)
+      : object_(obj), name_(name), value_(val) { }
+
+  DECLARE_INSTRUCTION(StoreNamed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  LOperand* object() const { return object_; }
+  Handle<Object> name() const { return name_; }
+  LOperand* value() const { return value_; }
+
+ private:
+  LOperand* object_;
+  Handle<Object> name_;
+  LOperand* value_;
+};
+
+
+class LStoreNamedField: public LStoreNamed {
+ public:
+  LStoreNamedField(LOperand* obj,
+                   Handle<Object> name,
+                   LOperand* val,
+                   bool in_object,
+                   int offset,
+                   LOperand* temp,
+                   bool needs_write_barrier,
+                   Handle<Map> transition)
+      : LStoreNamed(obj, name, val),
+        is_in_object_(in_object),
+        offset_(offset),
+        temp_(temp),
+        needs_write_barrier_(needs_write_barrier),
+        transition_(transition) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+
+  bool is_in_object() { return is_in_object_; }
+  int offset() { return offset_; }
+  LOperand* temp() { return temp_; }
+  bool needs_write_barrier() { return needs_write_barrier_; }
+  Handle<Map> transition() const { return transition_; }
+  void set_transition(Handle<Map> map) { transition_ = map; }
+
+ private:
+  bool is_in_object_;
+  int offset_;
+  LOperand* temp_;
+  bool needs_write_barrier_;
+  Handle<Map> transition_;
+};
+
+
+class LStoreNamedGeneric: public LStoreNamed {
+ public:
+  LStoreNamedGeneric(LOperand* obj,
+                     Handle<Object> name,
+                     LOperand* val)
+      : LStoreNamed(obj, name, val) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+};
+
+
+class LStoreKeyed: public LInstruction {
+ public:
+  LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val)
+      : object_(obj), key_(key), value_(val) { }
+
+  DECLARE_INSTRUCTION(StoreKeyed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  LOperand* object() const { return object_; }
+  LOperand* key() const { return key_; }
+  LOperand* value() const { return value_; }
+
+ private:
+  LOperand* object_;
+  LOperand* key_;
+  LOperand* value_;
+};
+
+
+class LStoreKeyedFastElement: public LStoreKeyed {
+ public:
+  LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val)
+      : LStoreKeyed(obj, key, val) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+                               "store-keyed-fast-element")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+};
+
+
+class LStoreKeyedGeneric: public LStoreKeyed {
+ public:
+  LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val)
+      : LStoreKeyed(obj, key, val) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+};
+
+
+class LCheckFunction: public LUnaryOperation {
+ public:
+  explicit LCheckFunction(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
+  DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+};
+
+
+class LCheckInstanceType: public LUnaryOperation {
+ public:
+  LCheckInstanceType(LOperand* use, LOperand* temp)
+      : LUnaryOperation(use), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LCheckMap: public LUnaryOperation {
+ public:
+  explicit LCheckMap(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMap)
+};
+
+
+class LCheckPrototypeMaps: public LInstruction {
+ public:
+  LCheckPrototypeMaps(LOperand* temp,
+                      Handle<JSObject> holder,
+                      Handle<Map> receiver_map)
+      : temp_(temp),
+        holder_(holder),
+        receiver_map_(receiver_map) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+
+  LOperand* temp() const { return temp_; }
+  Handle<JSObject> holder() const { return holder_; }
+  Handle<Map> receiver_map() const { return receiver_map_; }
+
+ private:
+  LOperand* temp_;
+  Handle<JSObject> holder_;
+  Handle<Map> receiver_map_;
+};
+
+
+class LCheckSmi: public LUnaryOperation {
+ public:
+  LCheckSmi(LOperand* use, Condition condition)
+      : LUnaryOperation(use), condition_(condition) { }
+
+  Condition condition() const { return condition_; }
+
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const {
+    return (condition_ == zero) ? "check-non-smi" : "check-smi";
+  }
+
+ private:
+  Condition condition_;
+};
+
+
+class LMaterializedLiteral: public LInstruction {
+ public:
+  DECLARE_INSTRUCTION(MaterializedLiteral)
+};
+
+
+class LArrayLiteral: public LMaterializedLiteral {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+};
+
+
+class LObjectLiteral: public LMaterializedLiteral {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+};
+
+
+class LRegExpLiteral: public LMaterializedLiteral {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+  DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LFunctionLiteral: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+
+  Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
+};
+
+
+class LTypeof: public LUnaryOperation {
+ public:
+  explicit LTypeof(LOperand* input) : LUnaryOperation(input) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIs: public LUnaryOperation {
+ public:
+  explicit LTypeofIs(LOperand* input) : LUnaryOperation(input) { }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+};
+
+
+class LTypeofIsAndBranch: public LTypeofIs {
+ public:
+  LTypeofIsAndBranch(LOperand* value,
+                     int true_block_id,
+                     int false_block_id)
+      : LTypeofIs(value),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LDeleteProperty: public LBinaryOperation {
+ public:
+  LDeleteProperty(LOperand* obj, LOperand* key) : LBinaryOperation(obj, key) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
+
+  LOperand* object() const { return left(); }
+  LOperand* key() const { return right(); }
+};
+
+
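+// Marks a point at which execution may enter optimized code from the
+// unoptimized version of a function via on-stack replacement (OSR).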
+class LOsrEntry: public LInstruction {
+ public:
+  LOsrEntry();
+
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+
+  LOperand** SpilledRegisterArray() { return register_spills_; }
+  LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
+
+  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
+  void MarkSpilledDoubleRegister(int allocation_index,
+                                 LOperand* spill_operand);
+
+ private:
+  // Arrays of spill slot operands for registers with an assigned spill
+  // slot, i.e., that must also be restored to the spill slot on OSR entry.
+  // NULL if the register has no assigned spill slot.  Indexed by allocation
+  // index.
+  LOperand* register_spills_[Register::kNumAllocatableRegisters];
+  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+};
+
+
+class LStackCheck: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+};
+
+
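+// A pointer map records which operands contain tagged pointers at a
+// safepoint, so the garbage collector can find and update them there.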
+class LPointerMap: public ZoneObject {
+ public:
+  explicit LPointerMap(int position)
+      : pointer_operands_(8), position_(position), lithium_position_(-1) { }
+
+  const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
+  int position() const { return position_; }
+  int lithium_position() const { return lithium_position_; }
+
+  void set_lithium_position(int pos) {
+    ASSERT(lithium_position_ == -1);
+    lithium_position_ = pos;
+  }
+
+  void RecordPointer(LOperand* op);
+  void PrintTo(StringStream* stream) const;
+
+ private:
+  ZoneList<LOperand*> pointer_operands_;
+  int position_;
+  int lithium_position_;
+};
+
+
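+// An environment describes the state an unoptimized frame would have at
+// a given point in the optimized code: the closure, the values of the
+// parameters, locals and expression stack, and the AST id to deoptimize
+// to. WriteTranslation encodes it as the commands the deoptimizer uses
+// to rebuild the frame(s), including those of enclosing inlined calls.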
+class LEnvironment: public ZoneObject {
+ public:
+  LEnvironment(Handle<JSFunction> closure,
+               int ast_id,
+               int parameter_count,
+               int argument_count,
+               int value_count,
+               LEnvironment* outer)
+      : closure_(closure),
+        arguments_stack_height_(argument_count),
+        deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
+        translation_index_(-1),
+        ast_id_(ast_id),
+        parameter_count_(parameter_count),
+        values_(value_count),
+        representations_(value_count),
+        spilled_registers_(NULL),
+        spilled_double_registers_(NULL),
+        outer_(outer) {
+  }
+
+  Handle<JSFunction> closure() const { return closure_; }
+  int arguments_stack_height() const { return arguments_stack_height_; }
+  int deoptimization_index() const { return deoptimization_index_; }
+  int translation_index() const { return translation_index_; }
+  int ast_id() const { return ast_id_; }
+  int parameter_count() const { return parameter_count_; }
+  const ZoneList<LOperand*>* values() const { return &values_; }
+  LEnvironment* outer() const { return outer_; }
+
+  void AddValue(LOperand* operand, Representation representation) {
+    values_.Add(operand);
+    representations_.Add(representation);
+  }
+
+  bool HasTaggedValueAt(int index) const {
+    return representations_[index].IsTagged();
+  }
+
+  void Register(int deoptimization_index, int translation_index) {
+    ASSERT(!HasBeenRegistered());
+    deoptimization_index_ = deoptimization_index;
+    translation_index_ = translation_index;
+  }
+  bool HasBeenRegistered() const {
+    return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
+  }
+
+  void SetSpilledRegisters(LOperand** registers,
+                           LOperand** double_registers) {
+    spilled_registers_ = registers;
+    spilled_double_registers_ = double_registers;
+  }
+
+  // Emit frame translation commands for this environment.
+  void WriteTranslation(LCodeGen* cgen, Translation* translation) const;
+
+  void PrintTo(StringStream* stream) const;
+
+ private:
+  Handle<JSFunction> closure_;
+  int arguments_stack_height_;
+  int deoptimization_index_;
+  int translation_index_;
+  int ast_id_;
+  int parameter_count_;
+  ZoneList<LOperand*> values_;
+  ZoneList<Representation> representations_;
+
+  // Arrays of spill slot operands, indexed by allocation index, for
+  // registers that are also in spill slots at an OSR entry.  NULL for
+  // environments that do not correspond to an OSR entry.
+  LOperand** spilled_registers_;
+  LOperand** spilled_double_registers_;
+
+  LEnvironment* outer_;
+};
+
+class LChunkBuilder;
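+
+// The result of instruction selection: a linear sequence of lithium
+// instructions for a hydrogen graph, together with the pointer maps,
+// spill slots and inlined closures that the instructions reference.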
+class LChunk: public ZoneObject {
+ public:
+  explicit LChunk(HGraph* graph);
+
+  int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+  LConstantOperand* DefineConstantOperand(HConstant* constant);
+  Handle<Object> LookupLiteral(LConstantOperand* operand) const;
+  Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+
+  int GetNextSpillIndex(bool is_double);
+  LOperand* GetNextSpillSlot(bool is_double);
+
+  int ParameterAt(int index);
+  int GetParameterStackSlot(int index) const;
+  int spill_slot_count() const { return spill_slot_count_; }
+  HGraph* graph() const { return graph_; }
+  const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
+  void AddGapMove(int index, LOperand* from, LOperand* to);
+  LGap* GetGapAt(int index) const;
+  bool IsGapAt(int index) const;
+  int NearestGapPos(int index) const;
+  int NearestNextGapPos(int index) const;
+  void MarkEmptyBlocks();
+  const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
+  LLabel* GetLabel(int block_id) const {
+    HBasicBlock* block = graph_->blocks()->at(block_id);
+    int first_instruction = block->first_instruction_index();
+    return LLabel::cast(instructions_[first_instruction]);
+  }
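+  // Returns the id of the block that will actually carry the code for
+  // block_id, following the chain of replacement labels created when
+  // empty blocks are marked (see MarkEmptyBlocks).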
+  int LookupDestination(int block_id) const {
+    LLabel* cur = GetLabel(block_id);
+    while (cur->replacement() != NULL) {
+      cur = cur->replacement();
+    }
+    return cur->block_id();
+  }
+  Label* GetAssemblyLabel(int block_id) const {
+    LLabel* label = GetLabel(block_id);
+    ASSERT(!label->HasReplacement());
+    return label->label();
+  }
+
+  const ZoneList<Handle<JSFunction> >* inlined_closures() const {
+    return &inlined_closures_;
+  }
+
+  void AddInlinedClosure(Handle<JSFunction> closure) {
+    inlined_closures_.Add(closure);
+  }
+
+  void Verify() const;
+
+ private:
+  int spill_slot_count_;
+  HGraph* const graph_;
+  ZoneList<LInstruction*> instructions_;
+  ZoneList<LPointerMap*> pointer_maps_;
+  ZoneList<Handle<JSFunction> > inlined_closures_;
+};
+
+
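+// Builds the lithium chunk for a hydrogen graph, visiting the basic
+// blocks in order and translating each hydrogen instruction into a
+// lithium instruction with explicit define/use/temp operands for the
+// register allocator.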
+class LChunkBuilder BASE_EMBEDDED {
+ public:
+  LChunkBuilder(HGraph* graph, LAllocator* allocator)
+      : chunk_(NULL),
+        graph_(graph),
+        status_(UNUSED),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        next_block_(NULL),
+        argument_count_(0),
+        allocator_(allocator),
+        position_(RelocInfo::kNoPosition),
+        instructions_pending_deoptimization_environment_(NULL),
+        pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
+
+  // Build the sequence for the graph.
+  LChunk* Build();
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  enum Status {
+    UNUSED,
+    BUILDING,
+    DONE,
+    ABORTED
+  };
+
+  LChunk* chunk() const { return chunk_; }
+  HGraph* graph() const { return graph_; }
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_building() const { return status_ == BUILDING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  void Abort(const char* format, ...);
+
+  // Methods for getting operands for Use / Define / Temp.
+  LRegister* ToOperand(Register reg);
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(XMMRegister reg);
+
+  // Methods for setting up define-use relationships.
+  LOperand* Use(HValue* value, LUnallocated* operand);
+  LOperand* UseFixed(HValue* value, Register fixed_register);
+  LOperand* UseFixedDouble(HValue* value, XMMRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the
+  // end of the instruction, which means the register allocator will not
+  // reuse its register for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only
+  // at the instruction start; the register allocator is free to assign
+  // the same register to some other operand used inside the instruction
+  // (i.e. a temporary or the output).
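+  // For example, an input whose value is still needed after the result
+  // has been written should be created with UseRegister, while an input
+  // that is only read before any output is written can safely use
+  // UseRegisterAtStart and share a register with the result.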
+  LOperand* UseRegister(HValue* value);
+  LOperand* UseRegisterAtStart(HValue* value);
+
+  // A value in a register that may be trashed.
+  LOperand* UseTempRegister(HValue* value);
+  LOperand* Use(HValue* value);
+  LOperand* UseAtStart(HValue* value);
+  LOperand* UseOrConstant(HValue* value);
+  LOperand* UseOrConstantAtStart(HValue* value);
+  LOperand* UseRegisterOrConstant(HValue* value);
+  LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  LInstruction* Define(LInstruction* instr, LUnallocated* result);
+  LInstruction* Define(LInstruction* instr);
+  LInstruction* DefineAsRegister(LInstruction* instr);
+  LInstruction* DefineAsSpilled(LInstruction* instr, int index);
+  LInstruction* DefineSameAsAny(LInstruction* instr);
+  LInstruction* DefineSameAsFirst(LInstruction* instr);
+  LInstruction* DefineFixed(LInstruction* instr, Register reg);
+  LInstruction* DefineFixedDouble(LInstruction* instr, XMMRegister reg);
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  LInstruction* AssignPointerMap(LInstruction* instr);
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly, and in that case we do not attach an
+  // environment to the instruction.
+  LInstruction* MarkAsCall(
+      LInstruction* instr,
+      HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+
+  LInstruction* SetInstructionPendingDeoptimizationEnvironment(
+      LInstruction* instr, int ast_id);
+  void ClearInstructionPendingDeoptimizationEnvironment();
+
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+
+  // Temporary operand that may be a memory location.
+  LOperand* Temp();
+  // Temporary operand that must be in a register.
+  LUnallocated* TempRegister();
+  LOperand* FixedTemp(Register reg);
+  LOperand* FixedTemp(XMMRegister reg);
+
+  void VisitInstruction(HInstruction* current);
+
+  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+
+  LChunk* chunk_;
+  HGraph* const graph_;
+  Status status_;
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  HBasicBlock* next_block_;
+  int argument_count_;
+  LAllocator* allocator_;
+  int position_;
+  LInstruction* instructions_pending_deoptimization_environment_;
+  int pending_deoptimization_ast_id_;
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_INSTRUCTION
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_LITHIUM_IA32_H_
index cbf93dd..84911ec 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -183,13 +183,6 @@ void MacroAssembler::RecordWrite(Register object,
 }
 
 
-void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
-  cmp(esp,
-      Operand::StaticVariable(ExternalReference::address_of_stack_limit()));
-  j(below, on_stack_overflow);
-}
-
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
 void MacroAssembler::DebugBreak() {
   Set(eax, Immediate(0));
@@ -364,9 +357,20 @@ void MacroAssembler::EnterExitFramePrologue() {
 }
 
 
-void MacroAssembler::EnterExitFrameEpilogue(int argc) {
-  // Reserve space for arguments.
-  sub(Operand(esp), Immediate(argc * kPointerSize));
+void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
+  // Optionally save all XMM registers.
+  if (save_doubles) {
+    CpuFeatures::Scope scope(SSE2);
+    int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
+    sub(Operand(esp), Immediate(space));
+    int offset = -2 * kPointerSize;
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
+    }
+  } else {
+    sub(Operand(esp), Immediate(argc * kPointerSize));
+  }
 
   // Get the required frame alignment for the OS.
   static const int kFrameAlignment = OS::ActivationFrameAlignment();
@@ -380,7 +384,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc) {
 }
 
 
-void MacroAssembler::EnterExitFrame() {
+void MacroAssembler::EnterExitFrame(bool save_doubles) {
   EnterExitFramePrologue();
 
   // Setup argc and argv in callee-saved registers.
@@ -388,17 +392,27 @@ void MacroAssembler::EnterExitFrame() {
   mov(edi, Operand(eax));
   lea(esi, Operand(ebp, eax, times_4, offset));
 
-  EnterExitFrameEpilogue(2);
+  EnterExitFrameEpilogue(2, save_doubles);
 }
 
 
 void MacroAssembler::EnterApiExitFrame(int argc) {
   EnterExitFramePrologue();
-  EnterExitFrameEpilogue(argc);
+  EnterExitFrameEpilogue(argc, false);
 }
 
 
-void MacroAssembler::LeaveExitFrame() {
+void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+  // Optionally restore all XMM registers.
+  if (save_doubles) {
+    CpuFeatures::Scope scope(SSE2);
+    int offset = -2 * kPointerSize;
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
+    }
+  }
+
   // Get the return address from the stack and restore the frame pointer.
   mov(ecx, Operand(ebp, 1 * kPointerSize));
   mov(ebp, Operand(ebp, 0 * kPointerSize));
@@ -1098,6 +1112,16 @@ void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
 }
 
 
+void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+  Runtime::Function* function = Runtime::FunctionForId(id);
+  Set(eax, Immediate(function->nargs));
+  mov(ebx, Immediate(ExternalReference(function)));
+  CEntryStub ces(1);
+  ces.SaveDoubles();
+  CallStub(&ces);
+}
+
+
 MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
                                             int num_arguments) {
   return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
@@ -1336,7 +1360,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                     Handle<Code> code_constant,
                                     const Operand& code_operand,
                                     Label* done,
-                                    InvokeFlag flag) {
+                                    InvokeFlag flag,
+                                    PostCallGenerator* post_call_generator) {
   bool definitely_matches = false;
   Label invoke;
   if (expected.is_immediate()) {
@@ -1387,6 +1412,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
 
     if (flag == CALL_FUNCTION) {
       call(adaptor, RelocInfo::CODE_TARGET);
+      if (post_call_generator != NULL) post_call_generator->Generate();
       jmp(done);
     } else {
       jmp(adaptor, RelocInfo::CODE_TARGET);
@@ -1399,11 +1425,14 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
 void MacroAssembler::InvokeCode(const Operand& code,
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
-                                InvokeFlag flag) {
+                                InvokeFlag flag,
+                                PostCallGenerator* post_call_generator) {
   Label done;
-  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+  InvokePrologue(expected, actual, Handle<Code>::null(), code,
+                 &done, flag, post_call_generator);
   if (flag == CALL_FUNCTION) {
     call(code);
+    if (post_call_generator != NULL) post_call_generator->Generate();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
     jmp(code);
@@ -1416,12 +1445,15 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 RelocInfo::Mode rmode,
-                                InvokeFlag flag) {
+                                InvokeFlag flag,
+                                PostCallGenerator* post_call_generator) {
   Label done;
   Operand dummy(eax);
-  InvokePrologue(expected, actual, code, dummy, &done, flag);
+  InvokePrologue(expected, actual, code, dummy, &done,
+                 flag, post_call_generator);
   if (flag == CALL_FUNCTION) {
     call(code, rmode);
+    if (post_call_generator != NULL) post_call_generator->Generate();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
     jmp(code, rmode);
@@ -1432,7 +1464,8 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
 
 void MacroAssembler::InvokeFunction(Register fun,
                                     const ParameterCount& actual,
-                                    InvokeFlag flag) {
+                                    InvokeFlag flag,
+                                    PostCallGenerator* post_call_generator) {
   ASSERT(fun.is(edi));
   mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -1441,25 +1474,37 @@ void MacroAssembler::InvokeFunction(Register fun,
 
   ParameterCount expected(ebx);
   InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-             expected, actual, flag);
+             expected, actual, flag, post_call_generator);
 }
 
 
 void MacroAssembler::InvokeFunction(JSFunction* function,
                                     const ParameterCount& actual,
-                                    InvokeFlag flag) {
+                                    InvokeFlag flag,
+                                    PostCallGenerator* post_call_generator) {
   ASSERT(function->is_compiled());
   // Get the function and setup the context.
   mov(edi, Immediate(Handle<JSFunction>(function)));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-  // Invoke the cached code.
-  Handle<Code> code(function->code());
+
   ParameterCount expected(function->shared()->formal_parameter_count());
-  InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+  if (V8::UseCrankshaft()) {
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+               expected, actual, flag, post_call_generator);
+  } else {
+    Handle<Code> code(function->code());
+    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET,
+               flag, post_call_generator);
+  }
 }
 
 
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+                                   InvokeFlag flag,
+                                   PostCallGenerator* post_call_generator) {
   // Calls are not allowed in some stubs.
   ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
 
@@ -1469,7 +1514,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
   ParameterCount expected(0);
   GetBuiltinFunction(edi, id);
   InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-           expected, expected, flag);
+             expected, expected, flag, post_call_generator);
 }
 
 void MacroAssembler::GetBuiltinFunction(Register target,
@@ -1534,6 +1579,15 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
 }
 
 
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+  // The registers are pushed starting with the lowest encoding,
+  // which means that lowest encodings are furthest away from
+  // the stack pointer.
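+  // For example, pushad pushes eight registers, so eax (code 0) gets
+  // index 7 while edi (code 7) gets index 0.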
+  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
+  return kNumSafepointRegisters - reg_code - 1;
+}
+
+
 void MacroAssembler::Ret() {
   ret(0);
 }
index d208dbe..8407802 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -51,6 +51,7 @@ typedef Operand MemOperand;
 
 // Forward declarations.
 class JumpTarget;
+class PostCallGenerator;
 
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
@@ -103,12 +104,6 @@ class MacroAssembler: public Assembler {
 #endif
 
   // ---------------------------------------------------------------------------
-  // Stack limit support
-
-  // Do simple test for stack overflow. This doesn't handle an overflow.
-  void StackLimitCheck(Label* on_stack_limit_hit);
-
-  // ---------------------------------------------------------------------------
   // Activation frames
 
   void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
@@ -117,18 +112,18 @@ class MacroAssembler: public Assembler {
   void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
   void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
 
-  // Enter specific kind of exit frame; either in normal or debug mode.
-  // Expects the number of arguments in register eax and
-  // sets up the number of arguments in register edi and the pointer
-  // to the first argument in register esi.
-  void EnterExitFrame();
+  // Enter a specific kind of exit frame. Expects the number of
+  // arguments in register eax and sets up the number of arguments in
+  // register edi and the pointer to the first argument in register
+  // esi.
+  void EnterExitFrame(bool save_doubles);
 
   void EnterApiExitFrame(int argc);
 
   // Leave the current exit frame. Expects the return value in
   // register eax:edx (untouched) and the pointer to the first
   // argument in register esi.
-  void LeaveExitFrame();
+  void LeaveExitFrame(bool save_doubles);
 
   // Leave the current exit frame. Expects the return value in
   // register eax (untouched).
@@ -144,6 +139,11 @@ class MacroAssembler: public Assembler {
   // function and map can be the same.
   void LoadGlobalFunctionInitialMap(Register function, Register map);
 
+  // Push and pop the registers that can hold pointers.
+  void PushSafepointRegisters() { pushad(); }
+  void PopSafepointRegisters() { popad(); }
+  static int SafepointRegisterStackIndex(int reg_code);
+
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
@@ -151,27 +151,33 @@ class MacroAssembler: public Assembler {
   void InvokeCode(const Operand& code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
-                  InvokeFlag flag);
+                  InvokeFlag flag,
+                  PostCallGenerator* post_call_generator = NULL);
 
   void InvokeCode(Handle<Code> code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   RelocInfo::Mode rmode,
-                  InvokeFlag flag);
+                  InvokeFlag flag,
+                  PostCallGenerator* post_call_generator = NULL);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
   void InvokeFunction(Register function,
                       const ParameterCount& actual,
-                      InvokeFlag flag);
+                      InvokeFlag flag,
+                      PostCallGenerator* post_call_generator = NULL);
 
   void InvokeFunction(JSFunction* function,
                       const ParameterCount& actual,
-                      InvokeFlag flag);
+                      InvokeFlag flag,
+                      PostCallGenerator* post_call_generator = NULL);
 
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
-  void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
+  void InvokeBuiltin(Builtins::JavaScript id,
+                     InvokeFlag flag,
+                     PostCallGenerator* post_call_generator = NULL);
 
   // Store the function for the given builtin in the target register.
   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
@@ -457,6 +463,7 @@ class MacroAssembler: public Assembler {
 
   // Call a runtime routine.
   void CallRuntime(Runtime::Function* f, int num_arguments);
+  void CallRuntimeSaveDoubles(Runtime::FunctionId id);
 
   // Call a runtime function, returning the CodeStub object called.
   // Try to generate the stub code if necessary.  Do not perform a GC
@@ -546,6 +553,12 @@ class MacroAssembler: public Assembler {
 
   void Call(Label* target) { call(target); }
 
+  // Emit call to the code we are currently generating.
+  void CallSelf() {
+    Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
+    call(self, RelocInfo::CODE_TARGET);
+  }
+
   // Move if the registers are not identical.
   void Move(Register target, Register source);
 
@@ -618,14 +631,15 @@ class MacroAssembler: public Assembler {
                       Handle<Code> code_constant,
                       const Operand& code_operand,
                       Label* done,
-                      InvokeFlag flag);
+                      InvokeFlag flag,
+                      PostCallGenerator* post_call_generator = NULL);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
   void LeaveFrame(StackFrame::Type type);
 
   void EnterExitFramePrologue();
-  void EnterExitFrameEpilogue(int argc);
+  void EnterExitFrameEpilogue(int argc, bool save_doubles);
 
   void LeaveExitFrameEpilogue();
 
@@ -664,6 +678,17 @@ class CodePatcher {
 };
 
 
+// Helper class for generating code or data associated with the code
+// right after a call instruction. As an example, this can be used to
+// generate safepoint data after calls for Crankshaft.
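+// Generate() is invoked by the macro assembler right after it emits the
+// call instruction, e.g. from InvokeCode and InvokeBuiltin.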
+class PostCallGenerator {
+ public:
+  PostCallGenerator() { }
+  virtual ~PostCallGenerator() { }
+  virtual void Generate() = 0;
+};
+
+
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
index adcb521..352eae1 100644 (file)
@@ -855,9 +855,14 @@ MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
   }
   JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
   ASSERT(cell->value()->IsTheHole());
-  __ mov(scratch, Immediate(Handle<Object>(cell)));
-  __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
-         Immediate(Factory::the_hole_value()));
+  if (Serializer::enabled()) {
+    __ mov(scratch, Immediate(Handle<Object>(cell)));
+    __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
+           Immediate(Factory::the_hole_value()));
+  } else {
+    __ cmp(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)),
+           Immediate(Factory::the_hole_value()));
+  }
   __ j(not_equal, miss, not_taken);
   return cell;
 }
@@ -1326,8 +1331,12 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
                                                     JSFunction* function,
                                                     Label* miss) {
   // Get the value from the cell.
-  __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
-  __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
+  if (Serializer::enabled()) {
+    __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
+  } else {
+    __ mov(edi, Operand::Cell(Handle<JSGlobalPropertyCell>(cell)));
+  }
 
   // Check that the cell contains the same function.
   if (Heap::InNewSpace(function)) {
@@ -1710,7 +1719,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
   char_code_at_generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_code_at_generator.GenerateSlow(masm(), call_helper);
 
   __ bind(&index_out_of_range);
@@ -1785,7 +1794,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
   char_at_generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm(), call_helper);
 
   __ bind(&index_out_of_range);
@@ -1858,7 +1867,7 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
   char_from_code_generator.GenerateFast(masm());
   __ ret(2 * kPointerSize);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_from_code_generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
@@ -2399,10 +2408,18 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
   // Jump to the cached code (tail call).
   __ IncrementCounter(&Counters::call_global_inline, 1);
   ASSERT(function->is_compiled());
-  Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  __ InvokeCode(code, expected, arguments(),
-                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+  if (V8::UseCrankshaft()) {
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+                  expected, arguments(), JUMP_FUNCTION);
+  } else {
+    Handle<Code> code(function->code());
+    __ InvokeCode(code, expected, arguments(),
+                  RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+  }
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -2565,8 +2582,12 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
   __ j(not_equal, &miss, not_taken);
 
   // Store the value in the cell.
-  __ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
-  __ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
+  if (Serializer::enabled()) {
+    __ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
+  } else {
+    __ mov(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)), eax);
+  }
 
   // Return the value (register eax).
   __ IncrementCounter(&Counters::named_store_global_inline, 1);
@@ -2620,6 +2641,63 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
 }
 
 
+MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
+    JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the receiver isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that the map matches.
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(receiver->map())));
+  __ j(not_equal, &miss, not_taken);
+
+  // Check that the key is a smi.
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(not_zero, &miss, not_taken);
+
+  // Get the elements array and make sure it is a fast element array, not a
+  // copy-on-write ('cow') array.
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+         Immediate(Factory::fixed_array_map()));
+  __ j(not_equal, &miss, not_taken);
+
+  // Check that the key is within bounds.
+  if (receiver->IsJSArray()) {
+    __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // Compare smis.
+    __ j(above_equal, &miss, not_taken);
+  } else {
+    __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));  // Compare smis.
+    __ j(above_equal, &miss, not_taken);
+  }
+
+  // Do the store and update the write barrier. Make sure to preserve
+  // the value in register eax.
+  __ mov(edx, Operand(eax));
+  __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
+  __ RecordWrite(edi, 0, edx, ecx);
+
+  // Done.
+  __ ret(0);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
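
The fixed_array_map comparison above deliberately rejects copy-on-write arrays, which carry a distinct map: a COW backing store may be shared by several arrays, so storing an element through this fast path would mutate all of them. Such stores have to take the miss path, where the backing store is copied before the write.
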
+
+
 MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
                                                       JSObject* object,
                                                       JSObject* last) {
@@ -2793,8 +2871,12 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
   CheckPrototypes(object, eax, holder, ebx, edx, edi, name, &miss);
 
   // Get the value from the cell.
-  __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
-  __ mov(ebx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+  if (Serializer::enabled()) {
+    __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(ebx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+  } else {
+    __ mov(ebx, Operand::Cell(Handle<JSGlobalPropertyCell>(cell)));
+  }
 
   // Check for deleted property if property can actually be deleted.
   if (!is_dont_delete) {
@@ -3019,6 +3101,51 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
 }
 
 
+MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the receiver isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that the map matches.
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(receiver->map())));
+  __ j(not_equal, &miss, not_taken);
+
+  // Check that the key is a smi.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &miss, not_taken);
+
+  // Get the elements array.
+  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+  __ AssertFastElements(ecx);
+
+  // Check that the key is within bounds.
+  __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
+  __ j(above_equal, &miss, not_taken);
+
+  // Load the result and make sure it's not the hole.
+  __ mov(ebx, Operand(ecx, eax, times_2,
+                      FixedArray::kHeaderSize - kHeapObjectTag));
+  __ cmp(ebx, Factory::the_hole_value());
+  __ j(equal, &miss, not_taken);
+  __ mov(eax, ebx);
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
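
The hole check at the end is what keeps this stub correct for sparse arrays: fast-elements backing stores mark absent entries with the_hole, and a hole means the element is not actually present on the receiver (it may be found on the prototype chain instead), so the stub cannot return it and falls through to the miss handler.
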
+
+
 // Specialized stub for constructing objects from functions that have only
 // simple assignments of the form this.x = ...; in their body.
 MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
index 94dbd5f..8fbc184 100644 (file)
@@ -75,7 +75,7 @@ Code* IC::GetTargetAtAddress(Address address) {
 
 
 void IC::SetTargetAtAddress(Address address, Code* target) {
-  ASSERT(target->is_inline_cache_stub());
+  ASSERT(target->is_inline_cache_stub() || target->is_compare_ic_stub());
   Assembler::set_target_address_at(address, target->instruction_start());
 }
 
index 58acebc..cda0b15 100644 (file)
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -30,6 +30,7 @@
 #include "accessors.h"
 #include "api.h"
 #include "arguments.h"
+#include "codegen.h"
 #include "execution.h"
 #include "ic-inl.h"
 #include "runtime.h"
@@ -156,7 +157,7 @@ static bool HasNormalObjectsInPrototypeChain(LookupResult* lookup,
 IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
   IC::State state = target->ic_state();
 
-  if (state != MONOMORPHIC) return state;
+  if (state != MONOMORPHIC || !name->IsString()) return state;
   if (receiver->IsUndefined() || receiver->IsNull()) return state;
 
   InlineCacheHolderFlag cache_holder =
@@ -259,8 +260,12 @@ void IC::Clear(Address address) {
     case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target);
     case Code::CALL_IC: return CallIC::Clear(address, target);
     case Code::KEYED_CALL_IC:  return KeyedCallIC::Clear(address, target);
-    case Code::BINARY_OP_IC: return;  // Clearing these is tricky and does not
-                                      // make any performance difference.
+    case Code::BINARY_OP_IC:
+    case Code::TYPE_RECORDING_BINARY_OP_IC:
+    case Code::COMPARE_IC:
+      // Clearing these is tricky and does not
+      // make any performance difference.
+      return;
     default: UNREACHABLE();
   }
 }
@@ -1134,9 +1139,20 @@ MaybeObject* KeyedLoadIC::Load(State state,
         stub = external_array_stub(receiver->GetElementsKind());
       } else if (receiver->HasIndexedInterceptor()) {
         stub = indexed_interceptor_stub();
+      } else if (state == UNINITIALIZED &&
+                 key->IsSmi() &&
+                 receiver->map()->has_fast_elements()) {
+        MaybeObject* probe = StubCache::ComputeKeyedLoadSpecialized(*receiver);
+        stub =
+            probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
       }
     }
-    set_target(stub);
+    if (stub != NULL) set_target(stub);
+
+#ifdef DEBUG
+    TraceIC("KeyedLoadIC", key, state, target());
+#endif  // DEBUG
+
     // For JSObjects with fast elements that are not value wrappers
     // and that do not have indexed interceptors, we initialize the
     // inlined fast case (if present) by patching the inlined map
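
Both specialized-stub probes use the same guarded pattern: compiling the stub allocates, so the probe returns a MaybeObject that may be a failure (for example under memory pressure). On failure, stub stays NULL and the new 'if (stub != NULL)' guard simply leaves the current IC target untouched instead of propagating the failure.
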
@@ -1360,6 +1376,17 @@ MaybeObject* StoreIC::Store(State state,
     }
   }
 
+  if (receiver->IsJSGlobalProxy()) {
+    // Generate a generic stub that goes to the runtime when we see a global
+    // proxy as receiver.
+    if (target() != global_proxy_stub()) {
+      set_target(global_proxy_stub());
+#ifdef DEBUG
+      TraceIC("StoreIC", name, state, target());
+#endif
+    }
+  }
+
   // Set the property.
   return receiver->SetProperty(*name, *value, NONE);
 }
@@ -1503,9 +1530,15 @@ MaybeObject* KeyedStoreIC::Store(State state,
       Handle<JSObject> receiver = Handle<JSObject>::cast(object);
       if (receiver->HasExternalArrayElements()) {
         stub = external_array_stub(receiver->GetElementsKind());
+      } else if (state == UNINITIALIZED &&
+                 key->IsSmi() &&
+                 receiver->map()->has_fast_elements()) {
+        MaybeObject* probe = StubCache::ComputeKeyedStoreSpecialized(*receiver);
+        stub =
+            probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
       }
     }
-    set_target(stub);
+    if (stub != NULL) set_target(stub);
   }
 
   // Set the property.
@@ -1750,6 +1783,7 @@ void BinaryOpIC::patch(Code* code) {
 
 const char* BinaryOpIC::GetName(TypeInfo type_info) {
   switch (type_info) {
+    case UNINIT_OR_SMI: return "UninitOrSmi";
     case DEFAULT: return "Default";
     case GENERIC: return "Generic";
     case HEAP_NUMBERS: return "HeapNumbers";
@@ -1761,23 +1795,26 @@ const char* BinaryOpIC::GetName(TypeInfo type_info) {
 
 BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
   switch (type_info) {
-    // DEFAULT is mapped to UNINITIALIZED so that calls to DEFAULT stubs
-    // are not cleared at GC.
-    case DEFAULT: return UNINITIALIZED;
-
-    // Could have mapped GENERIC to MONOMORPHIC just as well but MEGAMORPHIC is
-    // conceptually closer.
-    case GENERIC: return MEGAMORPHIC;
-
-    default: return MONOMORPHIC;
+    case UNINIT_OR_SMI:
+      return UNINITIALIZED;
+    case DEFAULT:
+    case HEAP_NUMBERS:
+    case STRINGS:
+      return MONOMORPHIC;
+    case GENERIC:
+      return MEGAMORPHIC;
   }
+  UNREACHABLE();
+  return UNINITIALIZED;
 }
 
 
 BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Object* left,
                                              Object* right) {
   if (left->IsSmi() && right->IsSmi()) {
-    return GENERIC;
+    // If we have two smi inputs we can reach here because
+    // of an overflow. Enter default state.
+    return DEFAULT;
   }
 
   if (left->IsNumber() && right->IsNumber()) {
@@ -1794,43 +1831,220 @@ BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Object* left,
 }
 
 
-// defined in codegen-<arch>.cc
+// defined in code-stubs-<arch>.cc
 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info);
 
 
 MUST_USE_RESULT MaybeObject* BinaryOp_Patch(Arguments args) {
   ASSERT(args.length() == 5);
 
+  HandleScope scope;
   Handle<Object> left = args.at<Object>(0);
   Handle<Object> right = args.at<Object>(1);
   int key = Smi::cast(args[2])->value();
   Token::Value op = static_cast<Token::Value>(Smi::cast(args[3])->value());
-#ifdef DEBUG
-  BinaryOpIC::TypeInfo prev_type_info =
+  BinaryOpIC::TypeInfo previous_type =
       static_cast<BinaryOpIC::TypeInfo>(Smi::cast(args[4])->value());
-#endif  // DEBUG
-  { HandleScope scope;
-    BinaryOpIC::TypeInfo type_info = BinaryOpIC::GetTypeInfo(*left, *right);
-    Handle<Code> code = GetBinaryOpStub(key, type_info);
-    if (!code.is_null()) {
-      BinaryOpIC ic;
-      ic.patch(*code);
-#ifdef DEBUG
-      if (FLAG_trace_ic) {
-        PrintF("[BinaryOpIC (%s->%s)#%s]\n",
-            BinaryOpIC::GetName(prev_type_info),
-            BinaryOpIC::GetName(type_info),
-            Token::Name(op));
-      }
-#endif  // DEBUG
+
+  BinaryOpIC::TypeInfo type = BinaryOpIC::GetTypeInfo(*left, *right);
+  Handle<Code> code = GetBinaryOpStub(key, type);
+  if (!code.is_null()) {
+    BinaryOpIC ic;
+    ic.patch(*code);
+    if (FLAG_trace_ic) {
+      PrintF("[BinaryOpIC (%s->%s)#%s]\n",
+             BinaryOpIC::GetName(previous_type),
+             BinaryOpIC::GetName(type),
+             Token::Name(op));
     }
   }
 
-  HandleScope scope;
   Handle<JSBuiltinsObject> builtins = Top::builtins();
-
   Object* builtin = NULL;  // Initialization calms down the compiler.
+  switch (op) {
+    case Token::ADD:
+      builtin = builtins->javascript_builtin(Builtins::ADD);
+      break;
+    case Token::SUB:
+      builtin = builtins->javascript_builtin(Builtins::SUB);
+      break;
+    case Token::MUL:
+      builtin = builtins->javascript_builtin(Builtins::MUL);
+      break;
+    case Token::DIV:
+      builtin = builtins->javascript_builtin(Builtins::DIV);
+      break;
+    case Token::MOD:
+      builtin = builtins->javascript_builtin(Builtins::MOD);
+      break;
+    case Token::BIT_AND:
+      builtin = builtins->javascript_builtin(Builtins::BIT_AND);
+      break;
+    case Token::BIT_OR:
+      builtin = builtins->javascript_builtin(Builtins::BIT_OR);
+      break;
+    case Token::BIT_XOR:
+      builtin = builtins->javascript_builtin(Builtins::BIT_XOR);
+      break;
+    case Token::SHR:
+      builtin = builtins->javascript_builtin(Builtins::SHR);
+      break;
+    case Token::SAR:
+      builtin = builtins->javascript_builtin(Builtins::SAR);
+      break;
+    case Token::SHL:
+      builtin = builtins->javascript_builtin(Builtins::SHL);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  Handle<JSFunction> builtin_function(JSFunction::cast(builtin));
+
+  bool caught_exception;
+  Object** builtin_args[] = { right.location() };
+  Handle<Object> result = Execution::Call(builtin_function,
+                                          left,
+                                          ARRAY_SIZE(builtin_args),
+                                          builtin_args,
+                                          &caught_exception);
+  if (caught_exception) {
+    return Failure::Exception();
+  }
+  return *result;
+}
+
 
+void TRBinaryOpIC::patch(Code* code) {
+  set_target(code);
+}
+
+
+const char* TRBinaryOpIC::GetName(TypeInfo type_info) {
+  switch (type_info) {
+    case UNINITIALIZED: return "Uninitialized";
+    case SMI: return "SMI";
+    case INT32: return "Int32s";
+    case HEAP_NUMBER: return "HeapNumbers";
+    case STRING: return "Strings";
+    case GENERIC: return "Generic";
+    default: return "Invalid";
+  }
+}
+
+
+TRBinaryOpIC::State TRBinaryOpIC::ToState(TypeInfo type_info) {
+  switch (type_info) {
+    case UNINITIALIZED:
+      return ::v8::internal::UNINITIALIZED;
+    case SMI:
+    case INT32:
+    case HEAP_NUMBER:
+    case STRING:
+      return MONOMORPHIC;
+    case GENERIC:
+      return MEGAMORPHIC;
+  }
+  UNREACHABLE();
+  return ::v8::internal::UNINITIALIZED;
+}
+
+
+TRBinaryOpIC::TypeInfo TRBinaryOpIC::JoinTypes(TRBinaryOpIC::TypeInfo x,
+                                               TRBinaryOpIC::TypeInfo y) {
+  if (x == UNINITIALIZED) return y;
+  if (y == UNINITIALIZED) return x;
+  if (x == STRING && y == STRING) return STRING;
+  if (x == STRING || y == STRING) return GENERIC;
+  if (x >= y) return x;
+  return y;
+}
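
Since this join drives how the type-recording IC widens, here is a standalone sketch with a local copy of the TypeInfo enum (ordering as declared in ic.h below): the numeric states form a chain SMI < INT32 < HEAP_NUMBER < GENERIC, while STRING only joins with itself.

  #include <cassert>

  enum TypeInfo { UNINITIALIZED, SMI, INT32, HEAP_NUMBER, STRING, GENERIC };

  TypeInfo Join(TypeInfo x, TypeInfo y) {
    if (x == UNINITIALIZED) return y;
    if (y == UNINITIALIZED) return x;
    if (x == STRING && y == STRING) return STRING;
    if (x == STRING || y == STRING) return GENERIC;
    return x >= y ? x : y;  // numeric chain: take the wider state
  }

  int main() {
    assert(Join(SMI, INT32) == INT32);            // widen along the chain
    assert(Join(INT32, HEAP_NUMBER) == HEAP_NUMBER);
    assert(Join(STRING, STRING) == STRING);       // string-only stays string
    assert(Join(STRING, SMI) == GENERIC);         // mixed string goes generic
    assert(Join(UNINITIALIZED, HEAP_NUMBER) == HEAP_NUMBER);
    return 0;
  }
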
+
+
+TRBinaryOpIC::TypeInfo TRBinaryOpIC::GetTypeInfo(Handle<Object> left,
+                                                 Handle<Object> right) {
+  ::v8::internal::TypeInfo left_type =
+      ::v8::internal::TypeInfo::TypeFromValue(left);
+  ::v8::internal::TypeInfo right_type =
+      ::v8::internal::TypeInfo::TypeFromValue(right);
+
+  if (left_type.IsSmi() && right_type.IsSmi()) {
+    return SMI;
+  }
+
+  if (left_type.IsInteger32() && right_type.IsInteger32()) {
+    return INT32;
+  }
+
+  if (left_type.IsNumber() && right_type.IsNumber()) {
+    return HEAP_NUMBER;
+  }
+
+  if (left_type.IsString() || right_type.IsString()) {
+    // Patching for fast string ADD makes sense even if only one of the
+    // arguments is a string.
+    return STRING;
+  }
+
+  return GENERIC;
+}
+
+
+// defined in code-stubs-<arch>.cc
+// Only needed to remove dependency of ic.cc on code-stubs-<arch>.h.
+Handle<Code> GetTypeRecordingBinaryOpStub(int key,
+                                          TRBinaryOpIC::TypeInfo type_info,
+                                          TRBinaryOpIC::TypeInfo result_type);
+
+
+MaybeObject* TypeRecordingBinaryOp_Patch(Arguments args) {
+  ASSERT(args.length() == 5);
+
+  HandleScope scope;
+  Handle<Object> left = args.at<Object>(0);
+  Handle<Object> right = args.at<Object>(1);
+  int key = Smi::cast(args[2])->value();
+  Token::Value op = static_cast<Token::Value>(Smi::cast(args[3])->value());
+  TRBinaryOpIC::TypeInfo previous_type =
+      static_cast<TRBinaryOpIC::TypeInfo>(Smi::cast(args[4])->value());
+
+  TRBinaryOpIC::TypeInfo type = TRBinaryOpIC::GetTypeInfo(left, right);
+  type = TRBinaryOpIC::JoinTypes(type, previous_type);
+  TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED;
+  if (type == TRBinaryOpIC::STRING && op != Token::ADD) {
+    type = TRBinaryOpIC::GENERIC;
+  }
+  if (type == TRBinaryOpIC::SMI &&
+      previous_type == TRBinaryOpIC::SMI) {
+    if (op == Token::DIV || op == Token::MUL) {
+      // Arithmetic on two Smi inputs has yielded a heap number.
+      // That is the only way to get here from the Smi stub.
+      result_type = TRBinaryOpIC::HEAP_NUMBER;
+    } else {
+      // Other operations on SMIs that overflow yield int32s.
+      result_type = TRBinaryOpIC::INT32;
+    }
+  }
+  if (type == TRBinaryOpIC::INT32 &&
+      previous_type == TRBinaryOpIC::INT32) {
+    // We must be here because an operation on two INT32 types overflowed.
+    result_type = TRBinaryOpIC::HEAP_NUMBER;
+  }
+
+  Handle<Code> code = GetTypeRecordingBinaryOpStub(key, type, result_type);
+  if (!code.is_null()) {
+    TRBinaryOpIC ic;
+    ic.patch(*code);
+    if (FLAG_trace_ic) {
+      PrintF("[TypeRecordingBinaryOpIC (%s->(%s->%s))#%s]\n",
+             TRBinaryOpIC::GetName(previous_type),
+             TRBinaryOpIC::GetName(type),
+             TRBinaryOpIC::GetName(result_type),
+             Token::Name(op));
+    }
+  }
+
+  Handle<JSBuiltinsObject> builtins = Top::builtins();
+  Object* builtin = NULL;  // Initialization calms down the compiler.
   switch (op) {
     case Token::ADD:
       builtin = builtins->javascript_builtin(Builtins::ADD);
@@ -1885,6 +2099,55 @@ MUST_USE_RESULT MaybeObject* BinaryOp_Patch(Arguments args) {
 }
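
A worked example of the result_type computation above: a Smi ADD that overflows re-enters the runtime with previous_type == SMI while GetTypeInfo still reports SMI, so the replacement stub is compiled with result_type INT32; for MUL and DIV the Smi fast case can only bail out by producing a heap number, hence HEAP_NUMBER; and when an INT32 stub re-enters with two int32 inputs, the operation must have overflowed 32 bits, so its result type also becomes HEAP_NUMBER.
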
 
 
+Handle<Code> CompareIC::GetUninitialized(Token::Value op) {
+  ICCompareStub stub(op, UNINITIALIZED);
+  return stub.GetCode();
+}
+
+
+CompareIC::State CompareIC::ComputeState(Code* target) {
+  int key = target->major_key();
+  if (key == CodeStub::Compare) return GENERIC;
+  ASSERT(key == CodeStub::CompareIC);
+  return static_cast<State>(target->compare_state());
+}
+
+
+const char* CompareIC::GetStateName(State state) {
+  switch (state) {
+    case UNINITIALIZED: return "UNINITIALIZED";
+    case SMIS: return "SMIS";
+    case HEAP_NUMBERS: return "HEAP_NUMBERS";
+    case OBJECTS: return "OBJECTS";
+    case GENERIC: return "GENERIC";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+CompareIC::State CompareIC::TargetState(Handle<Object> x, Handle<Object> y) {
+  State state = GetState();
+  if (state != UNINITIALIZED) return GENERIC;
+  if (x->IsSmi() && y->IsSmi()) return SMIS;
+  if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
+  if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return GENERIC;
+  if (x->IsJSObject() && y->IsJSObject()) return OBJECTS;
+  return GENERIC;
+}
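
A standalone sketch of this one-shot state machine (a local enum and boolean classifications stand in for the Handle<Object> checks; illustrative only): the IC specializes once on its first miss and collapses to GENERIC on any later miss.

  #include <cassert>

  enum State { UNINITIALIZED, SMIS, HEAP_NUMBERS, OBJECTS, GENERIC };

  State Target(State current, bool both_smis, bool both_numbers,
               bool both_objects, bool is_equality) {
    if (current != UNINITIALIZED) return GENERIC;  // second miss: give up
    if (both_smis) return SMIS;
    if (both_numbers) return HEAP_NUMBERS;
    if (!is_equality) return GENERIC;  // OBJECTS only for (strict) equality
    if (both_objects) return OBJECTS;
    return GENERIC;
  }

  int main() {
    assert(Target(UNINITIALIZED, true, true, false, true) == SMIS);
    assert(Target(UNINITIALIZED, false, true, false, false) == HEAP_NUMBERS);
    assert(Target(SMIS, true, true, false, true) == GENERIC);
    return 0;
  }
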
+
+
+// Used from ic_<arch>.cc.
+Code* CompareIC_Miss(Arguments args) {
+  NoHandleAllocation na;
+  ASSERT(args.length() == 3);
+  CompareIC ic(static_cast<Token::Value>(Smi::cast(args[2])->value()));
+  ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
+  return ic.target();
+}
+
+
 static Address IC_utilities[] = {
 #define ADDR(name) FUNCTION_ADDR(name),
     IC_UTIL_LIST(ADDR)
index 7b8b1bf..434c502 100644 (file)
--- a/src/ic.h
+++ b/src/ic.h
@@ -28,7 +28,7 @@
 #ifndef V8_IC_H_
 #define V8_IC_H_
 
-#include "assembler.h"
+#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -53,8 +53,9 @@ namespace internal {
   ICU(LoadPropertyWithInterceptorForCall)             \
   ICU(KeyedLoadPropertyWithInterceptor)               \
   ICU(StoreInterceptorProperty)                       \
-  ICU(BinaryOp_Patch)
-
+  ICU(BinaryOp_Patch)                                 \
+  ICU(TypeRecordingBinaryOp_Patch)                    \
+  ICU(CompareIC_Miss)
 //
 // IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
 // and KeyedStoreIC.
@@ -403,6 +404,7 @@ class StoreIC: public IC {
   static void GenerateMegamorphic(MacroAssembler* masm);
   static void GenerateArrayLength(MacroAssembler* masm);
   static void GenerateNormal(MacroAssembler* masm);
+  static void GenerateGlobalProxy(MacroAssembler* masm);
 
   // Clear the use of an inlined version.
   static void ClearInlinedVersion(Address address);
@@ -426,6 +428,9 @@ class StoreIC: public IC {
   static Code* initialize_stub() {
     return Builtins::builtin(Builtins::StoreIC_Initialize);
   }
+  static Code* global_proxy_stub() {
+    return Builtins::builtin(Builtins::StoreIC_GlobalProxy);
+  }
 
   static void Clear(Address address, Code* target);
 
@@ -503,6 +508,7 @@ class BinaryOpIC: public IC {
  public:
 
   enum TypeInfo {
+    UNINIT_OR_SMI,
     DEFAULT,  // Initial state. When first executed, patches to one
               // of the following states depending on the operands types.
     HEAP_NUMBERS,  // Both arguments are HeapNumbers.
@@ -514,8 +520,6 @@ class BinaryOpIC: public IC {
 
   void patch(Code* code);
 
-  static void Clear(Address address, Code* target);
-
   static const char* GetName(TypeInfo type_info);
 
   static State ToState(TypeInfo type_info);
@@ -523,6 +527,71 @@ class BinaryOpIC: public IC {
   static TypeInfo GetTypeInfo(Object* left, Object* right);
 };
 
+
+// Type-recording BinaryOpIC that records the types of its inputs and outputs.
+class TRBinaryOpIC: public IC {
+ public:
+
+  enum TypeInfo {
+    UNINITIALIZED,
+    SMI,
+    INT32,
+    HEAP_NUMBER,
+    STRING,  // Only used for addition operation.  At least one string operand.
+    GENERIC
+  };
+
+  TRBinaryOpIC() : IC(NO_EXTRA_FRAME) { }
+
+  void patch(Code* code);
+
+  static const char* GetName(TypeInfo type_info);
+
+  static State ToState(TypeInfo type_info);
+
+  static TypeInfo GetTypeInfo(Handle<Object> left, Handle<Object> right);
+
+  static TypeInfo JoinTypes(TypeInfo x, TypeInfo y);
+};
+
+
+class CompareIC: public IC {
+ public:
+  enum State {
+    UNINITIALIZED,
+    SMIS,
+    HEAP_NUMBERS,
+    OBJECTS,
+    GENERIC
+  };
+
+  explicit CompareIC(Token::Value op) : IC(EXTRA_CALL_FRAME), op_(op) { }
+
+  // Update the inline cache for the given operands.
+  void UpdateCaches(Handle<Object> x, Handle<Object> y);
+
+  // Factory method for getting an uninitialized compare stub.
+  static Handle<Code> GetUninitialized(Token::Value op);
+
+  // Helper function for computing the condition for a compare operation.
+  static Condition ComputeCondition(Token::Value op);
+
+  // Helper function for determining the state of a compare IC.
+  static State ComputeState(Code* target);
+
+  static const char* GetStateName(State state);
+
+ private:
+  State TargetState(Handle<Object> x, Handle<Object> y);
+
+  bool strict() const { return op_ == Token::EQ_STRICT; }
+  Condition GetCondition() const { return ComputeCondition(op_); }
+  State GetState() { return ComputeState(target()); }
+
+  Token::Value op_;
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_IC_H_
index 5ca4d60..0d65306 100644 (file)
@@ -152,6 +152,7 @@ class BreakTarget : public JumpTarget {
  public:
   // Construct a break target.
   inline BreakTarget();
+
   inline BreakTarget(JumpTarget::Directionality direction);
 
   virtual ~BreakTarget() {}
index e277bc8..eeaea65 100644 (file)
@@ -96,6 +96,17 @@ Vector<T> List<T, P>::AddBlock(T value, int count) {
 
 
 template<typename T, class P>
+void List<T, P>::InsertAt(int index, const T& elm) {
+  ASSERT(index >= 0 && index <= length_);
+  Add(elm);
+  for (int i = length_ - 1; i > index; --i) {
+    data_[i] = data_[i - 1];
+  }
+  data_[index] = elm;
+}
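
The same append-then-shift idiom, sketched standalone on std::vector for illustration (not the V8 List type): one append grows the storage, then the tail is shifted right, so the cost is O(length - index).

  #include <cassert>
  #include <vector>

  template <typename T>
  void InsertAt(std::vector<T>* v, int index, T elm) {  // elm by value: the
    v->push_back(elm);                                  // buffer may move
    for (int i = static_cast<int>(v->size()) - 1; i > index; --i) {
      (*v)[i] = (*v)[i - 1];  // shift the tail one slot to the right
    }
    (*v)[index] = elm;
  }

  int main() {
    std::vector<int> v;
    v.push_back(1);
    v.push_back(3);
    InsertAt(&v, 1, 2);
    assert(v.size() == 3 && v[0] == 1 && v[1] == 2 && v[2] == 3);
    return 0;
  }
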
+
+
+template<typename T, class P>
 T List<T, P>::Remove(int i) {
   T element = at(i);
   length_--;
@@ -108,6 +119,18 @@ T List<T, P>::Remove(int i) {
 
 
 template<typename T, class P>
+bool List<T, P>::RemoveElement(const T& elm) {
+  for (int i = 0; i < length_; i++) {
+    if (data_[i] == elm) {
+      Remove(i);
+      return true;
+    }
+  }
+  return false;
+}
+
+
+template<typename T, class P>
 void List<T, P>::Clear() {
   DeleteData(data_);
   Initialize(0);
@@ -134,7 +157,7 @@ void List<T, P>::Iterate(Visitor* visitor) {
 
 
 template<typename T, class P>
-bool List<T, P>::Contains(const T& elm) {
+bool List<T, P>::Contains(const T& elm) const {
   for (int i = 0; i < length_; i++) {
     if (data_[i] == elm)
       return true;
@@ -144,6 +167,16 @@ bool List<T, P>::Contains(const T& elm) {
 
 
 template<typename T, class P>
+int List<T, P>::CountOccurrences(const T& elm, int start, int end) const {
+  int result = 0;
+  for (int i = start; i <= end; i++) {
+    if (data_[i] == elm) ++result;
+  }
+  return result;
+}
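
Note that both bounds of CountOccurrences are inclusive: scanning the whole list is CountOccurrences(elm, 0, length() - 1), and passing length() as 'end' would read one element past the data.
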
+
+
+template<typename T, class P>
 void List<T, P>::Sort(int (*cmp)(const T* x, const T* y)) {
   ToVector().Sort(cmp);
 #ifdef DEBUG
index 24f3494..9a2e698 100644 (file)
@@ -91,6 +91,9 @@ class List {
   // Add all the elements from the argument list to this list.
   void AddAll(const List<T, P>& other);
 
+  // Inserts the element at the specified index.
+  void InsertAt(int index, const T& element);
+
   // Adds 'count' elements with the value 'value' and returns a
   // vector that allows access to the elements.  The vector is valid
   // until the next change is made to this list.
@@ -102,6 +105,10 @@ class List {
   // size of the list.
   T Remove(int i);
 
+  // Removes the first occurrence of the given element from the list.
+  // Returns whether the element was found and removed.
+  bool RemoveElement(const T& elm);
+
   // Removes the last element without deleting it even if T is a
   // pointer type. Returns the removed element.
   INLINE(T RemoveLast()) { return Remove(length_ - 1); }
@@ -113,7 +120,11 @@ class List {
   // Drops all but the first 'pos' elements from the list.
   INLINE(void Rewind(int pos));
 
-  bool Contains(const T& elm);
+  // Drop the last 'count' elements from the list.
+  INLINE(void RewindBy(int count)) { Rewind(length_ - count); }
+
+  bool Contains(const T& elm) const;
+  int CountOccurrences(const T& elm, int start, int end) const;
 
   // Iterate through all list entries, starting at index 0.
   void Iterate(void (*callback)(T* x));
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
new file mode 100644 (file)
index 0000000..db0bc8b
--- /dev/null
@@ -0,0 +1,2055 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "lithium-allocator.h"
+
+#include "data-flow.h"
+#include "hydrogen.h"
+#include "string-stream.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/lithium-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/lithium-arm.h"
+#else
+#error "Unknown architecture."
+#endif
+
+namespace v8 {
+namespace internal {
+
+
+#define DEFINE_OPERAND_CACHE(name, type)            \
+  name name::cache[name::kNumCachedOperands];       \
+  void name::SetupCache() {                         \
+    for (int i = 0; i < kNumCachedOperands; i++) {  \
+      cache[i].ConvertTo(type, i);                  \
+    }                                               \
+  }
+
+DEFINE_OPERAND_CACHE(LConstantOperand, CONSTANT_OPERAND)
+DEFINE_OPERAND_CACHE(LStackSlot,       STACK_SLOT)
+DEFINE_OPERAND_CACHE(LDoubleStackSlot, DOUBLE_STACK_SLOT)
+DEFINE_OPERAND_CACHE(LRegister,        REGISTER)
+DEFINE_OPERAND_CACHE(LDoubleRegister,  DOUBLE_REGISTER)
+
+#undef DEFINE_OPERAND_CACHE
+
+
+static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
+  return a.Value() < b.Value() ? a : b;
+}
+
+
+static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
+  return a.Value() > b.Value() ? a : b;
+}
+
+
+void LOperand::PrintTo(StringStream* stream) {
+  LUnallocated* unalloc = NULL;
+  switch (kind()) {
+    case INVALID:
+      break;
+    case UNALLOCATED:
+      unalloc = LUnallocated::cast(this);
+      stream->Add("v%d", unalloc->virtual_register());
+      switch (unalloc->policy()) {
+        case LUnallocated::NONE:
+          break;
+        case LUnallocated::FIXED_REGISTER: {
+          const char* register_name =
+              Register::AllocationIndexToString(unalloc->fixed_index());
+          stream->Add("(=%s)", register_name);
+          break;
+        }
+        case LUnallocated::FIXED_DOUBLE_REGISTER: {
+          const char* double_register_name =
+              DoubleRegister::AllocationIndexToString(unalloc->fixed_index());
+          stream->Add("(=%s)", double_register_name);
+          break;
+        }
+        case LUnallocated::FIXED_SLOT:
+          stream->Add("(=%dS)", unalloc->fixed_index());
+          break;
+        case LUnallocated::MUST_HAVE_REGISTER:
+          stream->Add("(R)");
+          break;
+        case LUnallocated::WRITABLE_REGISTER:
+          stream->Add("(WR)");
+          break;
+        case LUnallocated::SAME_AS_FIRST_INPUT:
+          stream->Add("(1)");
+          break;
+        case LUnallocated::SAME_AS_ANY_INPUT:
+          stream->Add("(A)");
+          break;
+        case LUnallocated::ANY:
+          stream->Add("(-)");
+          break;
+        case LUnallocated::IGNORE:
+          stream->Add("(0)");
+          break;
+      }
+      break;
+    case CONSTANT_OPERAND:
+      stream->Add("[constant:%d]", index());
+      break;
+    case STACK_SLOT:
+      stream->Add("[stack:%d]", index());
+      break;
+    case DOUBLE_STACK_SLOT:
+      stream->Add("[double_stack:%d]", index());
+      break;
+    case REGISTER:
+      stream->Add("[%s|R]", Register::AllocationIndexToString(index()));
+      break;
+    case DOUBLE_REGISTER:
+      stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
+      break;
+    case ARGUMENT:
+      stream->Add("[arg:%d]", index());
+      break;
+  }
+}
+
+
+int LOperand::VirtualRegister() {
+  LUnallocated* unalloc = LUnallocated::cast(this);
+  return unalloc->virtual_register();
+}
+
+
+bool UsePosition::RequiresRegister() const {
+  return requires_reg_;
+}
+
+
+bool UsePosition::RegisterIsBeneficial() const {
+  return register_beneficial_;
+}
+
+
+void UseInterval::SplitAt(LifetimePosition pos) {
+  ASSERT(Contains(pos) && pos.Value() != start().Value());
+  UseInterval* after = new UseInterval(pos, end_);
+  after->next_ = next_;
+  next_ = after;
+  end_ = pos;
+}
+
+
+#ifdef DEBUG
+
+
+void LiveRange::Verify() const {
+  UsePosition* cur = first_pos_;
+  while (cur != NULL) {
+    ASSERT(Start().Value() <= cur->pos().Value() &&
+           cur->pos().Value() <= End().Value());
+    cur = cur->next();
+  }
+}
+
+
+bool LiveRange::HasOverlap(UseInterval* target) const {
+  UseInterval* current_interval = first_interval_;
+  while (current_interval != NULL) {
+    // Intervals overlap if the start of one is contained in the other.
+    if (current_interval->Contains(target->start()) ||
+        target->Contains(current_interval->start())) {
+      return true;
+    }
+    current_interval = current_interval->next();
+  }
+  return false;
+}
+
+
+#endif
+
+
+UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
+  UsePosition* use_pos = last_processed_use_;
+  if (use_pos == NULL) use_pos = first_pos();
+  while (use_pos != NULL && use_pos->pos().Value() < start.Value()) {
+    use_pos = use_pos->next();
+  }
+  last_processed_use_ = use_pos;
+  return use_pos;
+}
+
+
+UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
+    LifetimePosition start) {
+  UsePosition* pos = NextUsePosition(start);
+  while (pos != NULL && !pos->RegisterIsBeneficial()) {
+    pos = pos->next();
+  }
+  return pos;
+}
+
+
+UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
+  UsePosition* pos = NextUsePosition(start);
+  while (pos != NULL && !pos->RequiresRegister()) {
+    pos = pos->next();
+  }
+  return pos;
+}
+
+
+bool LiveRange::CanBeSpilled(LifetimePosition pos) {
+  // TODO(kmillikin): Comment. Now.
+  if (pos.Value() <= Start().Value() && HasRegisterAssigned()) return false;
+
+  // We cannot spill a live range that has a use requiring a register
+  // at the current or the immediate next position.
+  UsePosition* use_pos = NextRegisterPosition(pos);
+  if (use_pos == NULL) return true;
+  return use_pos->pos().Value() > pos.NextInstruction().Value();
+}
+
+
+UsePosition* LiveRange::FirstPosWithHint() const {
+  UsePosition* pos = first_pos_;
+  while (pos != NULL && !pos->HasHint()) pos = pos->next();
+  return pos;
+}
+
+
+LOperand* LiveRange::CreateAssignedOperand() {
+  LOperand* op = NULL;
+  if (HasRegisterAssigned()) {
+    ASSERT(!IsSpilled());
+    if (assigned_double_) {
+      op = LDoubleRegister::Create(assigned_register());
+    } else {
+      op = LRegister::Create(assigned_register());
+    }
+  } else if (IsSpilled()) {
+    ASSERT(!HasRegisterAssigned());
+    op = TopLevel()->GetSpillOperand();
+    ASSERT(!op->IsUnallocated());
+  } else {
+    LUnallocated* unalloc = new LUnallocated(LUnallocated::NONE);
+    unalloc->set_virtual_register(id_);
+    op = unalloc;
+  }
+  return op;
+}
+
+
+UseInterval* LiveRange::FirstSearchIntervalForPosition(
+    LifetimePosition position) const {
+  if (current_interval_ == NULL) return first_interval_;
+  if (current_interval_->start().Value() > position.Value()) {
+    current_interval_ = NULL;
+    return first_interval_;
+  }
+  return current_interval_;
+}
+
+
+void LiveRange::AdvanceLastProcessedMarker(
+    UseInterval* to_start_of, LifetimePosition but_not_past) const {
+  if (to_start_of == NULL) return;
+  if (to_start_of->start().Value() > but_not_past.Value()) return;
+  LifetimePosition start =
+      current_interval_ == NULL ? LifetimePosition::Invalid()
+                                : current_interval_->start();
+  if (to_start_of->start().Value() > start.Value()) {
+    current_interval_ = to_start_of;
+  }
+}
+
+
+void LiveRange::SplitAt(LifetimePosition position, LiveRange* result) {
+  ASSERT(Start().Value() <= position.Value());
+  ASSERT(result->IsEmpty());
+  // Find the last interval that ends before the position. If the
+  // position is contained in one of the intervals in the chain, we
+  // split that interval and use the first part.
+  UseInterval* current = FirstSearchIntervalForPosition(position);
+  while (current != NULL) {
+    if (current->Contains(position)) {
+      current->SplitAt(position);
+      break;
+    }
+    UseInterval* next = current->next();
+    if (next->start().Value() >= position.Value()) break;
+    current = next;
+  }
+
+  // Partition original use intervals to the two live ranges.
+  UseInterval* before = current;
+  UseInterval* after = before->next();
+  result->last_interval_ = (last_interval_ == before)
+      ? after            // Only interval in the range after split.
+      : last_interval_;  // Last interval of the original range.
+  result->first_interval_ = after;
+  last_interval_ = before;
+
+  // Find the last use position before the split and the first use
+  // position after it.
+  UsePosition* use_after = first_pos_;
+  UsePosition* use_before = NULL;
+  while (use_after != NULL && use_after->pos().Value() <= position.Value()) {
+    use_before = use_after;
+    use_after = use_after->next();
+  }
+
+  // Partition original use positions to the two live ranges.
+  if (use_before != NULL) {
+    use_before->next_ = NULL;
+  } else {
+    first_pos_ = NULL;
+  }
+  result->first_pos_ = use_after;
+
+  // Link the new live range in the chain before any of the other
+  // ranges linked from the range before the split.
+  result->parent_ = (parent_ == NULL) ? this : parent_;
+  result->next_ = next_;
+  next_ = result;
+
+#ifdef DEBUG
+  Verify();
+  result->Verify();
+#endif
+}
+
+
+// This implements an ordering on live ranges so that they are ordered by their
+// start positions.  This is needed for the correctness of the register
+// allocation algorithm.  If two live ranges start at the same offset then there
+// is a tie breaker based on where the value is first used.  This part of the
+// ordering is merely a heuristic.
+bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
+  LifetimePosition start = Start();
+  LifetimePosition other_start = other->Start();
+  if (start.Value() == other_start.Value()) {
+    UsePosition* pos = FirstPosWithHint();
+    if (pos == NULL) return false;
+    UsePosition* other_pos = other->first_pos();
+    if (other_pos == NULL) return true;
+    return pos->pos().Value() < other_pos->pos().Value();
+  }
+  return start.Value() < other_start.Value();
+}
+
+
+void LiveRange::ShortenTo(LifetimePosition start) {
+  LAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_, start.Value());
+  ASSERT(first_interval_ != NULL);
+  ASSERT(first_interval_->start().Value() <= start.Value());
+  ASSERT(start.Value() < first_interval_->end().Value());
+  first_interval_->set_start(start);
+}
+
+
+void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end) {
+  LAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n",
+                         id_,
+                         start.Value(),
+                         end.Value());
+  LifetimePosition new_end = end;
+  while (first_interval_ != NULL &&
+         first_interval_->start().Value() <= end.Value()) {
+    if (first_interval_->end().Value() > end.Value()) {
+      new_end = first_interval_->end();
+    }
+    first_interval_ = first_interval_->next();
+  }
+
+  UseInterval* new_interval = new UseInterval(start, new_end);
+  new_interval->next_ = first_interval_;
+  first_interval_ = new_interval;
+  if (new_interval->next() == NULL) {
+    last_interval_ = new_interval;
+  }
+}
+
+
+void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end) {
+  LAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n",
+                         id_,
+                         start.Value(),
+                         end.Value());
+  if (first_interval_ == NULL) {
+    UseInterval* interval = new UseInterval(start, end);
+    first_interval_ = interval;
+    last_interval_ = interval;
+  } else {
+    if (end.Value() == first_interval_->start().Value()) {
+      first_interval_->set_start(start);
+    } else if (end.Value() < first_interval_->start().Value()) {
+      UseInterval* interval = new UseInterval(start, end);
+      interval->set_next(first_interval_);
+      first_interval_ = interval;
+    } else {
+      // The order of instruction processing (see ProcessInstructions)
+      // guarantees that each new use interval either precedes or
+      // intersects with the last added interval.
+      ASSERT(start.Value() < first_interval_->end().Value());
+      first_interval_->start_ = Min(start, first_interval_->start_);
+      first_interval_->end_ = Max(end, first_interval_->end_);
+    }
+  }
+}
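
A worked example of the branches above, given the backwards walk: adding [10, 20[ first and then [5, 12[ takes the merging branch and widens the first interval to [5, 20[; adding [2, 5[ next hits the exact-adjacency case and just moves the start ([2, 20[); a non-adjacent [0, 1[ would instead be prepended as a separate interval.
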
+
+
+UsePosition* LiveRange::AddUsePosition(LifetimePosition pos,
+                                       LOperand* operand) {
+  LAllocator::TraceAlloc("Add to live range %d use position %d\n",
+                         id_,
+                         pos.Value());
+  UsePosition* use_pos = new UsePosition(pos, operand);
+  UsePosition* prev = NULL;
+  UsePosition* current = first_pos_;
+  while (current != NULL && current->pos().Value() < pos.Value()) {
+    prev = current;
+    current = current->next();
+  }
+
+  if (prev == NULL) {
+    use_pos->set_next(first_pos_);
+    first_pos_ = use_pos;
+  } else {
+    use_pos->next_ = prev->next_;
+    prev->next_ = use_pos;
+  }
+
+  return use_pos;
+}
+
+
+void LiveRange::ConvertOperands() {
+  LOperand* op = CreateAssignedOperand();
+  UsePosition* use_pos = first_pos();
+  while (use_pos != NULL) {
+    ASSERT(Start().Value() <= use_pos->pos().Value() &&
+           use_pos->pos().Value() <= End().Value());
+
+    if (use_pos->HasOperand()) {
+      ASSERT(op->IsRegister() || op->IsDoubleRegister() ||
+             !use_pos->RequiresRegister());
+      use_pos->operand()->ConvertTo(op->kind(), op->index());
+    }
+    use_pos = use_pos->next();
+  }
+}
+
+
+UsePosition* LiveRange::AddUsePosition(LifetimePosition pos) {
+  return AddUsePosition(pos, CreateAssignedOperand());
+}
+
+
+bool LiveRange::CanCover(LifetimePosition position) const {
+  if (IsEmpty()) return false;
+  return Start().Value() <= position.Value() &&
+         position.Value() < End().Value();
+}
+
+
+bool LiveRange::Covers(LifetimePosition position) {
+  if (!CanCover(position)) return false;
+  UseInterval* start_search = FirstSearchIntervalForPosition(position);
+  for (UseInterval* interval = start_search;
+       interval != NULL;
+       interval = interval->next()) {
+    ASSERT(interval->next() == NULL ||
+           interval->next()->start().Value() >= interval->start().Value());
+    AdvanceLastProcessedMarker(interval, position);
+    if (interval->Contains(position)) return true;
+    if (interval->start().Value() > position.Value()) return false;
+  }
+  return false;
+}
+
+
+LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
+  UseInterval* b = other->first_interval();
+  if (b == NULL) return LifetimePosition::Invalid();
+  LifetimePosition advance_last_processed_up_to = b->start();
+  UseInterval* a = FirstSearchIntervalForPosition(b->start());
+  while (a != NULL && b != NULL) {
+    if (a->start().Value() > other->End().Value()) break;
+    if (b->start().Value() > End().Value()) break;
+    LifetimePosition cur_intersection = a->Intersect(b);
+    if (cur_intersection.IsValid()) {
+      return cur_intersection;
+    }
+    if (a->start().Value() < b->start().Value()) {
+      a = a->next();
+      if (a == NULL || a->start().Value() > other->End().Value()) break;
+      AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
+    } else {
+      b = b->next();
+    }
+  }
+  return LifetimePosition::Invalid();
+}
+
+
+void LAllocator::InitializeLivenessAnalysis() {
+  // Initialize the live_in sets for each block to NULL.
+  int block_count = graph()->blocks()->length();
+  live_in_sets_.Initialize(block_count);
+  live_in_sets_.AddBlock(NULL, block_count);
+}
+
+
+BitVector* LAllocator::ComputeLiveOut(HBasicBlock* block) {
+  // Compute live out for the given block, except not including backward
+  // successor edges.
+  BitVector* live_out = new BitVector(next_virtual_register_);
+
+  // Process all successor blocks.
+  HBasicBlock* successor = block->end()->FirstSuccessor();
+  while (successor != NULL) {
+    // Add values live on entry to the successor. Note the successor's
+    // live_in will not be computed yet for backwards edges.
+    BitVector* live_in = live_in_sets_[successor->block_id()];
+    if (live_in != NULL) live_out->Union(*live_in);
+
+    // All phi input operands corresponding to this successor edge are live
+    // out from this block.
+    int index = successor->PredecessorIndexOf(block);
+    const ZoneList<HPhi*>* phis = successor->phis();
+    for (int i = 0; i < phis->length(); ++i) {
+      HPhi* phi = phis->at(i);
+      if (!phi->OperandAt(index)->IsConstant()) {
+        live_out->Add(phi->OperandAt(index)->id());
+      }
+    }
+
+    // Check if we are done with the second successor.
+    if (successor == block->end()->SecondSuccessor()) break;
+
+    successor = block->end()->SecondSuccessor();
+  }
+
+  return live_out;
+}
+
+
+void LAllocator::AddInitialIntervals(HBasicBlock* block,
+                                     BitVector* live_out) {
+  // Add an interval that includes the entire block to the live range for
+  // each live_out value.
+  LifetimePosition start = LifetimePosition::FromInstructionIndex(
+      block->first_instruction_index());
+  LifetimePosition end = LifetimePosition::FromInstructionIndex(
+      block->last_instruction_index());
+  BitVector::Iterator iterator(live_out);
+  while (!iterator.Done()) {
+    int operand_index = iterator.Current();
+    LiveRange* range = LiveRangeFor(operand_index);
+    if (!range->IsEmpty() &&
+        range->Start().Value() == end.NextInstruction().Value()) {
+      range->AddUseInterval(start, end.NextInstruction());
+    } else {
+      range->AddUseInterval(start, end);
+    }
+    iterator.Advance();
+  }
+}
+
+
+int LAllocator::FixedDoubleLiveRangeID(int index) {
+  return -index - 1 - Register::kNumAllocatableRegisters;
+}
+
+
+LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
+                                    int pos,
+                                    bool is_tagged) {
+  TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
+  ASSERT(operand->HasFixedPolicy());
+  if (operand->policy() == LUnallocated::FIXED_SLOT) {
+    operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_index());
+  } else if (operand->policy() == LUnallocated::FIXED_REGISTER) {
+    int reg_index = operand->fixed_index();
+    operand->ConvertTo(LOperand::REGISTER, reg_index);
+  } else if (operand->policy() == LUnallocated::FIXED_DOUBLE_REGISTER) {
+    int reg_index = operand->fixed_index();
+    operand->ConvertTo(LOperand::DOUBLE_REGISTER, reg_index);
+  } else {
+    UNREACHABLE();
+  }
+  if (is_tagged) {
+    TraceAlloc("Fixed reg is tagged at %d\n", pos);
+    LInstruction* instr = chunk_->instructions()->at(pos);
+    if (instr->HasPointerMap()) {
+      instr->pointer_map()->RecordPointer(operand);
+    }
+  }
+  return operand;
+}
+
+
+LiveRange* LAllocator::FixedLiveRangeFor(int index) {
+  if (index >= fixed_live_ranges_.length()) {
+    fixed_live_ranges_.AddBlock(NULL,
+                                index - fixed_live_ranges_.length() + 1);
+  }
+
+  LiveRange* result = fixed_live_ranges_[index];
+  if (result == NULL) {
+    result = new LiveRange(FixedLiveRangeID(index));
+    ASSERT(result->IsFixed());
+    result->set_assigned_register(index, false);
+    fixed_live_ranges_[index] = result;
+  }
+  return result;
+}
+
+
+LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
+  if (index >= fixed_double_live_ranges_.length()) {
+    fixed_double_live_ranges_.AddBlock(NULL,
+                                index - fixed_double_live_ranges_.length() + 1);
+  }
+
+  LiveRange* result = fixed_double_live_ranges_[index];
+  if (result == NULL) {
+    result = new LiveRange(FixedDoubleLiveRangeID(index));
+    ASSERT(result->IsFixed());
+    result->set_assigned_register(index, true);
+    fixed_double_live_ranges_[index] = result;
+  }
+  return result;
+}
+
+
+LiveRange* LAllocator::LiveRangeFor(int index) {
+  if (index >= live_ranges_.length()) {
+    live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1);
+  }
+  LiveRange* result = live_ranges_[index];
+  if (result == NULL) {
+    result = new LiveRange(index);
+    live_ranges_[index] = result;
+  }
+  return result;
+}
+
+
+LGap* LAllocator::GetLastGap(HBasicBlock* block) const {
+  int last_instruction = block->last_instruction_index();
+  int index = chunk_->NearestGapPos(last_instruction);
+  return chunk_->GetGapAt(index);
+}
+
+
+HPhi* LAllocator::LookupPhi(LOperand* operand) const {
+  if (!operand->IsUnallocated()) return NULL;
+  int index = operand->VirtualRegister();
+  HValue* instr = graph()->LookupValue(index);
+  if (instr != NULL && instr->IsPhi()) {
+    return HPhi::cast(instr);
+  }
+  return NULL;
+}
+
+
+LiveRange* LAllocator::LiveRangeFor(LOperand* operand) {
+  if (operand->IsUnallocated()) {
+    return LiveRangeFor(LUnallocated::cast(operand)->virtual_register());
+  } else if (operand->IsRegister()) {
+    return FixedLiveRangeFor(operand->index());
+  } else if (operand->IsDoubleRegister()) {
+    return FixedDoubleLiveRangeFor(operand->index());
+  } else {
+    return NULL;
+  }
+}
+
+
+void LAllocator::Define(LifetimePosition position,
+                        LOperand* operand,
+                        LOperand* hint) {
+  LiveRange* range = LiveRangeFor(operand);
+  if (range == NULL) return;
+
+  if (range->IsEmpty() || range->Start().Value() > position.Value()) {
+    // Can happen if there is a definition without use.
+    range->AddUseInterval(position, position.NextInstruction());
+    range->AddUsePosition(position.NextInstruction(), NULL);
+  } else {
+    range->ShortenTo(position);
+  }
+
+  if (operand->IsUnallocated()) {
+    LUnallocated* unalloc_operand = LUnallocated::cast(operand);
+    range->AddUsePosition(position, unalloc_operand)->set_hint(hint);
+  }
+}
+
+
+void LAllocator::Use(LifetimePosition block_start,
+                     LifetimePosition position,
+                     LOperand* operand,
+                     LOperand* hint) {
+  LiveRange* range = LiveRangeFor(operand);
+  if (range == NULL) return;
+  if (operand->IsUnallocated()) {
+    LUnallocated* unalloc_operand = LUnallocated::cast(operand);
+    range->AddUsePosition(position, unalloc_operand)->set_hint(hint);
+  }
+  range->AddUseInterval(block_start, position);
+}
+
+
+void LAllocator::AddConstraintsGapMove(int index,
+                                       LOperand* from,
+                                       LOperand* to) {
+  LGap* gap = chunk_->GetGapAt(index);
+  LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
+  if (from->IsUnallocated()) {
+    const ZoneList<LMoveOperands>* move_operands = move->move_operands();
+    for (int i = 0; i < move_operands->length(); ++i) {
+      LMoveOperands cur = move_operands->at(i);
+      LOperand* cur_to = cur.to();
+      if (cur_to->IsUnallocated()) {
+        if (cur_to->VirtualRegister() == from->VirtualRegister()) {
+          move->AddMove(cur.from(), to);
+          return;
+        }
+      }
+    }
+  }
+  move->AddMove(from, to);
+}
+
+
+void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
+  int start = block->first_instruction_index();
+  int end = block->last_instruction_index();
+  for (int i = start; i <= end; ++i) {
+    if (chunk_->IsGapAt(i)) {
+      InstructionSummary* summary = NULL;
+      InstructionSummary* prev_summary = NULL;
+      if (i < end) summary = GetSummary(i + 1);
+      if (i > start) prev_summary = GetSummary(i - 1);
+      MeetConstraintsBetween(prev_summary, summary, i);
+    }
+  }
+}
+
+
+void LAllocator::MeetConstraintsBetween(InstructionSummary* first,
+                                        InstructionSummary* second,
+                                        int gap_index) {
+  // Handle fixed temporaries.
+  if (first != NULL) {
+    for (int i = 0; i < first->TempCount(); ++i) {
+      LUnallocated* temp = LUnallocated::cast(first->TempAt(i));
+      if (temp->HasFixedPolicy()) {
+        AllocateFixed(temp, gap_index - 1, false);
+      }
+    }
+  }
+
+  // Handle fixed output operand.
+  if (first != NULL && first->Output() != NULL) {
+    LUnallocated* first_output = LUnallocated::cast(first->Output());
+    LiveRange* range = LiveRangeFor(first_output->VirtualRegister());
+    bool assigned = false;
+    if (first_output->HasFixedPolicy()) {
+      LUnallocated* output_copy = first_output->CopyUnconstrained();
+      bool is_tagged = HasTaggedValue(first_output->VirtualRegister());
+      AllocateFixed(first_output, gap_index, is_tagged);
+
+      // This value is produced on the stack; we never need to spill it.
+      if (first_output->IsStackSlot()) {
+        range->SetSpillOperand(first_output);
+        range->SetSpillStartIndex(gap_index - 1);
+        assigned = true;
+      }
+      chunk_->AddGapMove(gap_index, first_output, output_copy);
+    }
+
+    if (!assigned) {
+      range->SetSpillStartIndex(gap_index);
+
+      // This move to the spill operand is not a real use. Liveness analysis
+      // and splitting of live ranges do not account for it. Thus it should
+      // be inserted at a lifetime position corresponding to the instruction
+      // end.
+      LGap* gap = chunk_->GetGapAt(gap_index);
+      LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE);
+      move->AddMove(first_output, range->GetSpillOperand());
+    }
+  }
+
+  // Handle fixed input operands of second instruction.
+  if (second != NULL) {
+    for (int i = 0; i < second->InputCount(); ++i) {
+      LUnallocated* cur_input = LUnallocated::cast(second->InputAt(i));
+      if (cur_input->HasFixedPolicy()) {
+        LUnallocated* input_copy = cur_input->CopyUnconstrained();
+        bool is_tagged = HasTaggedValue(cur_input->VirtualRegister());
+        AllocateFixed(cur_input, gap_index + 1, is_tagged);
+        AddConstraintsGapMove(gap_index, input_copy, cur_input);
+      } else if (cur_input->policy() == LUnallocated::WRITABLE_REGISTER) {
+        LUnallocated* input_copy = cur_input->CopyUnconstrained();
+        cur_input->set_virtual_register(next_virtual_register_++);
+        second->AddTemp(cur_input);
+        AddConstraintsGapMove(gap_index, input_copy, cur_input);
+      }
+    }
+  }
+
+  // Handle "output same as input" for second instruction.
+  if (second != NULL && second->Output() != NULL) {
+    LUnallocated* second_output = LUnallocated::cast(second->Output());
+    if (second_output->HasSameAsInputPolicy()) {
+      LUnallocated* cur_input = LUnallocated::cast(second->InputAt(0));
+      int output_vreg = second_output->VirtualRegister();
+      int input_vreg = cur_input->VirtualRegister();
+
+      LUnallocated* input_copy = cur_input->CopyUnconstrained();
+      cur_input->set_virtual_register(second_output->virtual_register());
+      AddConstraintsGapMove(gap_index, input_copy, cur_input);
+
+      if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
+        int index = gap_index + 1;
+        LInstruction* instr = chunk_->instructions()->at(index);
+        if (instr->HasPointerMap()) {
+          instr->pointer_map()->RecordPointer(input_copy);
+        }
+      } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
+        // The input is assumed to immediately have a tagged representation,
+        // before the pointer map can be used. I.e. the pointer map at the
+        // instruction will include the output operand (whose value at the
+        // beginning of the instruction is equal to the input operand). If
+        // this is not desired, then the pointer map at this instruction needs
+        // to be adjusted manually.
+      }
+    }
+  }
+}
+
+
+void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
+  int block_start = block->first_instruction_index();
+  int index = block->last_instruction_index();
+
+  LifetimePosition block_start_position =
+      LifetimePosition::FromInstructionIndex(block_start);
+
+  while (index >= block_start) {
+    LifetimePosition curr_position =
+        LifetimePosition::FromInstructionIndex(index);
+
+    if (chunk_->IsGapAt(index)) {
+      // We have a gap at this position.
+      LGap* gap = chunk_->GetGapAt(index);
+      LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
+      const ZoneList<LMoveOperands>* move_operands = move->move_operands();
+      for (int i = 0; i < move_operands->length(); ++i) {
+        LMoveOperands* cur = &move_operands->at(i);
+        if (cur->IsIgnored()) continue;
+        LOperand* from = cur->from();
+        LOperand* to = cur->to();
+        HPhi* phi = LookupPhi(to);
+        LOperand* hint = to;
+        if (phi != NULL) {
+          // This is a phi resolving move.
+          if (!phi->block()->IsLoopHeader()) {
+            hint = LiveRangeFor(phi->id())->FirstHint();
+          }
+        } else {
+          if (to->IsUnallocated()) {
+            if (live->Contains(to->VirtualRegister())) {
+              Define(curr_position, to, from);
+              live->Remove(to->VirtualRegister());
+            } else {
+              cur->Eliminate();
+              continue;
+            }
+          } else {
+            Define(curr_position, to, from);
+          }
+        }
+        Use(block_start_position, curr_position, from, hint);
+        if (from->IsUnallocated()) {
+          live->Add(from->VirtualRegister());
+        }
+      }
+    } else {
+      ASSERT(!chunk_->IsGapAt(index));
+      InstructionSummary* summary = GetSummary(index);
+
+      if (summary != NULL) {
+        LOperand* output = summary->Output();
+        if (output != NULL) {
+          if (output->IsUnallocated()) live->Remove(output->VirtualRegister());
+          Define(curr_position, output, NULL);
+        }
+
+        if (summary->IsCall()) {
+          for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+            if (output == NULL || !output->IsRegister() ||
+                output->index() != i) {
+              LiveRange* range = FixedLiveRangeFor(i);
+              range->AddUseInterval(curr_position,
+                                    curr_position.InstructionEnd());
+            }
+          }
+          for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+            if (output == NULL || !output->IsDoubleRegister() ||
+                output->index() != i) {
+              LiveRange* range = FixedDoubleLiveRangeFor(i);
+              range->AddUseInterval(curr_position,
+                                    curr_position.InstructionEnd());
+            }
+          }
+        }
+
+        for (int i = 0; i < summary->InputCount(); ++i) {
+          LOperand* input = summary->InputAt(i);
+
+          LifetimePosition use_pos;
+          if (input->IsUnallocated() &&
+              LUnallocated::cast(input)->IsUsedAtStart()) {
+            use_pos = curr_position;
+          } else {
+            use_pos = curr_position.InstructionEnd();
+          }
+
+          Use(block_start_position, use_pos, input, NULL);
+          if (input->IsUnallocated()) live->Add(input->VirtualRegister());
+        }
+
+        for (int i = 0; i < summary->TempCount(); ++i) {
+          LOperand* temp = summary->TempAt(i);
+          if (summary->IsCall()) {
+            if (temp->IsRegister()) continue;
+            if (temp->IsUnallocated()) {
+              LUnallocated* temp_unalloc = LUnallocated::cast(temp);
+              if (temp_unalloc->HasFixedPolicy()) {
+                continue;
+              }
+            }
+          }
+          Use(block_start_position, curr_position, temp, NULL);
+          Define(curr_position.PrevInstruction(), temp, NULL);
+        }
+      }
+    }
+
+    index = index - 1;
+  }
+}
+
+
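+// For each phi in the block, adds a gap move from the corresponding
+// operand in every predecessor to the phi's virtual register, and
+// stores the phi's value into its spill slot at the block entry.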
+void LAllocator::ResolvePhis(HBasicBlock* block) {
+  const ZoneList<HPhi*>* phis = block->phis();
+  for (int i = 0; i < phis->length(); ++i) {
+    HPhi* phi = phis->at(i);
+    LUnallocated* phi_operand = new LUnallocated(LUnallocated::NONE);
+    phi_operand->set_virtual_register(phi->id());
+    for (int j = 0; j < phi->OperandCount(); ++j) {
+      HValue* op = phi->OperandAt(j);
+      LOperand* operand = NULL;
+      if (op->IsConstant() && op->EmitAtUses()) {
+        HConstant* constant = HConstant::cast(op);
+        operand = chunk_->DefineConstantOperand(constant);
+      } else {
+        ASSERT(!op->EmitAtUses());
+        LUnallocated* unalloc = new LUnallocated(LUnallocated::NONE);
+        unalloc->set_virtual_register(op->id());
+        operand = unalloc;
+      }
+      HBasicBlock* cur_block = block->predecessors()->at(j);
+      // The gap move must be added without the special processing done
+      // in AddConstraintsGapMove.
+      chunk_->AddGapMove(cur_block->last_instruction_index() - 1,
+                         operand,
+                         phi_operand);
+    }
+
+    LiveRange* live_range = LiveRangeFor(phi->id());
+    LLabel* label = chunk_->GetLabel(phi->block()->block_id());
+    label->GetOrCreateParallelMove(LGap::START)->
+        AddMove(phi_operand, live_range->GetSpillOperand());
+    live_range->SetSpillStartIndex(phi->block()->first_instruction_index());
+  }
+}
+
+
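+// Runs all allocation phases in order: register constraints and phi
+// moves are set up on the chunk, live ranges are built, registers are
+// assigned (general-purpose first, then double), and finally pointer
+// maps, OSR entries, and cross-block moves are resolved.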
+void LAllocator::Allocate(LChunk* chunk) {
+  ASSERT(chunk_ == NULL);
+  chunk_ = chunk;
+  MeetRegisterConstraints();
+  ResolvePhis();
+  BuildLiveRanges();
+  AllocateGeneralRegisters();
+  AllocateDoubleRegisters();
+  PopulatePointerMaps();
+  if (has_osr_entry_) ProcessOsrEntry();
+  ConnectRanges();
+  ResolveControlFlow();
+}
+
+
+void LAllocator::MeetRegisterConstraints() {
+  HPhase phase("Register constraints", chunk());
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); ++i) {
+    HBasicBlock* block = blocks->at(i);
+    MeetRegisterConstraints(block);
+  }
+}
+
+
+void LAllocator::ResolvePhis() {
+  HPhase phase("Resolve phis", chunk());
+
+  // Process the blocks in reverse order.
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
+    HBasicBlock* block = blocks->at(block_id);
+    ResolvePhis(block);
+  }
+}
+
+
+void LAllocator::ResolveControlFlow(LiveRange* range,
+                                    HBasicBlock* block,
+                                    HBasicBlock* pred) {
+  LifetimePosition pred_end =
+      LifetimePosition::FromInstructionIndex(pred->last_instruction_index()).
+      PrevInstruction();
+
+  LifetimePosition cur_start =
+      LifetimePosition::FromInstructionIndex(block->first_instruction_index());
+  LiveRange* pred_cover = NULL;
+  LiveRange* cur_cover = NULL;
+  LiveRange* cur_range = range;
+  while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) {
+    if (cur_range->CanCover(cur_start)) {
+      ASSERT(cur_cover == NULL);
+      cur_cover = cur_range;
+    }
+    if (cur_range->CanCover(pred_end)) {
+      ASSERT(pred_cover == NULL);
+      pred_cover = cur_range;
+    }
+    cur_range = cur_range->next();
+  }
+
+  ASSERT(cur_cover != NULL);
+  if (cur_cover->IsSpilled()) return;
+  ASSERT(pred_cover != NULL);
+  if (pred_cover != cur_cover) {
+    LOperand* pred_op = pred_cover->CreateAssignedOperand();
+    LOperand* cur_op = cur_cover->CreateAssignedOperand();
+    if (!pred_op->Equals(cur_op)) {
+      LGap* gap = NULL;
+      if (block->predecessors()->length() == 1) {
+        gap = chunk_->GetGapAt(block->first_instruction_index());
+      } else {
+        ASSERT(pred->end()->SecondSuccessor() == NULL);
+        gap = GetLastGap(pred);
+      }
+      gap->GetOrCreateParallelMove(LGap::START)->AddMove(pred_op, cur_op);
+    }
+  }
+}
+
+
+LParallelMove* LAllocator::GetConnectingParallelMove(LifetimePosition pos) {
+  int index = pos.InstructionIndex();
+  if (chunk_->IsGapAt(index)) {
+    LGap* gap = chunk_->GetGapAt(index);
+    return gap->GetOrCreateParallelMove(
+        pos.IsInstructionStart() ? LGap::START : LGap::END);
+  }
+  int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
+  return chunk_->GetGapAt(gap_pos)->GetOrCreateParallelMove(
+      (gap_pos < index) ? LGap::AFTER : LGap::BEFORE);
+}
+
+
+HBasicBlock* LAllocator::GetBlock(LifetimePosition pos) {
+  LGap* gap = chunk_->GetGapAt(chunk_->NearestGapPos(pos.InstructionIndex()));
+  return gap->block();
+}
+
+
+void LAllocator::ConnectRanges() {
+  HPhase phase("Connect ranges", this);
+  for (int i = 0; i < live_ranges()->length(); ++i) {
+    LiveRange* first_range = live_ranges()->at(i);
+    if (first_range == NULL || first_range->parent() != NULL) continue;
+
+    LiveRange* second_range = first_range->next();
+    while (second_range != NULL) {
+      LifetimePosition pos = second_range->Start();
+
+      if (!second_range->IsSpilled()) {
+        // Add gap move if the two live ranges touch and there is no block
+        // boundary.
+        if (first_range->End().Value() == pos.Value()) {
+          bool should_insert = true;
+          if (IsBlockBoundary(pos)) {
+            should_insert = CanEagerlyResolveControlFlow(GetBlock(pos));
+          }
+          if (should_insert) {
+            LParallelMove* move = GetConnectingParallelMove(pos);
+            LOperand* prev_operand = first_range->CreateAssignedOperand();
+            LOperand* cur_operand = second_range->CreateAssignedOperand();
+            move->AddMove(prev_operand, cur_operand);
+          }
+        }
+      }
+
+      first_range = second_range;
+      second_range = second_range->next();
+    }
+  }
+}
+
+
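+// A control-flow edge can be resolved eagerly (while connecting live
+// ranges) when the block has exactly one predecessor and that
+// predecessor immediately precedes it in the block order.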
+bool LAllocator::CanEagerlyResolveControlFlow(HBasicBlock* block) const {
+  if (block->predecessors()->length() != 1) return false;
+  return block->predecessors()->first()->block_id() == block->block_id() - 1;
+}
+
+
+void LAllocator::ResolveControlFlow() {
+  HPhase phase("Resolve control flow", this);
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int block_id = 1; block_id < blocks->length(); ++block_id) {
+    HBasicBlock* block = blocks->at(block_id);
+    if (CanEagerlyResolveControlFlow(block)) continue;
+    BitVector* live = live_in_sets_[block->block_id()];
+    BitVector::Iterator iterator(live);
+    while (!iterator.Done()) {
+      int operand_index = iterator.Current();
+      for (int i = 0; i < block->predecessors()->length(); ++i) {
+        HBasicBlock* cur = block->predecessors()->at(i);
+        LiveRange* cur_range = LiveRangeFor(operand_index);
+        ResolveControlFlow(cur_range, block, cur);
+      }
+      iterator.Advance();
+    }
+  }
+}
+
+
+void LAllocator::BuildLiveRanges() {
+  HPhase phase("Build live ranges", this);
+  InitializeLivenessAnalysis();
+  // Process the blocks in reverse order.
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
+    HBasicBlock* block = blocks->at(block_id);
+    BitVector* live = ComputeLiveOut(block);
+    // Initially consider all live_out values live for the entire block. We
+    // will shorten these intervals if necessary.
+    AddInitialIntervals(block, live);
+
+    // Process the instructions in reverse order, generating and killing
+    // live values.
+    ProcessInstructions(block, live);
+    // All phi output operands are killed by this block.
+    const ZoneList<HPhi*>* phis = block->phis();
+    for (int i = 0; i < phis->length(); ++i) {
+      // The live range interval already ends at the first instruction of the
+      // block.
+      HPhi* phi = phis->at(i);
+      live->Remove(phi->id());
+
+      LOperand* hint = NULL;
+      LOperand* phi_operand = NULL;
+      LGap* gap = GetLastGap(phi->block()->predecessors()->at(0));
+      LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
+      for (int j = 0; j < move->move_operands()->length(); ++j) {
+        LOperand* to = move->move_operands()->at(j).to();
+        if (to->IsUnallocated() && to->VirtualRegister() == phi->id()) {
+          hint = move->move_operands()->at(j).from();
+          phi_operand = to;
+          break;
+        }
+      }
+      ASSERT(hint != NULL);
+
+      LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
+              block->first_instruction_index());
+      Define(block_start, phi_operand, hint);
+    }
+
+    // Now live is live_in for this block, except that it does not
+    // include values live out on backward successor edges.
+    live_in_sets_[block_id] = live;
+
+    // If this block is a loop header, go back and patch up the
+    // necessary predecessor blocks.
+    if (block->IsLoopHeader()) {
+      // TODO(kmillikin): Need to be able to get the last block of the loop
+      // in the loop information. Add a live range stretching from the first
+      // loop instruction to the last for each value live on entry to the
+      // header.
+      HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
+      BitVector::Iterator iterator(live);
+      LifetimePosition start = LifetimePosition::FromInstructionIndex(
+          block->first_instruction_index());
+      LifetimePosition end = LifetimePosition::FromInstructionIndex(
+          back_edge->last_instruction_index());
+      while (!iterator.Done()) {
+        int operand_index = iterator.Current();
+        LiveRange* range = LiveRangeFor(operand_index);
+        range->EnsureInterval(start, end);
+        iterator.Advance();
+      }
+
+      for (int i = block->block_id() + 1; i <= back_edge->block_id(); ++i) {
+        live_in_sets_[i]->Union(*live);
+      }
+    }
+
+#ifdef DEBUG
+    if (block_id == 0) {
+      BitVector::Iterator iterator(live);
+      bool found = false;
+      while (!iterator.Done()) {
+        found = true;
+        int operand_index = iterator.Current();
+        PrintF("Function: %s\n",
+               *graph()->info()->function()->debug_name()->ToCString());
+        PrintF("Value %d used before first definition!\n", operand_index);
+        LiveRange* range = LiveRangeFor(operand_index);
+        PrintF("First use is at %d\n", range->first_pos()->pos().Value());
+        iterator.Advance();
+      }
+      ASSERT(!found);
+    }
+#endif
+  }
+}
+
+
+void LAllocator::AllocateGeneralRegisters() {
+  HPhase phase("Allocate general registers", this);
+  num_registers_ = Register::kNumAllocatableRegisters;
+  mode_ = CPU_REGISTERS;
+  AllocateRegisters();
+}
+
+
+bool LAllocator::SafePointsAreInOrder() const {
+  const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
+  int safe_point = 0;
+  for (int i = 0; i < pointer_maps->length(); ++i) {
+    LPointerMap* map = pointer_maps->at(i);
+    if (safe_point > map->lithium_position()) return false;
+    safe_point = map->lithium_position();
+  }
+  return true;
+}
+
+
+void LAllocator::PopulatePointerMaps() {
+  HPhase phase("Populate pointer maps", this);
+  const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
+
+  ASSERT(SafePointsAreInOrder());
+
+  // Iterate over all safe point positions and record a pointer
+  // for all spilled live ranges at this point.
+  int first_safe_point_index = 0;
+  int last_range_start = 0;
+  for (int range_idx = 0; range_idx < live_ranges()->length(); ++range_idx) {
+    LiveRange* range = live_ranges()->at(range_idx);
+    if (range == NULL) continue;
+    // Iterate over the first parts of multi-part live ranges.
+    if (range->parent() != NULL) continue;
+    // Skip non-pointer values.
+    if (!HasTaggedValue(range->id())) continue;
+    // Skip empty live ranges.
+    if (range->IsEmpty()) continue;
+
+    // Find the extent of the range and its children.
+    int start = range->Start().InstructionIndex();
+    int end = 0;
+    for (LiveRange* cur = range; cur != NULL; cur = cur->next()) {
+      LifetimePosition this_end = cur->End();
+      if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
+      ASSERT(cur->Start().InstructionIndex() >= start);
+    }
+
+    // Most of the ranges are in order, but not all.  Keep an eye on when
+    // they step backwards and reset the first_safe_point_index so we don't
+    // miss any safe points.
+    if (start < last_range_start) {
+      first_safe_point_index = 0;
+    }
+    last_range_start = start;
+
+    // Step across all the safe points that are before the start of this range,
+    // recording how far we step in order to save doing this for the next range.
+    while (first_safe_point_index < pointer_maps->length()) {
+      LPointerMap* map = pointer_maps->at(first_safe_point_index);
+      int safe_point = map->lithium_position();
+      if (safe_point >= start) break;
+      first_safe_point_index++;
+    }
+
+    // Step through the safe points to see whether they are in the range.
+    for (int safe_point_index = first_safe_point_index;
+         safe_point_index < pointer_maps->length();
+         ++safe_point_index) {
+      LPointerMap* map = pointer_maps->at(safe_point_index);
+      int safe_point = map->lithium_position();
+
+      // The safe points are sorted so we can stop searching here.
+      if (safe_point - 1 > end) break;
+
+      // Advance to the next active range that covers the current
+      // safe point position.
+      LifetimePosition safe_point_pos =
+          LifetimePosition::FromInstructionIndex(safe_point);
+      LiveRange* cur = range;
+      while (cur != NULL && !cur->Covers(safe_point_pos.PrevInstruction())) {
+        cur = cur->next();
+      }
+      if (cur == NULL) continue;
+
+      // Check if the live range is spilled and the safe point is after
+      // the spill position.
+      if (range->HasAllocatedSpillOperand() &&
+          safe_point >= range->spill_start_index()) {
+        TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
+                   range->id(), range->spill_start_index(), safe_point);
+        map->RecordPointer(range->GetSpillOperand());
+      }
+
+      if (!cur->IsSpilled()) {
+        TraceAlloc("Pointer in register for range %d (start at %d) "
+                   "at safe point %d\n",
+                   cur->id(), cur->Start().Value(), safe_point);
+        LOperand* operand = cur->CreateAssignedOperand();
+        ASSERT(!operand->IsStackSlot());
+        map->RecordPointer(operand);
+      }
+    }
+  }
+}
+
+
+void LAllocator::ProcessOsrEntry() {
+  const ZoneList<LInstruction*>* instrs = chunk_->instructions();
+
+  // Linear search for the OSR entry instruction in the chunk.
+  int index = -1;
+  while (++index < instrs->length() &&
+         !instrs->at(index)->IsOsrEntry()) {
+  }
+  ASSERT(index < instrs->length());
+  LOsrEntry* instruction = LOsrEntry::cast(instrs->at(index));
+
+  LifetimePosition position = LifetimePosition::FromInstructionIndex(index);
+  for (int i = 0; i < live_ranges()->length(); ++i) {
+    LiveRange* range = live_ranges()->at(i);
+    if (range != NULL) {
+      if (range->Covers(position) &&
+          range->HasRegisterAssigned() &&
+          range->TopLevel()->HasAllocatedSpillOperand()) {
+        int reg_index = range->assigned_register();
+        LOperand* spill_operand = range->TopLevel()->GetSpillOperand();
+        if (range->IsDouble()) {
+          instruction->MarkSpilledDoubleRegister(reg_index, spill_operand);
+        } else {
+          instruction->MarkSpilledRegister(reg_index, spill_operand);
+        }
+      }
+    }
+  }
+}
+
+
+void LAllocator::AllocateDoubleRegisters() {
+  HPhase phase("Allocate double registers", this);
+  num_registers_ = DoubleRegister::kNumAllocatableRegisters;
+  mode_ = XMM_REGISTERS;
+  AllocateRegisters();
+}
+
+
+void LAllocator::AllocateRegisters() {
+  ASSERT(mode_ != NONE);
+  reusable_slots_.Clear();
+
+  for (int i = 0; i < live_ranges_.length(); ++i) {
+    if (live_ranges_[i] != NULL) {
+      if (HasDoubleValue(live_ranges_[i]->id()) == (mode_ == XMM_REGISTERS)) {
+        AddToUnhandledUnsorted(live_ranges_[i]);
+      }
+    }
+  }
+  SortUnhandled();
+  ASSERT(UnhandledIsSorted());
+
+  ASSERT(active_live_ranges_.is_empty());
+  ASSERT(inactive_live_ranges_.is_empty());
+
+  if (mode_ == XMM_REGISTERS) {
+    for (int i = 0; i < fixed_double_live_ranges_.length(); ++i) {
+      LiveRange* current = fixed_double_live_ranges_.at(i);
+      if (current != NULL) {
+        AddToInactive(current);
+      }
+    }
+  } else {
+    for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
+      LiveRange* current = fixed_live_ranges_.at(i);
+      if (current != NULL) {
+        AddToInactive(current);
+      }
+    }
+  }
+
+  while (!unhandled_live_ranges_.is_empty()) {
+    ASSERT(UnhandledIsSorted());
+    LiveRange* current = unhandled_live_ranges_.RemoveLast();
+    ASSERT(UnhandledIsSorted());
+    LifetimePosition position = current->Start();
+    TraceAlloc("Processing interval %d start=%d\n",
+               current->id(),
+               position.Value());
+
+    if (current->HasAllocatedSpillOperand()) {
+      TraceAlloc("Live range %d already has a spill operand\n", current->id());
+      LifetimePosition next_pos = position;
+      if (chunk_->IsGapAt(next_pos.InstructionIndex())) {
+        next_pos = next_pos.NextInstruction();
+      }
+      UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
+      // If the range already has a spill operand and it doesn't need a
+      // register immediately, split it and spill the first part of the range.
+      if (pos == NULL) {
+        Spill(current);
+        continue;
+      } else if (pos->pos().Value() >
+                 current->Start().NextInstruction().Value()) {
+        // Do not spill the live range eagerly if a use position that
+        // can benefit from the register is too close to the start of
+        // the live range.
+        LiveRange* part = Split(current,
+                                current->Start().NextInstruction(),
+                                pos->pos());
+        Spill(current);
+        AddToUnhandledSorted(part);
+        ASSERT(UnhandledIsSorted());
+        continue;
+      }
+    }
+
+    for (int i = 0; i < active_live_ranges_.length(); ++i) {
+      LiveRange* cur_active = active_live_ranges_.at(i);
+      if (cur_active->End().Value() <= position.Value()) {
+        ActiveToHandled(cur_active);
+        --i;  // The live range was removed from the list of active live ranges.
+      } else if (!cur_active->Covers(position)) {
+        ActiveToInactive(cur_active);
+        --i;  // The live range was removed from the list of active live ranges.
+      }
+    }
+
+    for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+      LiveRange* cur_inactive = inactive_live_ranges_.at(i);
+      if (cur_inactive->End().Value() <= position.Value()) {
+        InactiveToHandled(cur_inactive);
+        --i;  // Live range was removed from the list of inactive live ranges.
+      } else if (cur_inactive->Covers(position)) {
+        InactiveToActive(cur_inactive);
+        --i;  // Live range was removed from the list of inactive live ranges.
+      }
+    }
+
+    ASSERT(!current->HasRegisterAssigned() && !current->IsSpilled());
+
+    bool result = TryAllocateFreeReg(current);
+    if (!result) {
+      AllocateBlockedReg(current);
+    }
+
+    if (current->HasRegisterAssigned()) {
+      AddToActive(current);
+    }
+  }
+
+  active_live_ranges_.Clear();
+  inactive_live_ranges_.Clear();
+}
+
+
+void LAllocator::Setup() {
+  LConstantOperand::SetupCache();
+  LStackSlot::SetupCache();
+  LDoubleStackSlot::SetupCache();
+  LRegister::SetupCache();
+  LDoubleRegister::SetupCache();
+}
+
+
+void LAllocator::TraceAlloc(const char* msg, ...) {
+  if (FLAG_trace_alloc) {
+    va_list arguments;
+    va_start(arguments, msg);
+    OS::VPrint(msg, arguments);
+    va_end(arguments);
+  }
+}
+
+
+void LAllocator::RecordUse(HValue* value, LUnallocated* operand) {
+  operand->set_virtual_register(value->id());
+  current_summary()->AddInput(operand);
+}
+
+
+bool LAllocator::HasTaggedValue(int virtual_register) const {
+  HValue* value = graph()->LookupValue(virtual_register);
+  if (value == NULL) return false;
+  return value->representation().IsTagged();
+}
+
+
+bool LAllocator::HasDoubleValue(int virtual_register) const {
+  HValue* value = graph()->LookupValue(virtual_register);
+  if (value == NULL) return false;
+  return value->representation().IsDouble();
+}
+
+
+void LAllocator::MarkAsCall() {
+  current_summary()->MarkAsCall();
+}
+
+
+void LAllocator::RecordDefinition(HInstruction* instr, LUnallocated* operand) {
+  operand->set_virtual_register(instr->id());
+  current_summary()->SetOutput(operand);
+}
+
+
+void LAllocator::RecordTemporary(LUnallocated* operand) {
+  ASSERT(next_virtual_register_ < LUnallocated::kMaxVirtualRegisters);
+  if (!operand->HasFixedPolicy()) {
+    operand->set_virtual_register(next_virtual_register_++);
+  }
+  current_summary()->AddTemp(operand);
+}
+
+
+int LAllocator::max_initial_value_ids() {
+  return LUnallocated::kMaxVirtualRegisters / 32;
+}
+
+
+void LAllocator::BeginInstruction() {
+  if (next_summary_ == NULL) {
+    next_summary_ = new InstructionSummary();
+  }
+  summary_stack_.Add(next_summary_);
+  next_summary_ = NULL;
+}
+
+
+void LAllocator::SummarizeInstruction(int index) {
+  InstructionSummary* sum = summary_stack_.RemoveLast();
+  if (summaries_.length() <= index) {
+    summaries_.AddBlock(NULL, index + 1 - summaries_.length());
+  }
+  ASSERT(summaries_[index] == NULL);
+  if (sum->Output() != NULL || sum->InputCount() > 0 || sum->TempCount() > 0) {
+    summaries_[index] = sum;
+  } else {
+    next_summary_ = sum;
+  }
+}
+
+
+void LAllocator::OmitInstruction() {
+  summary_stack_.RemoveLast();
+}
+
+
+void LAllocator::AddToActive(LiveRange* range) {
+  TraceAlloc("Add live range %d to active\n", range->id());
+  active_live_ranges_.Add(range);
+}
+
+
+void LAllocator::AddToInactive(LiveRange* range) {
+  TraceAlloc("Add live range %d to inactive\n", range->id());
+  inactive_live_ranges_.Add(range);
+}
+
+
+void LAllocator::AddToUnhandledSorted(LiveRange* range) {
+  if (range == NULL || range->IsEmpty()) return;
+  ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
+  for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
+    LiveRange* cur_range = unhandled_live_ranges_.at(i);
+    if (range->ShouldBeAllocatedBefore(cur_range)) {
+      TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
+      unhandled_live_ranges_.InsertAt(i + 1, range);
+      ASSERT(UnhandledIsSorted());
+      return;
+    }
+  }
+  TraceAlloc("Add live range %d to unhandled at start\n", range->id());
+  unhandled_live_ranges_.InsertAt(0, range);
+  ASSERT(UnhandledIsSorted());
+}
+
+
+void LAllocator::AddToUnhandledUnsorted(LiveRange* range) {
+  if (range == NULL || range->IsEmpty()) return;
+  ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
+  TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
+  unhandled_live_ranges_.Add(range);
+}
+
+
+static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) {
+  ASSERT(!(*a)->ShouldBeAllocatedBefore(*b) ||
+         !(*b)->ShouldBeAllocatedBefore(*a));
+  if ((*a)->ShouldBeAllocatedBefore(*b)) return 1;
+  if ((*b)->ShouldBeAllocatedBefore(*a)) return -1;
+  return (*a)->id() - (*b)->id();
+}
+
+
+// Sort the unhandled live ranges so that the ranges to be processed first are
+// at the end of the array list.  This is convenient for the register allocation
+// algorithm because it is efficient to remove elements from the end.
+void LAllocator::SortUnhandled() {
+  TraceAlloc("Sort unhandled\n");
+  unhandled_live_ranges_.Sort(&UnhandledSortHelper);
+}
+
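+// Example (illustrative): ranges with start positions [12, 4, 20] end
+// up ordered as [20, 12, 4] in the array, so RemoveLast() always pops
+// the range with the smallest start position first.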
+
+bool LAllocator::UnhandledIsSorted() {
+  int len = unhandled_live_ranges_.length();
+  for (int i = 1; i < len; i++) {
+    LiveRange* a = unhandled_live_ranges_.at(i - 1);
+    LiveRange* b = unhandled_live_ranges_.at(i);
+    if (a->Start().Value() < b->Start().Value()) return false;
+  }
+  return true;
+}
+
+
+void LAllocator::FreeSpillSlot(LiveRange* range) {
+  // Check that we are the last range.
+  if (range->next() != NULL) return;
+
+  if (!range->TopLevel()->HasAllocatedSpillOperand()) return;
+
+  int index = range->TopLevel()->GetSpillOperand()->index();
+  if (index >= 0) {
+    reusable_slots_.Add(range);
+  }
+}
+
+
+LOperand* LAllocator::TryReuseSpillSlot(LiveRange* range) {
+  if (reusable_slots_.is_empty()) return NULL;
+  if (reusable_slots_.first()->End().Value() >
+      range->TopLevel()->Start().Value()) {
+    return NULL;
+  }
+  LOperand* result = reusable_slots_.first()->TopLevel()->GetSpillOperand();
+  reusable_slots_.Remove(0);
+  return result;
+}
+
+
+void LAllocator::ActiveToHandled(LiveRange* range) {
+  ASSERT(active_live_ranges_.Contains(range));
+  active_live_ranges_.RemoveElement(range);
+  TraceAlloc("Moving live range %d from active to handled\n", range->id());
+  FreeSpillSlot(range);
+}
+
+
+void LAllocator::ActiveToInactive(LiveRange* range) {
+  ASSERT(active_live_ranges_.Contains(range));
+  active_live_ranges_.RemoveElement(range);
+  inactive_live_ranges_.Add(range);
+  TraceAlloc("Moving live range %d from active to inactive\n", range->id());
+}
+
+
+void LAllocator::InactiveToHandled(LiveRange* range) {
+  ASSERT(inactive_live_ranges_.Contains(range));
+  inactive_live_ranges_.RemoveElement(range);
+  TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
+  FreeSpillSlot(range);
+}
+
+
+void LAllocator::InactiveToActive(LiveRange* range) {
+  ASSERT(inactive_live_ranges_.Contains(range));
+  inactive_live_ranges_.RemoveElement(range);
+  active_live_ranges_.Add(range);
+  TraceAlloc("Moving live range %d from inactive to active\n", range->id());
+}
+
+
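+// Tries to give current a register that is free for its entire
+// lifetime. free_pos[r] tracks the first position at which register r
+// stops being free; a hinted register is preferred when it stays free
+// past the end of current. Otherwise the register that is free the
+// longest is chosen, splitting current if that register is not free
+// for long enough.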
+bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
+  LifetimePosition max_pos = LifetimePosition::FromInstructionIndex(
+      chunk_->instructions()->length() + 1);
+  ASSERT(DoubleRegister::kNumAllocatableRegisters >=
+         Register::kNumAllocatableRegisters);
+  EmbeddedVector<LifetimePosition, DoubleRegister::kNumAllocatableRegisters>
+      free_pos(max_pos);
+  for (int i = 0; i < active_live_ranges_.length(); ++i) {
+    LiveRange* cur_active = active_live_ranges_.at(i);
+    free_pos[cur_active->assigned_register()] =
+        LifetimePosition::FromInstructionIndex(0);
+  }
+
+  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+    LiveRange* cur_inactive = inactive_live_ranges_.at(i);
+    ASSERT(cur_inactive->End().Value() > current->Start().Value());
+    LifetimePosition next_intersection =
+        cur_inactive->FirstIntersection(current);
+    if (!next_intersection.IsValid()) continue;
+    int cur_reg = cur_inactive->assigned_register();
+    free_pos[cur_reg] = Min(free_pos[cur_reg], next_intersection);
+  }
+
+  UsePosition* pos = current->FirstPosWithHint();
+  if (pos != NULL) {
+    LOperand* hint = pos->hint();
+    if (hint->IsRegister() || hint->IsDoubleRegister()) {
+      int register_index = hint->index();
+      TraceAlloc("Found reg hint %d for live range %d (free [%d, end %d[)\n",
+                 register_index,
+                 current->id(),
+                 free_pos[register_index].Value(),
+                 current->End().Value());
+      if (free_pos[register_index].Value() >= current->End().Value()) {
+        TraceAlloc("Assigning preferred reg %d to live range %d\n",
+                   register_index,
+                   current->id());
+        current->set_assigned_register(register_index, mode_ == XMM_REGISTERS);
+        return true;
+      }
+    }
+  }
+
+  int max_reg = 0;
+  for (int i = 1; i < RegisterCount(); ++i) {
+    if (free_pos[i].Value() > free_pos[max_reg].Value()) {
+      max_reg = i;
+    }
+  }
+
+  if (free_pos[max_reg].InstructionIndex() == 0) {
+    return false;
+  } else if (free_pos[max_reg].Value() >= current->End().Value()) {
+    TraceAlloc("Assigning reg %d to live range %d\n", max_reg, current->id());
+    current->set_assigned_register(max_reg, mode_ == XMM_REGISTERS);
+  } else {
+    // Split the interval at the nearest gap and never split an interval at its
+    // start position.
+    LifetimePosition pos =
+        LifetimePosition::FromInstructionIndex(
+            chunk_->NearestGapPos(free_pos[max_reg].InstructionIndex()));
+    if (pos.Value() <= current->Start().Value()) return false;
+    LiveRange* second_range = Split(current, pos);
+    AddToUnhandledSorted(second_range);
+    current->set_assigned_register(max_reg, mode_ == XMM_REGISTERS);
+  }
+
+  return true;
+}
+
+
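+// Called when no register is free for the whole of current's lifetime:
+// picks the register whose next use by other ranges is farthest away,
+// spills current itself if its first register use comes even later,
+// and otherwise evicts (splits and spills) the ranges blocking the
+// chosen register.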
+void LAllocator::AllocateBlockedReg(LiveRange* current) {
+  LifetimePosition max_pos =
+      LifetimePosition::FromInstructionIndex(
+          chunk_->instructions()->length() + 1);
+  ASSERT(DoubleRegister::kNumAllocatableRegisters >=
+         Register::kNumAllocatableRegisters);
+  EmbeddedVector<LifetimePosition, DoubleRegister::kNumAllocatableRegisters>
+      use_pos(max_pos);
+  EmbeddedVector<LifetimePosition, DoubleRegister::kNumAllocatableRegisters>
+      block_pos(max_pos);
+
+  for (int i = 0; i < active_live_ranges_.length(); ++i) {
+    LiveRange* range = active_live_ranges_[i];
+    int cur_reg = range->assigned_register();
+    if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
+      block_pos[cur_reg] = use_pos[cur_reg] =
+          LifetimePosition::FromInstructionIndex(0);
+    } else {
+      UsePosition* next_use = range->NextUsePositionRegisterIsBeneficial(
+          current->Start());
+      if (next_use == NULL) {
+        use_pos[cur_reg] = range->End();
+      } else {
+        use_pos[cur_reg] = next_use->pos();
+      }
+    }
+  }
+
+  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+    LiveRange* range = inactive_live_ranges_.at(i);
+    ASSERT(range->End().Value() > current->Start().Value());
+    LifetimePosition next_intersection = range->FirstIntersection(current);
+    if (!next_intersection.IsValid()) continue;
+    int cur_reg = range->assigned_register();
+    if (range->IsFixed()) {
+      block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
+      use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
+    } else {
+      use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+    }
+  }
+
+  int max_reg = 0;
+  for (int i = 1; i < RegisterCount(); ++i) {
+    if (use_pos[i].Value() > use_pos[max_reg].Value()) {
+      max_reg = i;
+    }
+  }
+
+  UsePosition* first_usage = current->NextRegisterPosition(current->Start());
+  if (first_usage == NULL) {
+    Spill(current);
+  } else if (use_pos[max_reg].Value() < first_usage->pos().Value()) {
+    SplitAndSpill(current, current->Start(), first_usage->pos());
+  } else {
+    if (block_pos[max_reg].Value() < current->End().Value()) {
+      // Split current before blocked position.
+      LiveRange* second_range = Split(current,
+                                      current->Start(),
+                                      block_pos[max_reg]);
+      AddToUnhandledSorted(second_range);
+    }
+
+    current->set_assigned_register(max_reg, mode_ == XMM_REGISTERS);
+    SplitAndSpillIntersecting(current);
+  }
+}
+
+
+void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
+  ASSERT(current->HasRegisterAssigned());
+  int reg = current->assigned_register();
+  LifetimePosition split_pos =
+      LifetimePosition::FromInstructionIndex(
+          chunk_->NearestGapPos(current->Start().InstructionIndex()));
+  for (int i = 0; i < active_live_ranges_.length(); ++i) {
+    LiveRange* range = active_live_ranges_[i];
+    if (range->assigned_register() == reg) {
+      UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+      if (next_pos == NULL) {
+        SplitAndSpill(range, split_pos);
+      } else {
+        SplitAndSpill(range, split_pos, next_pos->pos());
+      }
+      ActiveToHandled(range);
+      --i;
+    }
+  }
+
+  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+    LiveRange* range = inactive_live_ranges_[i];
+    ASSERT(range->End().Value() > current->Start().Value());
+    if (range->assigned_register() == reg && !range->IsFixed()) {
+      LifetimePosition next_intersection = range->FirstIntersection(current);
+      if (next_intersection.IsValid()) {
+        UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+        if (next_pos == NULL) {
+          SplitAndSpill(range, split_pos);
+        } else {
+          next_intersection = Min(next_intersection, next_pos->pos());
+          SplitAndSpill(range, split_pos, next_intersection);
+        }
+        InactiveToHandled(range);
+        --i;
+      }
+    }
+  }
+}
+
+
+LiveRange* LAllocator::Split(LiveRange* range,
+                             LifetimePosition start,
+                             LifetimePosition end) {
+  ASSERT(!range->IsFixed());
+  TraceAlloc("Splitting live range %d in position between [%d, %d[\n",
+             range->id(),
+             start.Value(),
+             end.Value());
+
+  LifetimePosition split_pos = FindOptimalSplitPos(
+      start, end.PrevInstruction().InstructionEnd());
+  ASSERT(split_pos.Value() >= start.Value());
+  return Split(range, split_pos);
+}
+
+
+LifetimePosition LAllocator::FindOptimalSplitPos(LifetimePosition start,
+                                                 LifetimePosition end) {
+  int start_instr = start.InstructionIndex();
+  int end_instr = end.InstructionIndex();
+  ASSERT(start_instr <= end_instr);
+
+  // We have no choice: both positions are in the same instruction.
+  if (start_instr == end_instr) return end;
+
+  HBasicBlock* start_block = GetBlock(start);
+  HBasicBlock* end_block = GetBlock(end);
+
+  if (end_block == start_block) {
+    // The interval is split within the same basic block. Split at the
+    // latest possible position.
+    return end;
+  }
+
+  HBasicBlock* block = end_block;
+  // Move to the outermost loop header.
+  while (block->parent_loop_header() != NULL &&
+      block->parent_loop_header()->block_id() > start_block->block_id()) {
+    block = block->parent_loop_header();
+  }
+
+  if (block == end_block) {
+    return end;
+  }
+
+  return LifetimePosition::FromInstructionIndex(
+      block->first_instruction_index());
+}
+
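+// Example (illustrative): when the allowed split window crosses a loop
+// boundary, the chosen position is hoisted out of the loop. If start
+// is in block B1 and end is inside a loop whose outermost header B3
+// starts after B1, the split happens at B3's first instruction instead
+// of inside the loop body, so the value is not reloaded on every
+// iteration.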
+
+bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
+  return pos.IsInstructionStart() &&
+      chunk_->instructions()->at(pos.InstructionIndex())->IsLabel();
+}
+
+
+void LAllocator::AddGapMove(int pos, LiveRange* prev, LiveRange* next) {
+  UsePosition* prev_pos = prev->AddUsePosition(
+      LifetimePosition::FromInstructionIndex(pos));
+  UsePosition* next_pos = next->AddUsePosition(
+      LifetimePosition::FromInstructionIndex(pos));
+  LOperand* prev_operand = prev_pos->operand();
+  LOperand* next_operand = next_pos->operand();
+  LGap* gap = chunk_->GetGapAt(pos);
+  gap->GetOrCreateParallelMove(LGap::START)->
+      AddMove(prev_operand, next_operand);
+  next_pos->set_hint(prev_operand);
+}
+
+
+LiveRange* LAllocator::Split(LiveRange* range, LifetimePosition pos) {
+  ASSERT(!range->IsFixed());
+  TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
+  if (pos.Value() <= range->Start().Value()) {
+    return range;
+  }
+  LiveRange* result = LiveRangeFor(next_virtual_register_++);
+  range->SplitAt(pos, result);
+  return result;
+}
+
+
+void LAllocator::SplitAndSpill(LiveRange* range,
+                               LifetimePosition start,
+                               LifetimePosition end) {
+  // We have a live range and want to make sure that it is spilled at
+  // start and stays spilled at most until end; anything after end is
+  // added back to the unhandled set.
+  ASSERT(start.Value() < end.Value());
+  LiveRange* tail_part = Split(range, start);
+  if (tail_part->Start().Value() < end.Value()) {
+    LiveRange* third_part = Split(tail_part,
+                                  tail_part->Start().NextInstruction(),
+                                  end);
+    Spill(tail_part);
+    ASSERT(third_part != tail_part);
+    AddToUnhandledSorted(third_part);
+  } else {
+    AddToUnhandledSorted(tail_part);
+  }
+}
+
+
+void LAllocator::SplitAndSpill(LiveRange* range, LifetimePosition at) {
+  at = LifetimePosition::FromInstructionIndex(
+      chunk_->NearestGapPos(at.InstructionIndex()));
+  LiveRange* second_part = Split(range, at);
+  Spill(second_part);
+}
+
+
+void LAllocator::Spill(LiveRange* range) {
+  ASSERT(!range->IsSpilled());
+  TraceAlloc("Spilling live range %d\n", range->id());
+  LiveRange* first = range->TopLevel();
+
+  if (!first->HasAllocatedSpillOperand()) {
+    LOperand* op = TryReuseSpillSlot(range);
+    if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == XMM_REGISTERS);
+    first->SetSpillOperand(op);
+  }
+  range->MakeSpilled();
+}
+
+
+int LAllocator::RegisterCount() const {
+  return num_registers_;
+}
+
+
+#ifdef DEBUG
+
+
+void LAllocator::Verify() const {
+  for (int i = 0; i < live_ranges()->length(); ++i) {
+    LiveRange* current = live_ranges()->at(i);
+    if (current != NULL) current->Verify();
+  }
+}
+
+
+#endif
+
+
+} }  // namespace v8::internal
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
new file mode 100644 (file)
index 0000000..52fee64
--- /dev/null
@@ -0,0 +1,954 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LITHIUM_ALLOCATOR_H_
+#define V8_LITHIUM_ALLOCATOR_H_
+
+#include "v8.h"
+
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class HBasicBlock;
+class HGraph;
+class HInstruction;
+class HPhi;
+class HTracer;
+class HValue;
+class BitVector;
+class StringStream;
+
+class LArgument;
+class LChunk;
+class LConstantOperand;
+class LGap;
+class LInstruction;
+class LParallelMove;
+class LPointerMap;
+class LStackSlot;
+class LRegister;
+
+// This class represents a single point of a LOperand's lifetime.
+// For each lithium instruction there are exactly two lifetime positions:
+// the beginning and the end of the instruction. Lifetime positions for
+// different lithium instructions are disjoint.
+class LifetimePosition {
+ public:
+  // Return the lifetime position that corresponds to the beginning of
+  // the instruction with the given index.
+  static LifetimePosition FromInstructionIndex(int index) {
+    return LifetimePosition(index * kStep);
+  }
+
+  // Returns a numeric representation of this lifetime position.
+  int Value() const {
+    return value_;
+  }
+
+  // Returns the index of the instruction to which this lifetime position
+  // corresponds.
+  int InstructionIndex() const {
+    ASSERT(IsValid());
+    return value_ / kStep;
+  }
+
+  // Returns true if this lifetime position corresponds to the instruction
+  // start.
+  bool IsInstructionStart() const {
+    return (value_ & (kStep - 1)) == 0;
+  }
+
+  // Returns the lifetime position for the start of the instruction which
+  // corresponds to this lifetime position.
+  LifetimePosition InstructionStart() const {
+    ASSERT(IsValid());
+    return LifetimePosition(value_ & ~(kStep - 1));
+  }
+
+  // Returns the lifetime position for the end of the instruction which
+  // corresponds to this lifetime position.
+  LifetimePosition InstructionEnd() const {
+    ASSERT(IsValid());
+    return LifetimePosition(InstructionStart().Value() + kStep/2);
+  }
+
+  // Returns the lifetime position for the beginning of the next instruction.
+  LifetimePosition NextInstruction() const {
+    ASSERT(IsValid());
+    return LifetimePosition(InstructionStart().Value() + kStep);
+  }
+
+  // Returns the lifetime position for the beginning of the previous
+  // instruction.
+  LifetimePosition PrevInstruction() const {
+    ASSERT(IsValid());
+    ASSERT(value_ > 1);
+    return LifetimePosition(InstructionStart().Value() - kStep);
+  }
+
+  // Constructs the lifetime position which does not correspond to any
+  // instruction.
+  LifetimePosition() : value_(-1) {}
+
+  // Returns true if this lifetime position corresponds to some
+  // instruction.
+  bool IsValid() const { return value_ != -1; }
+
+  static LifetimePosition Invalid() { return LifetimePosition(); }
+
+ private:
+  static const int kStep = 2;
+
+  // Code relies on kStep being a power of two.
+  STATIC_ASSERT(IS_POWER_OF_TWO(kStep));
+
+  explicit LifetimePosition(int value) : value_(value) { }
+
+  int value_;
+};
+
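+// Example (illustrative, assumes kStep == 2 as defined above):
+// instruction i owns the two positions 2*i (start) and 2*i + 1 (end).
+// For the instruction with index 3:
+//   LifetimePosition p = LifetimePosition::FromInstructionIndex(3);
+//   p.Value();                    // 6
+//   p.IsInstructionStart();       // true
+//   p.InstructionEnd().Value();   // 7
+//   p.NextInstruction().Value();  // 8
+//   p.PrevInstruction().Value();  // 4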
+
+class LOperand: public ZoneObject {
+ public:
+  enum Kind {
+    INVALID,
+    UNALLOCATED,
+    CONSTANT_OPERAND,
+    STACK_SLOT,
+    DOUBLE_STACK_SLOT,
+    REGISTER,
+    DOUBLE_REGISTER,
+    ARGUMENT
+  };
+
+  LOperand() : value_(KindField::encode(INVALID)) { }
+
+  Kind kind() const { return KindField::decode(value_); }
+  int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
+  bool IsConstantOperand() const { return kind() == CONSTANT_OPERAND; }
+  bool IsStackSlot() const { return kind() == STACK_SLOT; }
+  bool IsDoubleStackSlot() const { return kind() == DOUBLE_STACK_SLOT; }
+  bool IsRegister() const { return kind() == REGISTER; }
+  bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; }
+  bool IsArgument() const { return kind() == ARGUMENT; }
+  bool IsUnallocated() const { return kind() == UNALLOCATED; }
+  bool Equals(LOperand* other) const { return value_ == other->value_; }
+  int VirtualRegister();
+
+  void PrintTo(StringStream* stream);
+  void ConvertTo(Kind kind, int index) {
+    value_ = KindField::encode(kind);
+    value_ |= index << kKindFieldWidth;
+    ASSERT(this->index() == index);
+  }
+
+ protected:
+  static const int kKindFieldWidth = 3;
+  class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
+
+  LOperand(Kind kind, int index) { ConvertTo(kind, index); }
+
+  unsigned value_;
+};
+
+
+class LUnallocated: public LOperand {
+ public:
+  enum Policy {
+    NONE,
+    ANY,
+    FIXED_REGISTER,
+    FIXED_DOUBLE_REGISTER,
+    FIXED_SLOT,
+    MUST_HAVE_REGISTER,
+    WRITABLE_REGISTER,
+    SAME_AS_FIRST_INPUT,
+    SAME_AS_ANY_INPUT,
+    IGNORE
+  };
+
+  // Lifetime of operand inside the instruction.
+  enum Lifetime {
+    // A USED_AT_START operand is guaranteed to be live only at the
+    // instruction start. The register allocator is free to assign the
+    // same register to some other operand used inside the instruction
+    // (i.e. a temporary or an output).
+    USED_AT_START,
+
+    // A USED_AT_END operand is treated as live until the end of the
+    // instruction. This means that the register allocator will not
+    // reuse its register for any other operand inside the instruction.
+    USED_AT_END
+  };
+
+  explicit LUnallocated(Policy policy) : LOperand(UNALLOCATED, 0) {
+    Initialize(policy, 0, USED_AT_END);
+  }
+
+  LUnallocated(Policy policy, int fixed_index) : LOperand(UNALLOCATED, 0) {
+    Initialize(policy, fixed_index, USED_AT_END);
+  }
+
+  LUnallocated(Policy policy, Lifetime lifetime) : LOperand(UNALLOCATED, 0) {
+    Initialize(policy, 0, lifetime);
+  }
+
+  // The superclass has a KindField.  Some policies have a signed fixed
+  // index in the upper bits.
+  static const int kPolicyWidth = 4;
+  static const int kLifetimeWidth = 1;
+  static const int kVirtualRegisterWidth = 17;
+
+  static const int kPolicyShift = kKindFieldWidth;
+  static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
+  static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth;
+  static const int kFixedIndexShift =
+      kVirtualRegisterShift + kVirtualRegisterWidth;
+
+  class PolicyField : public BitField<Policy, kPolicyShift, kPolicyWidth> { };
+
+  class LifetimeField
+      : public BitField<Lifetime, kLifetimeShift, kLifetimeWidth> {
+  };
+
+  class VirtualRegisterField
+      : public BitField<unsigned,
+                        kVirtualRegisterShift,
+                        kVirtualRegisterWidth> {
+  };
+
+  // Virtual register ids must fit in the virtual register bit field.
+  static const int kMaxVirtualRegisters = 1 << kVirtualRegisterWidth;
+  static const int kMaxFixedIndices = 128;
+
+  bool HasIgnorePolicy() const { return policy() == IGNORE; }
+  bool HasNoPolicy() const { return policy() == NONE; }
+  bool HasAnyPolicy() const {
+    return policy() == ANY;
+  }
+  bool HasFixedPolicy() const {
+    return policy() == FIXED_REGISTER ||
+        policy() == FIXED_DOUBLE_REGISTER ||
+        policy() == FIXED_SLOT;
+  }
+  bool HasRegisterPolicy() const {
+    return policy() == WRITABLE_REGISTER || policy() == MUST_HAVE_REGISTER;
+  }
+  bool HasSameAsInputPolicy() const {
+    return policy() == SAME_AS_FIRST_INPUT || policy() == SAME_AS_ANY_INPUT;
+  }
+  Policy policy() const { return PolicyField::decode(value_); }
+  void set_policy(Policy policy) {
+    value_ &= ~PolicyField::mask();
+    value_ |= PolicyField::encode(policy);
+  }
+  int fixed_index() const {
+    return static_cast<int>(value_) >> kFixedIndexShift;
+  }
+
+  unsigned virtual_register() const {
+    return VirtualRegisterField::decode(value_);
+  }
+
+  void set_virtual_register(unsigned id) {
+    value_ &= ~VirtualRegisterField::mask();
+    value_ |= VirtualRegisterField::encode(id);
+  }
+
+  LUnallocated* CopyUnconstrained() {
+    LUnallocated* result = new LUnallocated(ANY);
+    result->set_virtual_register(virtual_register());
+    return result;
+  }
+
+  static LUnallocated* cast(LOperand* op) {
+    ASSERT(op->IsUnallocated());
+    return reinterpret_cast<LUnallocated*>(op);
+  }
+
+  bool IsUsedAtStart() {
+    return LifetimeField::decode(value_) == USED_AT_START;
+  }
+
+ private:
+  void Initialize(Policy policy, int fixed_index, Lifetime lifetime) {
+    value_ |= PolicyField::encode(policy);
+    value_ |= LifetimeField::encode(lifetime);
+    value_ |= fixed_index << kFixedIndexShift;
+    ASSERT(this->fixed_index() == fixed_index);
+  }
+};
+
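+// Example (illustrative): the encoding packs kind, policy, lifetime,
+// virtual register and fixed index into a single word. For instance:
+//   LUnallocated op(LUnallocated::FIXED_REGISTER, 2);
+//   op.HasFixedPolicy();  // true
+//   op.fixed_index();     // 2, stored in the bits above the
+//                         //    virtual register field
+//   op.IsUsedAtStart();   // false, the constructor defaults to
+//                         //    USED_AT_END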
+
+class LMoveOperands BASE_EMBEDDED {
+ public:
+  LMoveOperands(LOperand* from, LOperand* to) : from_(from), to_(to) { }
+
+  LOperand* from() const { return from_; }
+  LOperand* to() const { return to_; }
+  bool IsRedundant() const {
+    return IsEliminated() || from_->Equals(to_) || IsIgnored();
+  }
+  bool IsEliminated() const { return from_ == NULL; }
+  bool IsIgnored() const {
+    return to_ != NULL && to_->IsUnallocated() &&
+        LUnallocated::cast(to_)->HasIgnorePolicy();
+  }
+
+  void Eliminate() { from_ = to_ = NULL; }
+
+ private:
+  LOperand* from_;
+  LOperand* to_;
+};
+
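+// Example (illustrative): given some LOperand* op, a move from an
+// operand onto itself is redundant:
+//   LMoveOperands move(op, op);
+//   move.IsRedundant();   // true, since from()->Equals(to())
+//   move.Eliminate();     // sets both ends to NULL
+//   move.IsEliminated();  // true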
+
+class LConstantOperand: public LOperand {
+ public:
+  static LConstantOperand* Create(int index) {
+    ASSERT(index >= 0);
+    if (index < kNumCachedOperands) return &cache[index];
+    return new LConstantOperand(index);
+  }
+
+  static LConstantOperand* cast(LOperand* op) {
+    ASSERT(op->IsConstantOperand());
+    return reinterpret_cast<LConstantOperand*>(op);
+  }
+
+  static void SetupCache();
+
+ private:
+  static const int kNumCachedOperands = 128;
+  static LConstantOperand cache[];
+
+  LConstantOperand() : LOperand() { }
+  explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
+};
+
+
+class LArgument: public LOperand {
+ public:
+  explicit LArgument(int index) : LOperand(ARGUMENT, index) { }
+
+  static LArgument* cast(LOperand* op) {
+    ASSERT(op->IsArgument());
+    return reinterpret_cast<LArgument*>(op);
+  }
+};
+
+
+class LStackSlot: public LOperand {
+ public:
+  static LStackSlot* Create(int index) {
+    ASSERT(index >= 0);
+    if (index < kNumCachedOperands) return &cache[index];
+    return new LStackSlot(index);
+  }
+
+  static LStackSlot* cast(LOperand* op) {
+    ASSERT(op->IsStackSlot());
+    return reinterpret_cast<LStackSlot*>(op);
+  }
+
+  static void SetupCache();
+
+ private:
+  static const int kNumCachedOperands = 128;
+  static LStackSlot cache[];
+
+  LStackSlot() : LOperand() { }
+  explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
+};
+
+
+class LDoubleStackSlot: public LOperand {
+ public:
+  static LDoubleStackSlot* Create(int index) {
+    ASSERT(index >= 0);
+    if (index < kNumCachedOperands) return &cache[index];
+    return new LDoubleStackSlot(index);
+  }
+
+  static LDoubleStackSlot* cast(LOperand* op) {
+    ASSERT(op->IsDoubleStackSlot());
+    return reinterpret_cast<LDoubleStackSlot*>(op);
+  }
+
+  static void SetupCache();
+
+ private:
+  static const int kNumCachedOperands = 128;
+  static LDoubleStackSlot cache[];
+
+  LDoubleStackSlot() : LOperand() { }
+  explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
+};
+
+
+class LRegister: public LOperand {
+ public:
+  static LRegister* Create(int index) {
+    ASSERT(index >= 0);
+    if (index < kNumCachedOperands) return &cache[index];
+    return new LRegister(index);
+  }
+
+  static LRegister* cast(LOperand* op) {
+    ASSERT(op->IsRegister());
+    return reinterpret_cast<LRegister*>(op);
+  }
+
+  static void SetupCache();
+
+ private:
+  static const int kNumCachedOperands = 16;
+  static LRegister cache[];
+
+  LRegister() : LOperand() { }
+  explicit LRegister(int index) : LOperand(REGISTER, index) { }
+};
+
+
+class LDoubleRegister: public LOperand {
+ public:
+  static LDoubleRegister* Create(int index) {
+    ASSERT(index >= 0);
+    if (index < kNumCachedOperands) return &cache[index];
+    return new LDoubleRegister(index);
+  }
+
+  static LDoubleRegister* cast(LOperand* op) {
+    ASSERT(op->IsDoubleRegister());
+    return reinterpret_cast<LDoubleRegister*>(op);
+  }
+
+  static void SetupCache();
+
+ private:
+  static const int kNumCachedOperands = 16;
+  static LDoubleRegister cache[];
+
+  LDoubleRegister() : LOperand() { }
+  explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
+};
+
+
+// A register-allocator view of a Lithium instruction. It contains the
+// output operand and the lists of input and temporary operands.
+class InstructionSummary: public ZoneObject {
+ public:
+  InstructionSummary()
+      : output_operand_(NULL), input_count_(0), operands_(4), is_call_(false) {}
+
+  // Output operands.
+  LOperand* Output() const { return output_operand_; }
+  void SetOutput(LOperand* output) {
+    ASSERT(output_operand_ == NULL);
+    output_operand_ = output;
+  }
+
+  // Input operands.
+  int InputCount() const { return input_count_; }
+  LOperand* InputAt(int i) const {
+    ASSERT(i < input_count_);
+    return operands_[i];
+  }
+  void AddInput(LOperand* input) {
+    operands_.InsertAt(input_count_, input);
+    input_count_++;
+  }
+
+  // Temporary operands.
+  int TempCount() const { return operands_.length() - input_count_; }
+  LOperand* TempAt(int i) const { return operands_[i + input_count_]; }
+  void AddTemp(LOperand* temp) { operands_.Add(temp); }
+
+  void MarkAsCall() { is_call_ = true; }
+  bool IsCall() const { return is_call_; }
+
+ private:
+  LOperand* output_operand_;
+  int input_count_;
+  ZoneList<LOperand*> operands_;
+  bool is_call_;
+};
+
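+// Example (illustrative): during lowering the allocator builds one
+// summary per instruction via BeginInstruction(), the RecordUse /
+// RecordDefinition / RecordTemporary calls, and SummarizeInstruction();
+// ProcessInstructions() later replays these summaries to build live
+// ranges.
+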
+// Representation of the non-empty interval [start,end[.
+class UseInterval: public ZoneObject {
+ public:
+  UseInterval(LifetimePosition start, LifetimePosition end)
+      : start_(start), end_(end), next_(NULL) {
+    ASSERT(start.Value() < end.Value());
+  }
+
+  LifetimePosition start() const { return start_; }
+  LifetimePosition end() const { return end_; }
+  UseInterval* next() const { return next_; }
+
+  // Split this interval at the given position without affecting the
+  // live range that owns it. The interval must contain the position.
+  void SplitAt(LifetimePosition pos);
+
+  // If this interval intersects with the other, returns the smallest
+  // position that belongs to both of them.
+  LifetimePosition Intersect(const UseInterval* other) const {
+    if (other->start().Value() < start_.Value()) return other->Intersect(this);
+    if (other->start().Value() < end_.Value()) return other->start();
+    return LifetimePosition::Invalid();
+  }
+
+  bool Contains(LifetimePosition point) const {
+    return start_.Value() <= point.Value() && point.Value() < end_.Value();
+  }
+
+ private:
+  void set_start(LifetimePosition start) { start_ = start; }
+  void set_next(UseInterval* next) { next_ = next; }
+
+  LifetimePosition start_;
+  LifetimePosition end_;
+  UseInterval* next_;
+
+  friend class LiveRange;  // Assigns to start_.
+};
+
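+// Example (illustrative): the intervals [2, 8[ and [6, 12[ overlap,
+// and Intersect returns the smallest position in both:
+//   UseInterval a(LifetimePosition::FromInstructionIndex(1),
+//                 LifetimePosition::FromInstructionIndex(4));
+//   UseInterval b(LifetimePosition::FromInstructionIndex(3),
+//                 LifetimePosition::FromInstructionIndex(6));
+//   a.Intersect(&b).Value();  // 6, i.e. b.start()
+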
+// Representation of a use position.
+class UsePosition: public ZoneObject {
+ public:
+  UsePosition(LifetimePosition pos, LOperand* operand)
+      : operand_(operand),
+        hint_(NULL),
+        pos_(pos),
+        next_(NULL),
+        requires_reg_(false),
+        register_beneficial_(true) {
+    if (operand_ != NULL && operand_->IsUnallocated()) {
+      LUnallocated* unalloc = LUnallocated::cast(operand_);
+      requires_reg_ = unalloc->HasRegisterPolicy();
+      register_beneficial_ = !unalloc->HasAnyPolicy();
+    }
+    ASSERT(pos_.IsValid());
+  }
+
+  LOperand* operand() const { return operand_; }
+  bool HasOperand() const { return operand_ != NULL; }
+
+  LOperand* hint() const { return hint_; }
+  void set_hint(LOperand* hint) { hint_ = hint; }
+  bool HasHint() const { return hint_ != NULL && !hint_->IsUnallocated(); }
+  bool RequiresRegister() const;
+  bool RegisterIsBeneficial() const;
+
+  LifetimePosition pos() const { return pos_; }
+  UsePosition* next() const { return next_; }
+
+ private:
+  void set_next(UsePosition* next) { next_ = next; }
+
+  LOperand* operand_;
+  LOperand* hint_;
+  LifetimePosition pos_;
+  UsePosition* next_;
+  bool requires_reg_;
+  bool register_beneficial_;
+
+  friend class LiveRange;
+};
+
+// Representation of SSA values' live ranges as a collection of (continuous)
+// intervals over the instruction ordering.
+class LiveRange: public ZoneObject {
+ public:
+  static const int kInvalidAssignment = 0x7fffffff;
+
+  explicit LiveRange(int id)
+      : id_(id),
+        spilled_(false),
+        assigned_double_(false),
+        assigned_register_(kInvalidAssignment),
+        last_interval_(NULL),
+        first_interval_(NULL),
+        first_pos_(NULL),
+        parent_(NULL),
+        next_(NULL),
+        current_interval_(NULL),
+        last_processed_use_(NULL),
+        spill_start_index_(kMaxInt) {
+    spill_operand_ = new LUnallocated(LUnallocated::IGNORE);
+  }
+
+  UseInterval* first_interval() const { return first_interval_; }
+  UsePosition* first_pos() const { return first_pos_; }
+  LiveRange* parent() const { return parent_; }
+  LiveRange* TopLevel() { return (parent_ == NULL) ? this : parent_; }
+  LiveRange* next() const { return next_; }
+  bool IsChild() const { return parent() != NULL; }
+  bool IsParent() const { return parent() == NULL; }
+  int id() const { return id_; }
+  bool IsFixed() const { return id_ < 0; }
+  bool IsEmpty() const { return first_interval() == NULL; }
+  LOperand* CreateAssignedOperand();
+  int assigned_register() const { return assigned_register_; }
+  int spill_start_index() const { return spill_start_index_; }
+  void set_assigned_register(int reg, bool double_reg) {
+    ASSERT(!HasRegisterAssigned() && !IsSpilled());
+    assigned_register_ = reg;
+    assigned_double_ = double_reg;
+    ConvertOperands();
+  }
+  void MakeSpilled() {
+    ASSERT(!IsSpilled());
+    ASSERT(TopLevel()->HasAllocatedSpillOperand());
+    spilled_ = true;
+    assigned_register_ = kInvalidAssignment;
+    ConvertOperands();
+  }
+
+  // Returns the use position in this live range that follows both the
+  // given start position and the last processed use position.
+  // Note that this modifies the internal state of the live range!
+  UsePosition* NextUsePosition(LifetimePosition start);
+
+  // Returns the use position in this live range for which a register is
+  // required and which follows both the given start position and the
+  // last processed use position. Modifies the internal state!
+  UsePosition* NextRegisterPosition(LifetimePosition start);
+
+  // Returns the use position in this live range for which a register is
+  // beneficial and which follows both the given start position and the
+  // last processed use position. Modifies the internal state!
+  UsePosition* NextUsePositionRegisterIsBeneficial(LifetimePosition start);
+
+  // Can this live range be spilled at the given position?
+  bool CanBeSpilled(LifetimePosition pos);
+
+  void SplitAt(LifetimePosition position, LiveRange* result);
+
+  bool IsDouble() const { return assigned_double_; }
+  bool HasRegisterAssigned() const {
+    return assigned_register_ != kInvalidAssignment;
+  }
+  bool IsSpilled() const { return spilled_; }
+  UsePosition* FirstPosWithHint() const;
+
+  LOperand* FirstHint() const {
+    UsePosition* pos = FirstPosWithHint();
+    if (pos != NULL) return pos->hint();
+    return NULL;
+  }
+
+  LifetimePosition Start() const {
+    ASSERT(!IsEmpty());
+    return first_interval()->start();
+  }
+
+  LifetimePosition End() const {
+    ASSERT(!IsEmpty());
+    return last_interval_->end();
+  }
+
+  bool HasAllocatedSpillOperand() const {
+    return spill_operand_ != NULL && !spill_operand_->IsUnallocated();
+  }
+  LOperand* GetSpillOperand() const { return spill_operand_; }
+  void SetSpillOperand(LOperand* operand) {
+    ASSERT(!operand->IsUnallocated());
+    ASSERT(spill_operand_ != NULL);
+    ASSERT(spill_operand_->IsUnallocated());
+    spill_operand_->ConvertTo(operand->kind(), operand->index());
+  }
+
+  void SetSpillStartIndex(int start) {
+    spill_start_index_ = Min(start, spill_start_index_);
+  }
+
+  bool ShouldBeAllocatedBefore(const LiveRange* other) const;
+  bool CanCover(LifetimePosition position) const;
+  bool Covers(LifetimePosition position);
+  LifetimePosition FirstIntersection(LiveRange* other);
+
+
+  // Add a new interval or a new use position to this live range.
+  void EnsureInterval(LifetimePosition start, LifetimePosition end);
+  void AddUseInterval(LifetimePosition start, LifetimePosition end);
+  UsePosition* AddUsePosition(LifetimePosition pos, LOperand* operand);
+  UsePosition* AddUsePosition(LifetimePosition pos);
+
+  // Shorten the most recently added interval by setting a new start.
+  void ShortenTo(LifetimePosition start);
+
+#ifdef DEBUG
+  // True if target overlaps an existing interval.
+  bool HasOverlap(UseInterval* target) const;
+  void Verify() const;
+#endif
+
+ private:
+  void ConvertOperands();
+  UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
+  void AdvanceLastProcessedMarker(UseInterval* to_start_of,
+                                  LifetimePosition but_not_past) const;
+
+  int id_;
+  bool spilled_;
+  bool assigned_double_;
+  int assigned_register_;
+  UseInterval* last_interval_;
+  UseInterval* first_interval_;
+  UsePosition* first_pos_;
+  LiveRange* parent_;
+  LiveRange* next_;
+  // This is used as a cache, it doesn't affect correctness.
+  mutable UseInterval* current_interval_;
+  UsePosition* last_processed_use_;
+  LOperand* spill_operand_;
+  int spill_start_index_;
+};
+
+
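To make the parent/child bookkeeping concrete, here is a toy sketch of the
splitting invariant (simplified and hypothetical: the real SplitAt also has to
divide the interval and use-position lists):

    #include <cassert>
    #include <cstdio>

    // Toy stand-in for LiveRange's parent/next links; positions are ints.
    struct ToyRange {
      int start;
      int end;
      ToyRange* parent;
      ToyRange* next;
      ToyRange* TopLevel() { return parent != nullptr ? parent : this; }
    };

    // Split [start, end) at pos: the receiver keeps [start, pos), the child
    // covers [pos, end) and links back to the top-level range.
    void SplitAt(ToyRange* range, int pos, ToyRange* child) {
      assert(range->start < pos && pos < range->end);
      child->start = pos;
      child->end = range->end;
      child->parent = range->TopLevel();
      child->next = range->next;
      range->end = pos;
      range->next = child;
    }

    int main() {
      ToyRange r = {0, 20, nullptr, nullptr};
      ToyRange child = {0, 0, nullptr, nullptr};
      SplitAt(&r, 8, &child);
      printf("[%d,%d) -> [%d,%d)\n", r.start, r.end, child.start, child.end);
      assert(child.TopLevel() == &r);  // children always report the top level
      return 0;
    }
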
+class LAllocator BASE_EMBEDDED {
+ public:
+  explicit LAllocator(int first_virtual_register, HGraph* graph)
+      : chunk_(NULL),
+        summaries_(0),
+        next_summary_(NULL),
+        summary_stack_(2),
+        live_in_sets_(0),
+        live_ranges_(16),
+        fixed_live_ranges_(8),
+        fixed_double_live_ranges_(8),
+        unhandled_live_ranges_(8),
+        active_live_ranges_(8),
+        inactive_live_ranges_(8),
+        reusable_slots_(8),
+        next_virtual_register_(first_virtual_register),
+        mode_(NONE),
+        num_registers_(-1),
+        graph_(graph),
+        has_osr_entry_(false) {}
+
+  static void Setup();
+  static void TraceAlloc(const char* msg, ...);
+
+  // Lithium translation support.
+  // Record a use of an input operand in the current instruction.
+  void RecordUse(HValue* value, LUnallocated* operand);
+  // Record the definition of the output operand.
+  void RecordDefinition(HInstruction* instr, LUnallocated* operand);
+  // Record a temporary operand.
+  void RecordTemporary(LUnallocated* operand);
+
+  // Marks the current instruction as a call.
+  void MarkAsCall();
+
+  // Checks whether the value of a given virtual register is tagged.
+  bool HasTaggedValue(int virtual_register) const;
+
+  // Checks whether the value of a given virtual register is a double.
+  bool HasDoubleValue(int virtual_register) const;
+
+  // Begin a new instruction.
+  void BeginInstruction();
+
+  // Summarize the current instruction.
+  void SummarizeInstruction(int index);
+
+  // Omit the current instruction.
+  void OmitInstruction();
+
+  // Control max function size.
+  static int max_initial_value_ids();
+
+  void Allocate(LChunk* chunk);
+
+  const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
+  const ZoneList<LiveRange*>* fixed_live_ranges() const {
+    return &fixed_live_ranges_;
+  }
+  const ZoneList<LiveRange*>* fixed_double_live_ranges() const {
+    return &fixed_double_live_ranges_;
+  }
+
+  LChunk* chunk() const { return chunk_; }
+  HGraph* graph() const { return graph_; }
+
+  void MarkAsOsrEntry() {
+    // There can be only one.
+    ASSERT(!has_osr_entry_);
+    // Simply set a flag to find and process the instruction later.
+    has_osr_entry_ = true;
+  }
+
+#ifdef DEBUG
+  void Verify() const;
+#endif
+
+ private:
+  enum OperationMode {
+    NONE,
+    CPU_REGISTERS,
+    XMM_REGISTERS
+  };
+
+  void MeetRegisterConstraints();
+  void ResolvePhis();
+  void BuildLiveRanges();
+  void AllocateGeneralRegisters();
+  void AllocateDoubleRegisters();
+  void ConnectRanges();
+  void ResolveControlFlow();
+  void PopulatePointerMaps();
+  void ProcessOsrEntry();
+  void AllocateRegisters();
+  bool CanEagerlyResolveControlFlow(HBasicBlock* block) const;
+  inline bool SafePointsAreInOrder() const;
+
+  // Liveness analysis support.
+  void InitializeLivenessAnalysis();
+  BitVector* ComputeLiveOut(HBasicBlock* block);
+  void AddInitialIntervals(HBasicBlock* block, BitVector* live_out);
+  void ProcessInstructions(HBasicBlock* block, BitVector* live);
+  void MeetRegisterConstraints(HBasicBlock* block);
+  void MeetConstraintsBetween(InstructionSummary* first,
+                              InstructionSummary* second,
+                              int gap_index);
+  void ResolvePhis(HBasicBlock* block);
+
+  // Helper methods for building intervals.
+  LOperand* AllocateFixed(LUnallocated* operand, int pos, bool is_tagged);
+  LiveRange* LiveRangeFor(LOperand* operand);
+  void Define(LifetimePosition position, LOperand* operand, LOperand* hint);
+  void Use(LifetimePosition block_start,
+           LifetimePosition position,
+           LOperand* operand,
+           LOperand* hint);
+  void AddConstraintsGapMove(int index, LOperand* from, LOperand* to);
+
+  // Helper methods for updating the live range lists.
+  void AddToActive(LiveRange* range);
+  void AddToInactive(LiveRange* range);
+  void AddToUnhandledSorted(LiveRange* range);
+  void AddToUnhandledUnsorted(LiveRange* range);
+  void SortUnhandled();
+  bool UnhandledIsSorted();
+  void ActiveToHandled(LiveRange* range);
+  void ActiveToInactive(LiveRange* range);
+  void InactiveToHandled(LiveRange* range);
+  void InactiveToActive(LiveRange* range);
+  void FreeSpillSlot(LiveRange* range);
+  LOperand* TryReuseSpillSlot(LiveRange* range);
+
+  // Helper methods for allocating registers.
+  bool TryAllocateFreeReg(LiveRange* range);
+  void AllocateBlockedReg(LiveRange* range);
+  void SplitAndSpillIntersecting(LiveRange* range);
+  LifetimePosition FindOptimalSplitPos(LifetimePosition start,
+                                       LifetimePosition end);
+  LiveRange* Split(LiveRange* range,
+                   LifetimePosition start,
+                   LifetimePosition end);
+  LiveRange* Split(LiveRange* range, LifetimePosition split_pos);
+  void SplitAndSpill(LiveRange* range,
+                     LifetimePosition start,
+                     LifetimePosition end);
+  void SplitAndSpill(LiveRange* range, LifetimePosition at);
+  void Spill(LiveRange* range);
+  bool IsBlockBoundary(LifetimePosition pos);
+  void AddGapMove(int pos, LiveRange* prev, LiveRange* next);
+
+  // Helper methods for resolving control flow.
+  void ResolveControlFlow(LiveRange* range,
+                          HBasicBlock* block,
+                          HBasicBlock* pred);
+
+  // Return parallel move that should be used to connect ranges split at the
+  // given position.
+  LParallelMove* GetConnectingParallelMove(LifetimePosition pos);
+
+  // Return the block which contains the given lifetime position.
+  HBasicBlock* GetBlock(LifetimePosition pos);
+
+  // Current active summary.
+  InstructionSummary* current_summary() const { return summary_stack_.last(); }
+
+  // Get summary for given instruction index.
+  InstructionSummary* GetSummary(int index) const { return summaries_[index]; }
+
+  // Helper methods for the fixed registers.
+  int RegisterCount() const;
+  static int FixedLiveRangeID(int index) { return -index - 1; }
+  static int FixedDoubleLiveRangeID(int index);
+  LiveRange* FixedLiveRangeFor(int index);
+  LiveRange* FixedDoubleLiveRangeFor(int index);
+  LiveRange* LiveRangeFor(int index);
+  HPhi* LookupPhi(LOperand* operand) const;
+  LGap* GetLastGap(HBasicBlock* block) const;
+
+  LChunk* chunk_;
+  ZoneList<InstructionSummary*> summaries_;
+  InstructionSummary* next_summary_;
+
+  ZoneList<InstructionSummary*> summary_stack_;
+
+  // During liveness analysis keep a mapping from block id to live_in sets
+  // for blocks already analyzed.
+  ZoneList<BitVector*> live_in_sets_;
+
+  // Liveness analysis results.
+  ZoneList<LiveRange*> live_ranges_;
+
+  // Lists of live ranges.
+  ZoneList<LiveRange*> fixed_live_ranges_;
+  ZoneList<LiveRange*> fixed_double_live_ranges_;
+  ZoneList<LiveRange*> unhandled_live_ranges_;
+  ZoneList<LiveRange*> active_live_ranges_;
+  ZoneList<LiveRange*> inactive_live_ranges_;
+  ZoneList<LiveRange*> reusable_slots_;
+
+  // Next virtual register number to be assigned to temporaries.
+  int next_virtual_register_;
+
+  OperationMode mode_;
+  int num_registers_;
+
+  HGraph* graph_;
+
+  bool has_osr_entry_;
+
+  DISALLOW_COPY_AND_ASSIGN(LAllocator);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_LITHIUM_ALLOCATOR_H_
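The private methods above run as ordered phases driven by Allocate. A sketch
of the presumed sequence (the actual definition lives in lithium-allocator.cc,
which is not part of this hunk):

    // Assumed phase order inside LAllocator::Allocate(chunk):
    //   MeetRegisterConstraints();   // pin fixed and required operands
    //   ResolvePhis();               // lower phis to gap moves
    //   BuildLiveRanges();           // backward liveness -> use intervals
    //   AllocateGeneralRegisters();  // linear scan in CPU_REGISTERS mode
    //   AllocateDoubleRegisters();   // linear scan in XMM_REGISTERS mode
    //   PopulatePointerMaps();       // record tagged values for the GC
    //   if (has_osr_entry_) ProcessOsrEntry();
    //   ConnectRanges();             // moves between split siblings
    //   ResolveControlFlow();        // moves across block boundaries
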
index 83b703f..0f7c12d 100644 (file)
@@ -140,9 +140,13 @@ Debug.LiveEdit = new function() {
     // Collect shared infos for functions whose code need to be patched.
     var replaced_function_infos = new Array();
     for (var i = 0; i < replace_code_list.length; i++) {
-      var info_wrapper = replace_code_list[i].live_shared_info_wrapper;
-      if (info_wrapper) {
-        replaced_function_infos.push(info_wrapper);
+      var live_shared_function_infos =
+          replace_code_list[i].live_shared_function_infos;
+
+      if (live_shared_function_infos) {
+        for (var j = 0; j < live_shared_function_infos.length; j++) {
+          replaced_function_infos.push(live_shared_function_infos[j]);
+        }
       }
     }
 
@@ -204,6 +208,13 @@ Debug.LiveEdit = new function() {
       // unchanged and whether positions changed at all.
       PatchPositions(update_positions_list[i], diff_array,
           position_patch_report);
+
+      if (update_positions_list[i].live_shared_function_infos) {
+        update_positions_list[i].live_shared_function_infos.
+            forEach(function (info) {
+                %LiveEditFunctionSourceUpdated(info.raw_array);
+              });
+      }
     }
 
     break_points_restorer(pos_translator, old_script);
@@ -294,29 +305,34 @@ Debug.LiveEdit = new function() {
   // Replaces function's Code.
   function PatchFunctionCode(old_node, change_log) {
     var new_info = old_node.corresponding_node.info;
-    var shared_info_wrapper = old_node.live_shared_info_wrapper;
-    if (shared_info_wrapper) {
-      %LiveEditReplaceFunctionCode(new_info.raw_array,
-          shared_info_wrapper.raw_array);
-
-      // The function got a new code. However, this new code brings all new
-      // instances of SharedFunctionInfo for nested functions. However,
-      // we want the original instances to be used wherever possible.
-      // (This is because old instances and new instances will be both
-      // linked to a script and breakpoints subsystem does not really
-      // expects this; neither does LiveEdit subsystem on next call).
-      for (var i = 0; i < old_node.children.length; i++) {
-        if (old_node.children[i].corresponding_node) {
-          var corresponding_child = old_node.children[i].corresponding_node;
-          var child_shared_info_wrapper =
-              old_node.children[i].live_shared_info_wrapper;
-          if (child_shared_info_wrapper) {
-            %LiveEditReplaceRefToNestedFunction(shared_info_wrapper.info,
-                corresponding_child.info.shared_function_info,
-                child_shared_info_wrapper.info);
+    if (old_node.live_shared_function_infos) {
+      old_node.live_shared_function_infos.forEach(function (old_info) {
+        %LiveEditReplaceFunctionCode(new_info.raw_array,
+                                     old_info.raw_array);
+
+        // The function got new code. However, this new code brings all new
+        // instances of SharedFunctionInfo for nested functions, while we
+        // want the original instances to be used wherever possible.
+        // (This is because old and new instances will both be linked to a
+        // script, and neither the breakpoints subsystem nor the LiveEdit
+        // subsystem on its next call really expects this.)
+        for (var i = 0; i < old_node.children.length; i++) {
+          if (old_node.children[i].corresponding_node) {
+            var corresponding_child_info =
+                old_node.children[i].corresponding_node.info.
+                    shared_function_info;
+
+            if (old_node.children[i].live_shared_function_infos) {
+              old_node.children[i].live_shared_function_infos.
+                  forEach(function (old_child_info) {
+                    %LiveEditReplaceRefToNestedFunction(old_info.info,
+                                                        corresponding_child_info,
+                                                        old_child_info.info);
+                  });
+            }
           }
         }
-      }
+      });
 
       change_log.push( {function_patched: new_info.function_name} );
     } else {
@@ -330,10 +346,13 @@ Debug.LiveEdit = new function() {
   // one representing its old version). This way the function still
   // may access its own text.
   function LinkToOldScript(old_info_node, old_script, report_array) {
-    var shared_info = old_info_node.live_shared_info_wrapper;
-    if (shared_info) {
-      %LiveEditFunctionSetScript(shared_info.info, old_script);
-      report_array.push( { name: shared_info.function_name } );
+    if (old_info_node.live_shared_function_infos) {
+      old_info_node.live_shared_function_infos.
+          forEach(function (info) {
+            %LiveEditFunctionSetScript(info.info, old_script);
+          });
+
+      report_array.push( { name: old_info_node.info.function_name } );
     } else {
       report_array.push(
           { name: old_info_node.info.function_name, not_found: true } );
@@ -525,7 +544,7 @@ Debug.LiveEdit = new function() {
     this.textual_corresponding_node = void 0;
     this.textually_unmatched_new_nodes = void 0;
 
-    this.live_shared_info_wrapper = void 0;
+    this.live_shared_function_infos = void 0;
   }
 
   // From array of function infos that is implicitly a tree creates
@@ -765,23 +784,27 @@ Debug.LiveEdit = new function() {
       shared_infos.push(new SharedInfoWrapper(shared_raw_list[i]));
     }
 
-    // Finds SharedFunctionInfo that corresponds compile info with index
+    // Finds all SharedFunctionInfos that correspond to the compile info
     // in old version of the script.
-    function FindFunctionInfo(compile_info) {
+    function FindFunctionInfos(compile_info) {
+      var wrappers = [];
+
       for (var i = 0; i < shared_infos.length; i++) {
         var wrapper = shared_infos[i];
         if (wrapper.start_position == compile_info.start_position &&
             wrapper.end_position == compile_info.end_position) {
-          return wrapper;
+          wrappers.push(wrapper);
         }
       }
+
+      if (wrappers.length > 0) {
+        return wrappers;
+      }
     }
 
     function TraverseTree(node) {
-      var info_wrapper = FindFunctionInfo(node.info);
-      if (info_wrapper) {
-        node.live_shared_info_wrapper = info_wrapper;
-      }
+      node.live_shared_function_infos = FindFunctionInfos(node.info);
+
       for (var i = 0; i < node.children.length; i++) {
         TraverseTree(node.children[i]);
       }
@@ -817,16 +840,18 @@ Debug.LiveEdit = new function() {
 
  // Changes positions (including all statements) in function.
   function PatchPositions(old_info_node, diff_array, report_array) {
-    var shared_info_wrapper = old_info_node.live_shared_info_wrapper;
-    if (!shared_info_wrapper) {
+    if (old_info_node.live_shared_function_infos) {
+      old_info_node.live_shared_function_infos.forEach(function (info) {
+          %LiveEditPatchFunctionPositions(info.raw_array,
+                                          diff_array);
+      });
+
+      report_array.push( { name: old_info_node.info.function_name } );
+    } else {
       // TODO(LiveEdit): function is not compiled yet or is already collected.
       report_array.push(
           { name: old_info_node.info.function_name, info_not_found: true } );
-      return;
     }
-    %LiveEditPatchFunctionPositions(shared_info_wrapper.raw_array,
-        diff_array);
-    report_array.push( { name: old_info_node.info.function_name } );
   }
 
   // Adds a suffix to script name to mark that it is old version.
index 642b3e6..c4cb68e 100644 (file)
@@ -31,7 +31,9 @@
 #include "liveedit.h"
 
 #include "compiler.h"
+#include "compilation-cache.h"
 #include "debug.h"
+#include "deoptimizer.h"
 #include "global-handles.h"
 #include "memory.h"
 #include "oprofile-agent.h"
@@ -605,18 +607,18 @@ class FunctionInfoListener {
 
   void FunctionDone() {
     HandleScope scope;
-    Object* element =
-        result_->GetElementNoExceptionThrown(current_parent_index_);
-    FunctionInfoWrapper info = FunctionInfoWrapper::cast(element);
+    FunctionInfoWrapper info =
+        FunctionInfoWrapper::cast(
+            result_->GetElementNoExceptionThrown(current_parent_index_));
     current_parent_index_ = info.GetParentIndex();
   }
 
   // Saves only function code, because for a script function we
   // may never create a SharedFunctionInfo object.
   void FunctionCode(Handle<Code> function_code) {
-    Object* element =
-        result_->GetElementNoExceptionThrown(current_parent_index_);
-    FunctionInfoWrapper info = FunctionInfoWrapper::cast(element);
+    FunctionInfoWrapper info =
+        FunctionInfoWrapper::cast(
+            result_->GetElementNoExceptionThrown(current_parent_index_));
     info.SetFunctionCode(function_code, Handle<Object>(Heap::null_value()));
   }
 
@@ -626,9 +628,9 @@ class FunctionInfoListener {
     if (!shared->IsSharedFunctionInfo()) {
       return;
     }
-    Object* element =
-        result_->GetElementNoExceptionThrown(current_parent_index_);
-    FunctionInfoWrapper info = FunctionInfoWrapper::cast(element);
+    FunctionInfoWrapper info =
+        FunctionInfoWrapper::cast(
+            result_->GetElementNoExceptionThrown(current_parent_index_));
     info.SetFunctionCode(Handle<Code>(shared->code()),
         Handle<Object>(shared->scope_info()));
     info.SetSharedFunctionInfo(shared);
@@ -828,6 +830,61 @@ static bool IsJSFunctionCode(Code* code) {
 }
 
 
+// Returns true if an instance of candidate was inlined into function's code.
+static bool IsInlined(JSFunction* function, SharedFunctionInfo* candidate) {
+  AssertNoAllocation no_gc;
+
+  if (function->code()->kind() != Code::OPTIMIZED_FUNCTION) return false;
+
+  DeoptimizationInputData* data =
+      DeoptimizationInputData::cast(function->code()->deoptimization_data());
+
+  if (data == Heap::empty_fixed_array()) return false;
+
+  FixedArray* literals = data->LiteralArray();
+
+  int inlined_count = data->InlinedFunctionCount()->value();
+  for (int i = 0; i < inlined_count; ++i) {
+    JSFunction* inlined = JSFunction::cast(literals->get(i));
+    if (inlined->shared() == candidate) return true;
+  }
+
+  return false;
+}
+
+
+class DependentFunctionsDeoptimizingVisitor : public OptimizedFunctionVisitor {
+ public:
+  explicit DependentFunctionsDeoptimizingVisitor(
+      SharedFunctionInfo* function_info)
+      : function_info_(function_info) {}
+
+  virtual void EnterContext(Context* context) {
+  }
+
+  virtual void VisitFunction(JSFunction* function) {
+    if (function->shared() == function_info_ ||
+        IsInlined(function, function_info_)) {
+      Deoptimizer::DeoptimizeFunction(function);
+    }
+  }
+
+  virtual void LeaveContext(Context* context) {
+  }
+
+ private:
+  SharedFunctionInfo* function_info_;
+};
+
+
+static void DeoptimizeDependentFunctions(SharedFunctionInfo* function_info) {
+  AssertNoAllocation no_allocation;
+
+  DependentFunctionsDeoptimizingVisitor visitor(function_info);
+  Deoptimizer::VisitAllOptimizedFunctions(&visitor);
+}
+
+
 MaybeObject* LiveEdit::ReplaceFunctionCode(
     Handle<JSArray> new_compile_info_array,
     Handle<JSArray> shared_info_array) {
@@ -864,17 +921,38 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
   shared_info->set_construct_stub(
       Builtins::builtin(Builtins::JSConstructStubGeneric));
 
+  DeoptimizeDependentFunctions(*shared_info);
+  CompilationCache::Remove(shared_info);
+
+  return Heap::undefined_value();
+}
+
+
+MaybeObject* LiveEdit::FunctionSourceUpdated(
+    Handle<JSArray> shared_info_array) {
+  HandleScope scope;
+
+  if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
+    return Top::ThrowIllegalOperation();
+  }
+
+  SharedInfoWrapper shared_info_wrapper(shared_info_array);
+  Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
+
+  DeoptimizeDependentFunctions(*shared_info);
+  CompilationCache::Remove(shared_info);
+
   return Heap::undefined_value();
 }
 
 
-// TODO(635): Eval caches its scripts (same text -- same compiled info).
-// Make sure we clear such caches.
 void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
                                  Handle<Object> script_handle) {
   Handle<SharedFunctionInfo> shared_info =
       Handle<SharedFunctionInfo>::cast(UnwrapJSValue(function_wrapper));
   shared_info->set_script(*script_handle);
+
+  CompilationCache::Remove(shared_info);
 }
 
 
@@ -1135,11 +1213,14 @@ void LiveEdit::ReplaceRefToNestedFunction(
 // Check an activation against list of functions. If there is a function
 // that matches, its status in result array is changed to status argument value.
 static bool CheckActivation(Handle<JSArray> shared_info_array,
-                            Handle<JSArray> result, StackFrame* frame,
+                            Handle<JSArray> result,
+                            StackFrame* frame,
                             LiveEdit::FunctionPatchabilityStatus status) {
-  if (!frame->is_java_script()) {
-    return false;
-  }
+  if (!frame->is_java_script()) return false;
+
+  Handle<JSFunction> function(
+      JSFunction::cast(JavaScriptFrame::cast(frame)->function()));
+
   int len = Smi::cast(shared_info_array->length())->value();
   for (int i = 0; i < len; i++) {
     JSValue* wrapper =
@@ -1147,7 +1228,7 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
     Handle<SharedFunctionInfo> shared(
         SharedFunctionInfo::cast(wrapper->value()));
 
-    if (frame->code() == shared->code()) {
+    if (function->shared() == *shared || IsInlined(*function, *shared)) {
       SetElement(result, i, Handle<Smi>(Smi::FromInt(status)));
       return true;
     }
index c9bf96d..3632180 100644 (file)
@@ -87,6 +87,8 @@ class LiveEdit : AllStatic {
       Handle<JSArray> new_compile_info_array,
       Handle<JSArray> shared_info_array);
 
+  static MaybeObject* FunctionSourceUpdated(Handle<JSArray> shared_info_array);
+
   // Updates script field in FunctionSharedInfo.
   static void SetFunctionScript(Handle<JSValue> function_wrapper,
                                 Handle<Object> script_handle);
index 55f15de..6723347 100644 (file)
 
 #include "bootstrapper.h"
 #include "code-stubs.h"
+#include "deoptimizer.h"
 #include "global-handles.h"
 #include "log.h"
 #include "macro-assembler.h"
+#include "runtime-profiler.h"
 #include "serialize.h"
 #include "string-stream.h"
+#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -147,6 +150,7 @@ void StackTracer::Trace(TickSample* sample) {
   sample->function = NULL;
   sample->frames_count = 0;
 
+  // Avoid collecting traces while doing GC.
   if (sample->state == GC) return;
 
   const Address js_entry_sp = Top::js_entry_sp(Top::GetCurrentThread());
@@ -155,15 +159,18 @@ void StackTracer::Trace(TickSample* sample) {
     return;
   }
 
-  const Address functionAddr =
+  const Address function_address =
       sample->fp + JavaScriptFrameConstants::kFunctionOffset;
   if (SafeStackFrameIterator::IsWithinBounds(sample->sp, js_entry_sp,
-                                             functionAddr)) {
-    sample->function = Memory::Address_at(functionAddr) - kHeapObjectTag;
+                                             function_address)) {
+    Object* object = Memory::Object_at(function_address);
+    if (object->IsHeapObject()) {
+      sample->function = HeapObject::cast(object)->address();
+    }
   }
 
   int i = 0;
-  const Address callback = VMState::external_callback();
+  const Address callback = Top::external_callback();
   // Surprisingly, PC can point _exactly_ to callback start, with good
   // probability, and this will result in reporting fake nested
   // callback call.
@@ -174,9 +181,10 @@ void StackTracer::Trace(TickSample* sample) {
   SafeStackTraceFrameIterator it(sample->fp, sample->sp,
                                  sample->sp, js_entry_sp);
   while (!it.done() && i < TickSample::kMaxFramesCount) {
-    sample->stack[i++] =
-        reinterpret_cast<Address>(it.frame()->function_slot_object()) -
-            kHeapObjectTag;
+    Object* object = it.frame()->function_slot_object();
+    if (object->IsHeapObject()) {
+      sample->stack[i++] = HeapObject::cast(object)->address();
+    }
     it.Advance();
   }
   sample->frames_count = i;
@@ -189,8 +197,10 @@ void StackTracer::Trace(TickSample* sample) {
 //
 class Ticker: public Sampler {
  public:
-  explicit Ticker(int interval):
-      Sampler(interval, FLAG_prof), window_(NULL), profiler_(NULL) {}
+  explicit Ticker(int interval) :
+      Sampler(interval),
+      window_(NULL),
+      profiler_(NULL) {}
 
   ~Ticker() { if (IsActive()) Stop(); }
 
@@ -206,22 +216,24 @@ class Ticker: public Sampler {
 
   void ClearWindow() {
     window_ = NULL;
-    if (!profiler_ && IsActive()) Stop();
+    if (!profiler_ && IsActive() && !RuntimeProfiler::IsEnabled()) Stop();
   }
 
   void SetProfiler(Profiler* profiler) {
+    ASSERT(profiler_ == NULL);
     profiler_ = profiler;
+    IncreaseProfilingDepth();
     if (!FLAG_prof_lazy && !IsActive()) Start();
   }
 
   void ClearProfiler() {
+    DecreaseProfilingDepth();
     profiler_ = NULL;
-    if (!window_ && IsActive()) Stop();
+    if (!window_ && IsActive() && !RuntimeProfiler::IsEnabled()) Stop();
   }
 
  protected:
   virtual void DoSampleStack(TickSample* sample) {
-    ASSERT(IsSynchronous());
     StackTracer::Trace(sample);
   }
 
@@ -759,6 +771,15 @@ void Logger::SetterCallbackEvent(String* name, Address entry_point) {
 }
 
 
+static const char* ComputeMarker(Code* code) {
+  switch (code->kind()) {
+    case Code::FUNCTION: return code->optimizable() ? "~" : "";
+    case Code::OPTIMIZED_FUNCTION: return "*";
+    default: return "";
+  }
+}
+
+
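With ComputeMarker in place, code-creation lines in the profiler log carry an
optimization marker in front of the function name. Illustrative lines
(addresses and sizes made up):

    code-creation,LazyCompile,0x2ad8f5100,180,"~foo bar.js:7"   // optimizable
    code-creation,LazyCompile,0x2ad8f6200,412,"*foo bar.js:7"   // optimized
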
 void Logger::CodeCreateEvent(LogEventsAndTags tag,
                              Code* code,
                              const char* comment) {
@@ -767,7 +788,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
   LogMessageBuilder msg;
   msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
   msg.AppendAddress(code->address());
-  msg.Append(",%d,\"", code->ExecutableSize());
+  msg.Append(",%d,\"%s", code->ExecutableSize(), ComputeMarker(code));
   for (const char* p = comment; *p != '\0'; p++) {
     if (*p == '"') {
       msg.Append('\\');
@@ -794,7 +815,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) {
       name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
   msg.AppendAddress(code->address());
-  msg.Append(",%d,\"%s\"", code->ExecutableSize(), *str);
+  msg.Append(",%d,\"%s%s\"", code->ExecutableSize(), ComputeMarker(code), *str);
   LowLevelCodeCreateEvent(code, &msg);
   if (FLAG_compress_log) {
     ASSERT(compression_helper_ != NULL);
@@ -818,8 +839,12 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
       source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
   msg.AppendAddress(code->address());
-  msg.Append(",%d,\"%s %s:%d\"",
-             code->ExecutableSize(), *str, *sourcestr, line);
+  msg.Append(",%d,\"%s%s %s:%d\"",
+             code->ExecutableSize(),
+             ComputeMarker(code),
+             *str,
+             *sourcestr,
+             line);
   LowLevelCodeCreateEvent(code, &msg);
   if (FLAG_compress_log) {
     ASSERT(compression_helper_ != NULL);
@@ -1226,7 +1251,9 @@ void Logger::PauseProfiler(int flags, int tag) {
     if (--cpu_profiler_nesting_ == 0) {
       profiler_->pause();
       if (FLAG_prof_lazy) {
-        if (!FLAG_sliding_state_window) ticker_->Stop();
+        if (!FLAG_sliding_state_window && !RuntimeProfiler::IsEnabled()) {
+          ticker_->Stop();
+        }
         FLAG_log_code = false;
         // Must be the same message as Log::kDynamicBufferSeal.
         LOG(UncheckedStringEvent("profiler", "pause"));
@@ -1262,7 +1289,9 @@ void Logger::ResumeProfiler(int flags, int tag) {
         LogCompiledFunctions();
         LogFunctionObjects();
         LogAccessorCallbacks();
-        if (!FLAG_sliding_state_window) ticker_->Start();
+        if (!FLAG_sliding_state_window && !ticker_->IsActive()) {
+          ticker_->Start();
+        }
       }
       profiler_->resume();
     }
@@ -1295,9 +1324,41 @@ int Logger::GetLogLines(int from_pos, char* dest_buf, int max_size) {
 }
 
 
-static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis) {
+class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
+ public:
+  EnumerateOptimizedFunctionsVisitor(Handle<SharedFunctionInfo>* sfis,
+                                     Handle<Code>* code_objects,
+                                     int* count)
+      : sfis_(sfis), code_objects_(code_objects), count_(count) { }
+
+  virtual void EnterContext(Context* context) {}
+  virtual void LeaveContext(Context* context) {}
+
+  virtual void VisitFunction(JSFunction* function) {
+    if (sfis_ != NULL) {
+      sfis_[*count_] = Handle<SharedFunctionInfo>(function->shared());
+    }
+    if (code_objects_ != NULL) {
+      ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
+      code_objects_[*count_] = Handle<Code>(function->code());
+    }
+    *count_ = *count_ + 1;
+  }
+
+ private:
+  Handle<SharedFunctionInfo>* sfis_;
+  Handle<Code>* code_objects_;
+  int* count_;
+};
+
+
+static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis,
+                                      Handle<Code>* code_objects) {
   AssertNoAllocation no_alloc;
   int compiled_funcs_count = 0;
+
+  // Iterate the heap to find shared function info objects and record
+  // the unoptimized code for them.
   HeapIterator iterator;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (!obj->IsSharedFunctionInfo()) continue;
@@ -1305,11 +1366,22 @@ static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis) {
     if (sfi->is_compiled()
         && (!sfi->script()->IsScript()
             || Script::cast(sfi->script())->HasValidSource())) {
-      if (sfis != NULL)
+      if (sfis != NULL) {
         sfis[compiled_funcs_count] = Handle<SharedFunctionInfo>(sfi);
+      }
+      if (code_objects != NULL) {
+        code_objects[compiled_funcs_count] = Handle<Code>(sfi->code());
+      }
       ++compiled_funcs_count;
     }
   }
+
+  // Iterate all optimized functions in all contexts.
+  EnumerateOptimizedFunctionsVisitor visitor(sfis,
+                                             code_objects,
+                                             &compiled_funcs_count);
+  Deoptimizer::VisitAllOptimizedFunctions(&visitor);
+
   return compiled_funcs_count;
 }
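EnumerateCompiledFunctions is deliberately two-phase. A sketch of the
count-then-fill idiom, matching the LogCompiledFunctions caller below:

    // Pass 1 with NULL arrays only counts; pass 2 fills the sized buffers.
    //   int n = EnumerateCompiledFunctions(NULL, NULL);
    //   ScopedVector< Handle<SharedFunctionInfo> > sfis(n);
    //   ScopedVector< Handle<Code> > code_objects(n);
    //   EnumerateCompiledFunctions(sfis.start(), code_objects.start());
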
 
@@ -1321,9 +1393,11 @@ void Logger::LogCodeObject(Object* object) {
     const char* description = "Unknown code from the snapshot";
     switch (code_object->kind()) {
       case Code::FUNCTION:
+      case Code::OPTIMIZED_FUNCTION:
         return;  // We log this later using LogCompiledFunctions.
-      case Code::BINARY_OP_IC:
-        // fall through
+      case Code::BINARY_OP_IC:  // fall through
+      case Code::TYPE_RECORDING_BINARY_OP_IC:   // fall through
+      case Code::COMPARE_IC:  // fall through
       case Code::STUB:
         description =
             CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
@@ -1406,9 +1480,10 @@ void Logger::LogCodeObjects() {
 
 void Logger::LogCompiledFunctions() {
   HandleScope scope;
-  const int compiled_funcs_count = EnumerateCompiledFunctions(NULL);
+  const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL);
   ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
-  EnumerateCompiledFunctions(sfis.start());
+  ScopedVector< Handle<Code> > code_objects(compiled_funcs_count);
+  EnumerateCompiledFunctions(sfis.start(), code_objects.start());
 
   // During iteration, there can be heap allocation due to
   // GetScriptLineNumber call.
@@ -1425,18 +1500,18 @@ void Logger::LogCompiledFunctions() {
         if (line_num > 0) {
           PROFILE(CodeCreateEvent(
               Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
-              shared->code(), *func_name,
+              *code_objects[i], *func_name,
               *script_name, line_num + 1));
         } else {
           // Can't distinguish eval and script here, so always use Script.
           PROFILE(CodeCreateEvent(
               Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
-              shared->code(), *script_name));
+              *code_objects[i], *script_name));
         }
       } else {
         PROFILE(CodeCreateEvent(
             Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
-            shared->code(), *func_name));
+            *code_objects[i], *func_name));
       }
     } else if (shared->IsApiFunction()) {
       // API function.
@@ -1450,7 +1525,7 @@ void Logger::LogCompiledFunctions() {
       }
     } else {
       PROFILE(CodeCreateEvent(
-          Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
+          Logger::LAZY_COMPILE_TAG, *code_objects[i], *func_name));
     }
   }
 }
@@ -1571,8 +1646,6 @@ bool Logger::Setup() {
     }
   }
 
-  ASSERT(VMState::is_outermost_external());
-
   if (FLAG_ll_prof) LogCodeInfo();
 
   ticker_ = new Ticker(kSamplingIntervalMs);
@@ -1604,7 +1677,6 @@ bool Logger::Setup() {
   }
 
   LogMessageBuilder::set_write_failure_handler(StopLoggingAndProfiling);
-
   return true;
 
 #else
@@ -1613,6 +1685,17 @@ bool Logger::Setup() {
 }
 
 
+void Logger::EnsureTickerStarted() {
+  ASSERT(ticker_ != NULL);
+  if (!ticker_->IsActive()) ticker_->Start();
+}
+
+
+void Logger::EnsureTickerStopped() {
+  if (ticker_ != NULL && ticker_->IsActive()) ticker_->Stop();
+}
+
+
 void Logger::TearDown() {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   LogMessageBuilder::set_write_failure_handler(NULL);
index 3a4d79b..54b131b 100644 (file)
--- a/src/log.h
+++ b/src/log.h
@@ -149,6 +149,9 @@ class Logger {
   // Acquires resources for logging if the right flags are set.
   static bool Setup();
 
+  static void EnsureTickerStarted();
+  static void EnsureTickerStopped();
+
   // Frees resources acquired in Setup.
   static void TearDown();
 
index 40194e3..b570db9 100644 (file)
@@ -215,6 +215,121 @@ void MarkCompactCollector::Finish() {
 
 static MarkingStack marking_stack;
 
+class FlushCode : public AllStatic {
+ public:
+  static void AddCandidate(SharedFunctionInfo* shared_info) {
+    SetNextCandidate(shared_info, shared_function_info_candidates_head_);
+    shared_function_info_candidates_head_ = shared_info;
+  }
+
+
+  static void AddCandidate(JSFunction* function) {
+    ASSERT(function->unchecked_code() ==
+           function->unchecked_shared()->unchecked_code());
+
+    SetNextCandidate(function, jsfunction_candidates_head_);
+    jsfunction_candidates_head_ = function;
+  }
+
+
+  static void ProcessCandidates() {
+    ProcessSharedFunctionInfoCandidates();
+    ProcessJSFunctionCandidates();
+  }
+
+ private:
+  static void ProcessJSFunctionCandidates() {
+    Code* lazy_compile = Builtins::builtin(Builtins::LazyCompile);
+
+    JSFunction* candidate = jsfunction_candidates_head_;
+    JSFunction* next_candidate;
+    while (candidate != NULL) {
+      next_candidate = GetNextCandidate(candidate);
+
+      SharedFunctionInfo* shared = candidate->unchecked_shared();
+
+      Code* code = shared->unchecked_code();
+      if (!code->IsMarked()) {
+        shared->set_code(lazy_compile);
+        candidate->set_code(lazy_compile);
+      } else {
+        candidate->set_code(shared->unchecked_code());
+      }
+
+      candidate = next_candidate;
+    }
+
+    jsfunction_candidates_head_ = NULL;
+  }
+
+
+  static void ProcessSharedFunctionInfoCandidates() {
+    Code* lazy_compile = Builtins::builtin(Builtins::LazyCompile);
+
+    SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
+    SharedFunctionInfo* next_candidate;
+    while (candidate != NULL) {
+      next_candidate = GetNextCandidate(candidate);
+      SetNextCandidate(candidate, NULL);
+
+      Code* code = candidate->unchecked_code();
+      if (!code->IsMarked()) {
+        candidate->set_code(lazy_compile);
+      }
+
+      candidate = next_candidate;
+    }
+
+    shared_function_info_candidates_head_ = NULL;
+  }
+
+
+  static JSFunction** GetNextCandidateField(JSFunction* candidate) {
+    return reinterpret_cast<JSFunction**>(
+        candidate->address() + JSFunction::kCodeEntryOffset);
+  }
+
+
+  static JSFunction* GetNextCandidate(JSFunction* candidate) {
+    return *GetNextCandidateField(candidate);
+  }
+
+
+  static void SetNextCandidate(JSFunction* candidate,
+                               JSFunction* next_candidate) {
+    *GetNextCandidateField(candidate) = next_candidate;
+  }
+
+
+  STATIC_ASSERT(kPointerSize <= Code::kHeaderSize - Code::kHeaderPaddingStart);
+
+
+  static SharedFunctionInfo** GetNextCandidateField(
+      SharedFunctionInfo* candidate) {
+    Code* code = candidate->unchecked_code();
+    return reinterpret_cast<SharedFunctionInfo**>(
+        code->address() + Code::kHeaderPaddingStart);
+  }
+
+
+  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
+    return *GetNextCandidateField(candidate);
+  }
+
+
+  static void SetNextCandidate(SharedFunctionInfo* candidate,
+                               SharedFunctionInfo* next_candidate) {
+    *GetNextCandidateField(candidate) = next_candidate;
+  }
+
+  static JSFunction* jsfunction_candidates_head_;
+
+  static SharedFunctionInfo* shared_function_info_candidates_head_;
+};
+
+JSFunction* FlushCode::jsfunction_candidates_head_ = NULL;
+
+SharedFunctionInfo* FlushCode::shared_function_info_candidates_head_ = NULL;
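FlushCode keeps its candidate lists without any allocation: the "next" link is
threaded through memory the candidate already owns (the code-entry field of a
JSFunction, or the header padding of the SharedFunctionInfo's code object,
which the STATIC_ASSERT above guarantees is wide enough for a pointer). A toy
illustration of the intrusive-list trick with a plain struct:

    #include <cstdio>

    // Toy object with a field we temporarily repurpose as a list link, the
    // way FlushCode reuses kCodeEntryOffset / kHeaderPaddingStart.
    struct Obj {
      const char* name;
      void* scratch;  // stands in for the repurposed field
    };

    static Obj* head = nullptr;

    void AddCandidate(Obj* o) {
      o->scratch = head;  // thread the list through the object itself
      head = o;
    }

    void ProcessCandidates() {
      for (Obj* o = head; o != nullptr;) {
        Obj* next = static_cast<Obj*>(o->scratch);
        printf("candidate: %s\n", o->name);
        o->scratch = nullptr;  // clear the field, as the real code does
        o = next;
      }
      head = nullptr;
    }

    int main() {
      Obj a = {"a", nullptr};
      Obj b = {"b", nullptr};
      AddCandidate(&a);
      AddCandidate(&b);
      ProcessCandidates();  // prints b, then a (LIFO order)
      return 0;
    }
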
 
 static inline HeapObject* ShortCircuitConsString(Object** p) {
   // Optimization: If the heap object pointed to by p is a non-symbol
@@ -260,8 +375,13 @@ class StaticMarkingVisitor : public StaticVisitorBase {
   static void EnableCodeFlushing(bool enabled) {
     if (enabled) {
       table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
+      table_.Register(kVisitSharedFunctionInfo,
+                      &VisitSharedFunctionInfoAndFlushCode);
+
     } else {
       table_.Register(kVisitJSFunction, &VisitJSFunction);
+      table_.Register(kVisitSharedFunctionInfo,
+                      &VisitSharedFunctionInfoGeneric);
     }
   }
 
@@ -287,8 +407,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
                                       Context::MarkCompactBodyDescriptor,
                                       void>::Visit);
 
-    table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
-
     table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
     table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
     table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
@@ -304,7 +422,11 @@ class StaticMarkingVisitor : public StaticVisitorBase {
 
     table_.Register(kVisitCode, &VisitCode);
 
-    table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
+    table_.Register(kVisitSharedFunctionInfo,
+                    &VisitSharedFunctionInfoAndFlushCode);
+
+    table_.Register(kVisitJSFunction,
+                    &VisitJSFunctionAndFlushCode);
 
     table_.Register(kVisitPropertyCell,
                     &FixedBodyVisitor<StaticMarkingVisitor,
@@ -350,6 +472,16 @@ class StaticMarkingVisitor : public StaticVisitorBase {
     }
   }
 
+  static void VisitGlobalPropertyCell(RelocInfo* rinfo) {
+    ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
+    Object* cell = rinfo->target_cell();
+    Object* old_cell = cell;
+    VisitPointer(&cell);
+    if (cell != old_cell) {
+      rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
+    }
+  }
+
   static inline void VisitDebugTarget(RelocInfo* rinfo) {
     ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
             rinfo->IsPatchedReturnSequence()) ||
@@ -446,62 +578,75 @@ class StaticMarkingVisitor : public StaticVisitorBase {
         function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
   }
 
-
-  static void FlushCodeForFunction(JSFunction* function) {
+  inline static bool IsFlushable(JSFunction* function) {
     SharedFunctionInfo* shared_info = function->unchecked_shared();
 
-    if (shared_info->IsMarked()) return;
-
-    // Special handling if the function and shared info objects
-    // have different code objects.
-    if (function->unchecked_code() != shared_info->unchecked_code()) {
-      // If the shared function has been flushed but the function has not,
-      // we flush the function if possible.
-      if (!IsCompiled(shared_info) &&
-          IsCompiled(function) &&
-          !function->unchecked_code()->IsMarked()) {
-        function->set_code(shared_info->unchecked_code());
-      }
-      return;
+    // Code is either on the stack, in the compilation cache, or referenced
+    // by an optimized version of the function.
+    if (function->unchecked_code()->IsMarked()) {
+      shared_info->set_code_age(0);
+      return false;
+    }
+
+    // We do not flush code for optimized functions.
+    if (function->code() != shared_info->unchecked_code()) {
+      return false;
     }
 
-    // Code is either on stack or in compilation cache.
+    return IsFlushable(shared_info);
+  }
+
+  inline static bool IsFlushable(SharedFunctionInfo* shared_info) {
+    // Code is either on the stack, in the compilation cache, or referenced
+    // by an optimized version of the function.
     if (shared_info->unchecked_code()->IsMarked()) {
       shared_info->set_code_age(0);
-      return;
+      return false;
     }
 
     // The function must be compiled and have the source code available,
     // to be able to recompile it in case we need the function again.
-    if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) return;
+    if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) {
+      return false;
+    }
 
     // We never flush code for Api functions.
     Object* function_data = shared_info->function_data();
     if (function_data->IsHeapObject() &&
         (SafeMap(function_data)->instance_type() ==
          FUNCTION_TEMPLATE_INFO_TYPE)) {
-      return;
+      return false;
     }
 
     // Only flush code for functions.
-    if (shared_info->code()->kind() != Code::FUNCTION) return;
+    if (shared_info->code()->kind() != Code::FUNCTION) return false;
 
     // Function must be lazy compilable.
-    if (!shared_info->allows_lazy_compilation()) return;
+    if (!shared_info->allows_lazy_compilation()) return false;
 
     // If this is a full script wrapped in a function we do not flush the code.
-    if (shared_info->is_toplevel()) return;
+    if (shared_info->is_toplevel()) return false;
 
     // Age this shared function info.
     if (shared_info->code_age() < kCodeAgeThreshold) {
       shared_info->set_code_age(shared_info->code_age() + 1);
-      return;
+      return false;
     }
 
-    // Compute the lazy compilable version of the code.
-    Code* code = Builtins::builtin(Builtins::LazyCompile);
-    shared_info->set_code(code);
-    function->set_code(code);
+    return true;
+  }
+
+
+  static bool FlushCodeForFunction(JSFunction* function) {
+    if (!IsFlushable(function)) return false;
+
+    // This function's code looks flushable. But we have to postpone the
+    // decision until we see all functions that point to the same
+    // SharedFunctionInfo because some of them might be optimized.
+    // That would make the non-optimized version of the code non-flushable,
+    // because it is required for bailing out from optimized code.
+    FlushCode::AddCandidate(function);
+    return true;
   }
 
 
@@ -539,17 +684,38 @@ class StaticMarkingVisitor : public StaticVisitorBase {
   }
 
 
-  static void VisitSharedFunctionInfo(Map* map, HeapObject* object) {
+  static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
     SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
-    if (shared->IsInobjectSlackTrackingInProgress()) {
-      shared->DetachInitialMap();
-    }
+
+    if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
+
     FixedBodyVisitor<StaticMarkingVisitor,
                      SharedFunctionInfo::BodyDescriptor,
                      void>::Visit(map, object);
   }
 
 
+  static void VisitSharedFunctionInfoAndFlushCode(Map* map,
+                                                  HeapObject* object) {
+    VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
+  }
+
+
+  static void VisitSharedFunctionInfoAndFlushCodeGeneric(
+      Map* map, HeapObject* object, bool known_flush_code_candidate) {
+    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
+
+    if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
+
+    if (!known_flush_code_candidate) {
+      known_flush_code_candidate = IsFlushable(shared);
+      if (known_flush_code_candidate) FlushCode::AddCandidate(shared);
+    }
+
+    VisitSharedFunctionInfoFields(object, known_flush_code_candidate);
+  }
+
+
   static void VisitCodeEntry(Address entry_address) {
     Object* code = Code::GetObjectFromEntryAddress(entry_address);
     Object* old_code = code;
@@ -564,30 +730,98 @@ class StaticMarkingVisitor : public StaticVisitorBase {
   static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
     JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
     // The function must have a valid context and not be a builtin.
+    bool flush_code_candidate = false;
     if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
-      FlushCodeForFunction(jsfunction);
+      flush_code_candidate = FlushCodeForFunction(jsfunction);
     }
-    VisitJSFunction(map, object);
+
+    if (!flush_code_candidate) {
+      MarkCompactCollector::MarkObject(
+          jsfunction->unchecked_shared()->unchecked_code());
+
+      if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
+        // For optimized functions we should retain both the non-optimized
+        // version of the code and the non-optimized version of all inlined
+        // functions, to support bailing out from inlined code.
+        DeoptimizationInputData* data =
+            reinterpret_cast<DeoptimizationInputData*>(
+                jsfunction->unchecked_code()->unchecked_deoptimization_data());
+
+        FixedArray* literals = data->UncheckedLiteralArray();
+
+        for (int i = 0, count = data->InlinedFunctionCount()->value();
+             i < count;
+             i++) {
+          JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
+          MarkCompactCollector::MarkObject(
+              inlined->unchecked_shared()->unchecked_code());
+        }
+      }
+    }
+
+    VisitJSFunctionFields(map,
+                          reinterpret_cast<JSFunction*>(object),
+                          flush_code_candidate);
   }
 
 
   static void VisitJSFunction(Map* map, HeapObject* object) {
-#define SLOT_ADDR(obj, offset)   \
-    reinterpret_cast<Object**>((obj)->address() + offset)
+    VisitJSFunctionFields(map,
+                          reinterpret_cast<JSFunction*>(object),
+                          false);
+  }
+
 
+#define SLOT_ADDR(obj, offset) \
+  reinterpret_cast<Object**>((obj)->address() + offset)
+
+
+  static inline void VisitJSFunctionFields(Map* map,
+                                           JSFunction* object,
+                                           bool flush_code_candidate) {
     VisitPointers(SLOT_ADDR(object, JSFunction::kPropertiesOffset),
                   SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
 
-    VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset);
+    if (!flush_code_candidate) {
+      VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset);
+    } else {
+      // Don't visit code object.
+
+      // Visit the shared function info to avoid double-checking its
+      // flushability.
+      SharedFunctionInfo* shared_info = object->unchecked_shared();
+      if (!shared_info->IsMarked()) {
+        Map* shared_info_map = shared_info->map();
+        MarkCompactCollector::SetMark(shared_info);
+        MarkCompactCollector::MarkObject(shared_info_map);
+        VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
+                                                   shared_info,
+                                                   true);
+      }
+    }
 
     VisitPointers(SLOT_ADDR(object,
                             JSFunction::kCodeEntryOffset + kPointerSize),
-                  SLOT_ADDR(object, JSFunction::kSize));
+                  SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));
 
-#undef SLOT_ADDR
+    // Don't visit the next function list field as it is a weak reference.
   }
 
 
+  static void VisitSharedFunctionInfoFields(HeapObject* object,
+                                            bool flush_code_candidate) {
+    VisitPointer(SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
+
+    if (!flush_code_candidate) {
+      VisitPointer(SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
+    }
+
+    VisitPointers(SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
+                  SLOT_ADDR(object, SharedFunctionInfo::kSize));
+  }
+
+  #undef SLOT_ADDR
+
   typedef void (*Callback)(Map* map, HeapObject* object);
 
   static VisitorDispatchTable<Callback> table_;
@@ -612,6 +846,10 @@ class MarkingVisitor : public ObjectVisitor {
     StaticMarkingVisitor::VisitCodeTarget(rinfo);
   }
 
+  void VisitGlobalPropertyCell(RelocInfo* rinfo) {
+    StaticMarkingVisitor::VisitGlobalPropertyCell(rinfo);
+  }
+
   void VisitDebugTarget(RelocInfo* rinfo) {
     StaticMarkingVisitor::VisitDebugTarget(rinfo);
   }
@@ -636,8 +874,10 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
 
   void VisitPointer(Object** slot) {
     Object* obj = *slot;
-    if (obj->IsHeapObject()) {
-      MarkCompactCollector::MarkObject(HeapObject::cast(obj));
+    if (obj->IsSharedFunctionInfo()) {
+      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
+      MarkCompactCollector::MarkObject(shared->unchecked_code());
+      MarkCompactCollector::MarkObject(shared);
     }
   }
 };
@@ -673,6 +913,7 @@ void MarkCompactCollector::PrepareForCodeFlushing() {
 
   SharedFunctionInfoMarkingVisitor visitor;
   CompilationCache::IterateFunctions(&visitor);
+  HandleScopeImplementer::Iterate(&visitor);
 
   ProcessMarkingStack();
 }
@@ -1096,6 +1337,9 @@ void MarkCompactCollector::MarkLiveObjects() {
 
   // Remove object groups after marking phase.
   GlobalHandles::RemoveObjectGroups();
+
+  // Flush code from collected candidates.
+  FlushCode::ProcessCandidates();
 }
 
 
@@ -1305,8 +1549,8 @@ MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
 }
 
 
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
-    HeapObject* ignore, int object_size) {
+MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(HeapObject* ignore,
+                                                            int object_size) {
   return Heap::cell_space()->MCAllocateRaw(object_size);
 }
 
@@ -2292,8 +2536,9 @@ void MarkCompactCollector::UpdatePointers() {
 
   // Large objects do not move, the map word can be updated directly.
   LargeObjectIterator it(Heap::lo_space());
-  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
     UpdatePointersInNewObject(obj);
+  }
 
   USE(live_maps_size);
   USE(live_pointer_olds_size);
index 90667d7..fa1934d 100644 (file)
@@ -258,6 +258,14 @@ function SetupMath() {
     "max", MathMax,
     "min", MathMin
   ));
+
+  // The values here are from the MathFunctionId enum in objects.h.
+  %SetMathFunctionId($Math.floor, 1);
+  %SetMathFunctionId($Math.round, 2);
+  %SetMathFunctionId($Math.abs, 4);
+  %SetMathFunctionId($Math.sqrt, 0xd);
+  // TODO(erikcorry): Set the id of the other functions so they can be
+  // optimized.
 };
 
 
index 27f32f7..901e78d 100644 (file)
@@ -60,6 +60,10 @@ class Memory {
     return *reinterpret_cast<int*>(addr);
   }
 
+  static double& double_at(Address addr)  {
+    return *reinterpret_cast<double*>(addr);
+  }
+
   static Address& Address_at(Address addr)  {
     return *reinterpret_cast<Address*>(addr);
   }
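
// A minimal standalone sketch (hypothetical driver code, not part of this
// patch) of the reference-returning accessor style used by Memory above: a
// raw address is reinterpreted as a typed lvalue that can be read from and
// assigned through.
#include <cassert>

int main() {
  double storage = 0.0;
  char* addr = reinterpret_cast<char*>(&storage);  // Plays the role of Address.
  *reinterpret_cast<double*>(addr) = 2.5;  // Like Memory::double_at(addr) = 2.5;
  assert(*reinterpret_cast<double*>(addr) == 2.5);
  return 0;
}
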
index 7f9c0f8..c783566 100644 (file)
@@ -904,11 +904,12 @@ function FormatStackTrace(error, frames) {
 
 function FormatRawStackTrace(error, raw_stack) {
   var frames = [ ];
-  for (var i = 0; i < raw_stack.length; i += 3) {
+  for (var i = 0; i < raw_stack.length; i += 4) {
     var recv = raw_stack[i];
-    var fun = raw_stack[i+1];
-    var pc = raw_stack[i+2];
-    var pos = %FunctionGetPositionForOffset(fun, pc);
+    var fun = raw_stack[i + 1];
+    var code = raw_stack[i + 2];
+    var pc = raw_stack[i + 3];
+    var pos = %FunctionGetPositionForOffset(code, pc);
     frames.push(new CallSite(recv, fun, pos));
   }
   if (IS_FUNCTION($Error.prepareStackTrace)) {
index 69219ee..cb7b35e 100644 (file)
@@ -775,6 +775,9 @@ void JSFunction::JSFunctionPrint() {
 void JSFunction::JSFunctionVerify() {
   CHECK(IsJSFunction());
   VerifyObjectField(kPrototypeOrInitialMapOffset);
+  VerifyObjectField(kNextFunctionLinkOffset);
+  CHECK(next_function_link()->IsUndefined() ||
+        next_function_link()->IsJSFunction());
 }
 
 
index 499cb91..5910128 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -459,6 +459,33 @@ bool Object::IsDescriptorArray() {
 }
 
 
+bool Object::IsDeoptimizationInputData() {
+  // Must be a fixed array.
+  if (!IsFixedArray()) return false;
+
+  // There's no sure way to detect the difference between a fixed array and
+  // a deoptimization data array.  Since this is used for asserts we can
+  // check that the length is zero or else the fixed prefix size plus a
+  // multiple of the entry size.
+  int length = FixedArray::cast(this)->length();
+  if (length == 0) return true;
+
+  length -= DeoptimizationInputData::kFirstDeoptEntryIndex;
+  return length >= 0 &&
+      length % DeoptimizationInputData::kDeoptEntrySize == 0;
+}
+
+
+bool Object::IsDeoptimizationOutputData() {
+  if (!IsFixedArray()) return false;
+  // There's actually no way to see the difference between a fixed array and
+  // a deoptimization data array.  Since this is used for asserts we can check
+  // that the length is plausible though.
+  if (FixedArray::cast(this)->length() % 2 != 0) return false;
+  return true;
+}
+
+
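// A standalone restatement (sketch, not from this patch) of the length
// heuristic above, using the DeoptimizationInputData constants defined later
// in this change: kFirstDeoptEntryIndex = 5 and kDeoptEntrySize = 3, so a
// plausible length is zero, or 5 plus a multiple of 3.
#include <cassert>

static bool PlausibleInputDataLength(int length) {
  if (length == 0) return true;
  length -= 5;                            // kFirstDeoptEntryIndex.
  return length >= 0 && length % 3 == 0;  // kDeoptEntrySize.
}

int main() {
  assert(PlausibleInputDataLength(0));    // Empty is always plausible.
  assert(PlausibleInputDataLength(11));   // 5 + 2 * 3: two deopt entries.
  assert(!PlausibleInputDataLength(12));  // Not 5 plus a multiple of 3.
  return 0;
}
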
 bool Object::IsContext() {
   return Object::IsHeapObject()
     && (HeapObject::cast(this)->map() == Heap::context_map() ||
@@ -1682,6 +1709,8 @@ void NumberDictionary::set_requires_slow_elements() {
 
 CAST_ACCESSOR(FixedArray)
 CAST_ACCESSOR(DescriptorArray)
+CAST_ACCESSOR(DeoptimizationInputData)
+CAST_ACCESSOR(DeoptimizationOutputData)
 CAST_ACCESSOR(SymbolTable)
 CAST_ACCESSOR(JSFunctionResultCache)
 CAST_ACCESSOR(NormalizedMapCache)
@@ -2376,18 +2405,160 @@ int Code::arguments_count() {
 
 
 int Code::major_key() {
-  ASSERT(kind() == STUB || kind() == BINARY_OP_IC);
+  ASSERT(kind() == STUB ||
+         kind() == BINARY_OP_IC ||
+         kind() == TYPE_RECORDING_BINARY_OP_IC ||
+         kind() == COMPARE_IC);
   return READ_BYTE_FIELD(this, kStubMajorKeyOffset);
 }
 
 
 void Code::set_major_key(int major) {
-  ASSERT(kind() == STUB || kind() == BINARY_OP_IC);
+  ASSERT(kind() == STUB ||
+         kind() == BINARY_OP_IC ||
+         kind() == TYPE_RECORDING_BINARY_OP_IC ||
+         kind() == COMPARE_IC);
   ASSERT(0 <= major && major < 256);
   WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
 }
 
 
+bool Code::optimizable() {
+  ASSERT(kind() == FUNCTION);
+  return READ_BYTE_FIELD(this, kOptimizableOffset) == 1;
+}
+
+
+void Code::set_optimizable(bool value) {
+  ASSERT(kind() == FUNCTION);
+  WRITE_BYTE_FIELD(this, kOptimizableOffset, value ? 1 : 0);
+}
+
+
+bool Code::has_deoptimization_support() {
+  ASSERT(kind() == FUNCTION);
+  return READ_BYTE_FIELD(this, kHasDeoptimizationSupportOffset) == 1;
+}
+
+
+void Code::set_has_deoptimization_support(bool value) {
+  ASSERT(kind() == FUNCTION);
+  WRITE_BYTE_FIELD(this, kHasDeoptimizationSupportOffset, value ? 1 : 0);
+}
+
+
+int Code::allow_osr_at_loop_nesting_level() {
+  ASSERT(kind() == FUNCTION);
+  return READ_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset);
+}
+
+
+void Code::set_allow_osr_at_loop_nesting_level(int level) {
+  ASSERT(kind() == FUNCTION);
+  ASSERT(level >= 0 && level <= kMaxLoopNestingMarker);
+  WRITE_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset, level);
+}
+
+
+unsigned Code::stack_slots() {
+  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  return READ_UINT32_FIELD(this, kStackSlotsOffset);
+}
+
+
+void Code::set_stack_slots(unsigned slots) {
+  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  WRITE_UINT32_FIELD(this, kStackSlotsOffset, slots);
+}
+
+
+unsigned Code::safepoint_table_start() {
+  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  return READ_UINT32_FIELD(this, kSafepointTableStartOffset);
+}
+
+
+void Code::set_safepoint_table_start(unsigned offset) {
+  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
+  WRITE_UINT32_FIELD(this, kSafepointTableStartOffset, offset);
+}
+
+
+unsigned Code::stack_check_table_start() {
+  ASSERT(kind() == FUNCTION);
+  return READ_UINT32_FIELD(this, kStackCheckTableStartOffset);
+}
+
+
+void Code::set_stack_check_table_start(unsigned offset) {
+  ASSERT(kind() == FUNCTION);
+  ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
+  WRITE_UINT32_FIELD(this, kStackCheckTableStartOffset, offset);
+}
+
+
+CheckType Code::check_type() {
+  ASSERT(is_call_stub() || is_keyed_call_stub());
+  byte type = READ_BYTE_FIELD(this, kCheckTypeOffset);
+  return static_cast<CheckType>(type);
+}
+
+
+void Code::set_check_type(CheckType value) {
+  ASSERT(is_call_stub() || is_keyed_call_stub());
+  WRITE_BYTE_FIELD(this, kCheckTypeOffset, value);
+}
+
+
+byte Code::binary_op_type() {
+  ASSERT(is_binary_op_stub());
+  return READ_BYTE_FIELD(this, kBinaryOpTypeOffset);
+}
+
+
+void Code::set_binary_op_type(byte value) {
+  ASSERT(is_binary_op_stub());
+  WRITE_BYTE_FIELD(this, kBinaryOpTypeOffset, value);
+}
+
+
+byte Code::type_recording_binary_op_type() {
+  ASSERT(is_type_recording_binary_op_stub());
+  return READ_BYTE_FIELD(this, kBinaryOpTypeOffset);
+}
+
+
+void Code::set_type_recording_binary_op_type(byte value) {
+  ASSERT(is_type_recording_binary_op_stub());
+  WRITE_BYTE_FIELD(this, kBinaryOpTypeOffset, value);
+}
+
+
+byte Code::type_recording_binary_op_result_type() {
+  ASSERT(is_type_recording_binary_op_stub());
+  return READ_BYTE_FIELD(this, kBinaryOpReturnTypeOffset);
+}
+
+
+void Code::set_type_recording_binary_op_result_type(byte value) {
+  ASSERT(is_type_recording_binary_op_stub());
+  WRITE_BYTE_FIELD(this, kBinaryOpReturnTypeOffset, value);
+}
+
+
+byte Code::compare_state() {
+  ASSERT(is_compare_ic_stub());
+  return READ_BYTE_FIELD(this, kCompareStateOffset);
+}
+
+
+void Code::set_compare_state(byte value) {
+  ASSERT(is_compare_ic_stub());
+  WRITE_BYTE_FIELD(this, kCompareStateOffset, value);
+}
+
+
 bool Code::is_inline_cache_stub() {
   Kind kind = this->kind();
   return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
@@ -2530,6 +2701,7 @@ ACCESSORS(Map, constructor, Object, kConstructorOffset)
 
 ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
 ACCESSORS(JSFunction, literals, FixedArray, kLiteralsOffset)
+ACCESSORS(JSFunction, next_function_link, Object, kNextFunctionLinkOffset)
 
 ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
 ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
@@ -2667,6 +2839,7 @@ SMI_ACCESSORS(SharedFunctionInfo, compiler_hints,
               kCompilerHintsOffset)
 SMI_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
               kThisPropertyAssignmentsCountOffset)
+SMI_ACCESSORS(SharedFunctionInfo, opt_count, kOptCountOffset)
 #else
 
 #define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset)             \
@@ -2716,6 +2889,7 @@ PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
 PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
                         this_property_assignments_count,
                         kThisPropertyAssignmentsCountOffset)
+PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, opt_count, kOptCountOffset)
 #endif
 
 
@@ -2749,6 +2923,23 @@ bool SharedFunctionInfo::IsInobjectSlackTrackingInProgress() {
 }
 
 
+bool SharedFunctionInfo::optimization_disabled() {
+  return BooleanBit::get(compiler_hints(), kOptimizationDisabled);
+}
+
+
+void SharedFunctionInfo::set_optimization_disabled(bool disable) {
+  set_compiler_hints(BooleanBit::set(compiler_hints(),
+                                     kOptimizationDisabled,
+                                     disable));
+  // If disabling optimizations we reflect that in the code object so
+  // it will not be counted as optimizable code.
+  if ((code()->kind() == Code::FUNCTION) && disable) {
+    code()->set_optimizable(false);
+  }
+}
+
+
 ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
 ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
 
@@ -2794,6 +2985,13 @@ Code* SharedFunctionInfo::unchecked_code() {
 
 
 void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
+  // If optimization has been disabled for the shared function info,
+  // reflect that in the code object so it will not be counted as
+  // optimizable code.
+  ASSERT(value->kind() != Code::FUNCTION ||
+         !value->optimizable() ||
+         this->code() == Builtins::builtin(Builtins::Illegal) ||
+         this->allows_lazy_compilation());
   WRITE_FIELD(this, kCodeOffset, value);
   CONDITIONAL_WRITE_BARRIER(this, kCodeOffset, mode);
 }
@@ -2812,6 +3010,16 @@ void SharedFunctionInfo::set_scope_info(SerializedScopeInfo* value,
 }
 
 
+Smi* SharedFunctionInfo::deopt_counter() {
+  return reinterpret_cast<Smi*>(READ_FIELD(this, kDeoptCounterOffset));
+}
+
+
+void SharedFunctionInfo::set_deopt_counter(Smi* value) {
+  WRITE_FIELD(this, kDeoptCounterOffset, value);
+}
+
+
 bool SharedFunctionInfo::is_compiled() {
   return code() != Builtins::builtin(Builtins::LazyCompile);
 }
@@ -2833,6 +3041,19 @@ bool SharedFunctionInfo::HasCustomCallGenerator() {
 }
 
 
+MathFunctionId SharedFunctionInfo::math_function_id() {
+  return static_cast<MathFunctionId>(
+      (compiler_hints() >> kMathFunctionShift) & kMathFunctionMask);
+}
+
+
+void SharedFunctionInfo::set_math_function_id(int math_fn) {
+  ASSERT(math_fn <= max_math_id_number());
+  set_compiler_hints(compiler_hints() |
+                     ((math_fn & kMathFunctionMask) << kMathFunctionShift));
+}
+
+
 int SharedFunctionInfo::custom_call_generator_id() {
   ASSERT(HasCustomCallGenerator());
   return Smi::cast(function_data())->value();
@@ -2850,11 +3071,33 @@ void SharedFunctionInfo::set_code_age(int code_age) {
 }
 
 
+bool SharedFunctionInfo::has_deoptimization_support() {
+  Code* code = this->code();
+  return code->kind() == Code::FUNCTION && code->has_deoptimization_support();
+}
+
+
 bool JSFunction::IsBuiltin() {
   return context()->global()->IsJSBuiltinsObject();
 }
 
 
+bool JSFunction::NeedsArgumentsAdaption() {
+  return shared()->formal_parameter_count() !=
+      SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+}
+
+
+bool JSFunction::IsOptimized() {
+  return code()->kind() == Code::OPTIMIZED_FUNCTION;
+}
+
+
+bool JSFunction::IsMarkedForLazyRecompilation() {
+  return code() == Builtins::builtin(Builtins::LazyRecompile);
+}
+
+
 Code* JSFunction::code() {
   return Code::cast(unchecked_code());
 }
@@ -2874,6 +3117,23 @@ void JSFunction::set_code(Code* value) {
 }
 
 
+void JSFunction::ReplaceCode(Code* code) {
+  bool was_optimized = IsOptimized();
+  bool is_optimized = code->kind() == Code::OPTIMIZED_FUNCTION;
+
+  set_code(code);
+
+  // Add/remove the function from the list of optimized functions for this
+  // context based on the state change.
+  if (!was_optimized && is_optimized) {
+    context()->global_context()->AddOptimizedFunction(this);
+  }
+  if (was_optimized && !is_optimized) {
+    context()->global_context()->RemoveOptimizedFunction(this);
+  }
+}
+
+
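// Taken together, the predicates above encode a function's compilation state
// in which code object it currently points at: the LazyCompile builtin means
// not yet compiled, the LazyRecompile builtin means marked for optimized
// recompilation, and code of kind OPTIMIZED_FUNCTION means optimized.
// ReplaceCode, rather than plain set_code, is used for such state changes
// because it also keeps the global context's list of optimized functions in
// sync.
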
 Context* JSFunction::context() {
   return Context::cast(READ_FIELD(this, kContextOffset));
 }
@@ -3007,6 +3267,7 @@ JSValue* JSValue::cast(Object* obj) {
 
 INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
 ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
+ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
 
 
 byte* Code::instruction_start()  {
@@ -3024,6 +3285,12 @@ int Code::body_size() {
 }
 
 
+FixedArray* Code::unchecked_deoptimization_data() {
+  return reinterpret_cast<FixedArray*>(
+      READ_FIELD(this, kDeoptimizationDataOffset));
+}
+
+
 ByteArray* Code::unchecked_relocation_info() {
   return reinterpret_cast<ByteArray*>(READ_FIELD(this, kRelocationInfoOffset));
 }
index ed76cb9..55a0a53 100644 (file)
@@ -352,6 +352,7 @@ VisitorDispatchTable<typename StaticNewSpaceVisitor<StaticVisitor>::Callback>
 void Code::CodeIterateBody(ObjectVisitor* v) {
   int mode_mask = RelocInfo::kCodeTargetMask |
                   RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
                   RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
                   RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
                   RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
@@ -361,9 +362,8 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
   // the heap compaction in the next statement.
   RelocIterator it(this, mode_mask);
 
-  IteratePointers(v,
-                  kRelocationInfoOffset,
-                  kRelocationInfoOffset + kPointerSize);
+  IteratePointer(v, kRelocationInfoOffset);
+  IteratePointer(v, kDeoptimizationDataOffset);
 
   for (; !it.done(); it.next()) {
     it.rinfo()->Visit(v);
@@ -375,6 +375,7 @@ template<typename StaticVisitor>
 void Code::CodeIterateBody() {
   int mode_mask = RelocInfo::kCodeTargetMask |
                   RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
                   RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
                   RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
                   RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
@@ -386,6 +387,8 @@ void Code::CodeIterateBody() {
 
   StaticVisitor::VisitPointer(
       reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
+  StaticVisitor::VisitPointer(
+      reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
 
   for (; !it.done(); it.next()) {
     it.rinfo()->template Visit<StaticVisitor>();
index f5d19e2..1c652f6 100644 (file)
 #include "api.h"
 #include "arguments.h"
 #include "bootstrapper.h"
+#include "codegen.h"
 #include "debug.h"
+#include "deoptimizer.h"
 #include "execution.h"
+#include "full-codegen.h"
+#include "hydrogen.h"
 #include "objects-inl.h"
 #include "objects-visiting.h"
 #include "macro-assembler.h"
+#include "safepoint-table.h"
 #include "scanner-base.h"
 #include "scopeinfo.h"
 #include "string-stream.h"
 #include "utils.h"
+#include "vm-state-inl.h"
 
 #ifdef ENABLE_DISASSEMBLER
+#include "disasm.h"
 #include "disassembler.h"
 #endif
 
@@ -1728,6 +1735,23 @@ void JSObject::LookupInDescriptor(String* name, LookupResult* result) {
 }
 
 
+void Map::LookupInDescriptors(JSObject* holder,
+                              String* name,
+                              LookupResult* result) {
+  DescriptorArray* descriptors = instance_descriptors();
+  int number = DescriptorLookupCache::Lookup(descriptors, name);
+  if (number == DescriptorLookupCache::kAbsent) {
+    number = descriptors->Search(name);
+    DescriptorLookupCache::Update(descriptors, name, number);
+  }
+  if (number != DescriptorArray::kNotFound) {
+    result->DescriptorResult(holder, descriptors->GetDetails(number), number);
+  } else {
+    result->NotFound();
+  }
+}
+
+
 void JSObject::LocalLookupRealNamedProperty(String* name,
                                             LookupResult* result) {
   if (IsJSGlobalProxy()) {
@@ -3051,6 +3075,10 @@ MaybeObject* JSObject::SetPropertyCallback(String* name,
       if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
     }
     set_map(Map::cast(new_map));
+    // When running crankshaft, changing the map is not enough. We
+    // need to deoptimize all functions that rely on this global
+    // object.
+    Deoptimizer::DeoptimizeGlobalObject(this);
   }
 
   // Update the dictionary with the new CALLBACKS property.
@@ -4123,6 +4151,22 @@ int DescriptorArray::LinearSearch(String* name, int len) {
 }
 
 
+MaybeObject* DeoptimizationInputData::Allocate(int deopt_entry_count,
+                                               PretenureFlag pretenure) {
+  ASSERT(deopt_entry_count > 0);
+  return Heap::AllocateFixedArray(LengthFor(deopt_entry_count),
+                                  pretenure);
+}
+
+
+MaybeObject* DeoptimizationOutputData::Allocate(int number_of_deopt_points,
+                                                PretenureFlag pretenure) {
+  if (number_of_deopt_points == 0) return Heap::empty_fixed_array();
+  return Heap::AllocateFixedArray(LengthOfFixedArray(number_of_deopt_points),
+                                  pretenure);
+}
+
+
 #ifdef DEBUG
 bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
   if (IsEmpty()) return other->IsEmpty();
@@ -5331,6 +5375,38 @@ void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
 }
 
 
+void JSFunction::MarkForLazyRecompilation() {
+  ASSERT(is_compiled() && !IsOptimized());
+  ASSERT(shared()->allows_lazy_compilation());
+  ReplaceCode(Builtins::builtin(Builtins::LazyRecompile));
+}
+
+
+uint32_t JSFunction::SourceHash() {
+  uint32_t hash = 0;
+  Object* script = shared()->script();
+  if (!script->IsUndefined()) {
+    Object* source = Script::cast(script)->source();
+    if (!source->IsUndefined()) hash = String::cast(source)->Hash();
+  }
+  hash ^= ComputeIntegerHash(shared()->start_position_and_type());
+  hash += ComputeIntegerHash(shared()->end_position());
+  return hash;
+}
+
+
+bool JSFunction::IsInlineable() {
+  if (IsBuiltin()) return false;
+  // Check that the function has a script associated with it.
+  if (!shared()->script()->IsScript()) return false;
+  Code* code = shared()->code();
+  if (code->kind() == Code::OPTIMIZED_FUNCTION) return true;
+  // If we never ran this (unlikely) then lets try to optimize it.
+  if (code->kind() != Code::FUNCTION) return true;
+  return code->optimizable();
+}
+
+
 Object* JSFunction::SetInstancePrototype(Object* value) {
   ASSERT(value->IsJSObject());
 
@@ -5390,6 +5466,12 @@ Object* JSFunction::SetInstanceClassName(String* name) {
 }
 
 
+void JSFunction::PrintName() {
+  SmartPointer<char> name = shared()->DebugName()->ToCString();
+  PrintF("%s", *name);
+}
+
+
 Context* JSFunction::GlobalContextFromLiterals(FixedArray* literals) {
   return Context::cast(literals->get(JSFunction::kLiteralGlobalContextIndex));
 }
@@ -5420,15 +5502,19 @@ bool SharedFunctionInfo::HasSourceCode() {
 
 
 Object* SharedFunctionInfo::GetSourceCode() {
+  if (!HasSourceCode()) return Heap::undefined_value();
   HandleScope scope;
-  if (script()->IsUndefined()) return Heap::undefined_value();
   Object* source = Script::cast(script())->source();
-  if (source->IsUndefined()) return Heap::undefined_value();
   return *SubString(Handle<String>(String::cast(source)),
                     start_position(), end_position());
 }
 
 
+int SharedFunctionInfo::SourceSize() {
+  return end_position() - start_position();
+}
+
+
 int SharedFunctionInfo::CalculateInstanceSize() {
   int instance_size =
       JSObject::kHeaderSize +
@@ -5546,8 +5632,7 @@ Object* SharedFunctionInfo::GetThisPropertyAssignmentConstant(int index) {
 void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
                                          int max_length) {
   // For some native functions there is no source.
-  if (script()->IsUndefined() ||
-      Script::cast(script())->source()->IsUndefined()) {
+  if (!HasSourceCode()) {
     accumulator->Add("<No Source>");
     return;
   }
@@ -5572,14 +5657,60 @@ void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
   }
 
   int len = end_position() - start_position();
-  if (len > max_length) {
+  if (len <= max_length || max_length < 0) {
+    accumulator->Put(script_source, start_position(), end_position());
+  } else {
     accumulator->Put(script_source,
                      start_position(),
                      start_position() + max_length);
     accumulator->Add("...\n");
+  }
+}
+
+
+static bool IsCodeEquivalent(Code* code, Code* recompiled) {
+  if (code->instruction_size() != recompiled->instruction_size()) return false;
+  ByteArray* code_relocation = code->relocation_info();
+  ByteArray* recompiled_relocation = recompiled->relocation_info();
+  int length = code_relocation->length();
+  if (length != recompiled_relocation->length()) return false;
+  int compare = memcmp(code_relocation->GetDataStartAddress(),
+                       recompiled_relocation->GetDataStartAddress(),
+                       length);
+  return compare == 0;
+}
+
+
+void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) {
+  ASSERT(!has_deoptimization_support());
+  AssertNoAllocation no_allocation;
+  Code* code = this->code();
+  if (IsCodeEquivalent(code, recompiled)) {
+    // Copy the deoptimization data from the recompiled code.
+    code->set_deoptimization_data(recompiled->deoptimization_data());
+    code->set_has_deoptimization_support(true);
   } else {
-    accumulator->Put(script_source, start_position(), end_position());
+    // TODO(3025757): In case the recompiled code isn't equivalent to
+    // the old code, we have to replace it. We should try to avoid this
+    // altogether because it flushes valuable type feedback by
+    // effectively resetting all IC state.
+    set_code(recompiled);
   }
+  ASSERT(has_deoptimization_support());
+}
+
+
+bool SharedFunctionInfo::VerifyBailoutId(int id) {
+  // TODO(srdjan): debugging ARM crashes in hydrogen. OK to disable while
+  // we are always bailing out on ARM.
+
+  ASSERT(id != AstNode::kNoNumber);
+  Code* unoptimized = code();
+  DeoptimizationOutputData* data =
+      DeoptimizationOutputData::cast(unoptimized->deoptimization_data());
+  unsigned ignore = Deoptimizer::GetOutputInfo(data, id, this);
+  USE(ignore);
+  return true;  // Return true if there was no ASSERT.
 }
 
 
@@ -5703,6 +5834,17 @@ void ObjectVisitor::VisitCodeEntry(Address entry_address) {
 }
 
 
+void ObjectVisitor::VisitGlobalPropertyCell(RelocInfo* rinfo) {
+  ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Object* cell = rinfo->target_cell();
+  Object* old_cell = cell;
+  VisitPointer(&cell);
+  if (cell != old_cell) {
+    rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
+  }
+}
+
+
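// Note the pattern above: the cell is read out of the relocation info,
// handed to VisitPointer (which may update the local copy, e.g. when a
// moving collector has relocated the object), and written back only if it
// changed.
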
 void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
   ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
           rinfo->IsPatchedReturnSequence()) ||
@@ -5715,6 +5857,12 @@ void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
 }
 
 
+void Code::InvalidateRelocation() {
+  HandleScope scope;
+  set_relocation_info(Heap::empty_byte_array());
+}
+
+
 void Code::Relocate(intptr_t delta) {
   for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
     it.rinfo()->apply(delta);
@@ -5736,6 +5884,7 @@ void Code::CopyFrom(const CodeDesc& desc) {
   intptr_t delta = instruction_start() - desc.buffer;
   int mode_mask = RelocInfo::kCodeTargetMask |
                   RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
                   RelocInfo::kApplyMask;
   Assembler* origin = desc.origin;  // Needed to find target_object on X64.
   for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
@@ -5743,6 +5892,9 @@ void Code::CopyFrom(const CodeDesc& desc) {
     if (mode == RelocInfo::EMBEDDED_OBJECT) {
       Handle<Object> p = it.rinfo()->target_object_handle(origin);
       it.rinfo()->set_target_object(*p);
+    } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+      Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
+      it.rinfo()->set_target_cell(*cell);
     } else if (RelocInfo::IsCodeTarget(mode)) {
       // rewrite code handles in inline cache targets to direct
       // pointers to the first instruction in the code object
@@ -5813,11 +5965,194 @@ int Code::SourceStatementPosition(Address pc) {
 }
 
 
+uint8_t* Code::GetSafepointEntry(Address pc) {
+  SafepointTable table(this);
+  unsigned pc_offset = static_cast<unsigned>(pc - instruction_start());
+  for (unsigned i = 0; i < table.length(); i++) {
+    // TODO(kasperl): Replace the linear search with binary search.
+    if (table.GetPcOffset(i) == pc_offset) return table.GetEntry(i);
+  }
+  return NULL;
+}
+
+
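// A sketch of the binary search the TODO above asks for, assuming (as the
// replacement would require) that the safepoint table stores pc offsets in
// ascending order. Standalone, over a plain array instead of SafepointTable.
#include <cstdint>

static int FindPcOffsetIndex(const uint32_t* pc_offsets, int length,
                             uint32_t pc_offset) {
  int low = 0;
  int high = length - 1;
  while (low <= high) {
    int mid = low + (high - low) / 2;
    if (pc_offsets[mid] == pc_offset) return mid;
    if (pc_offsets[mid] < pc_offset) {
      low = mid + 1;
    } else {
      high = mid - 1;
    }
  }
  return -1;  // No safepoint entry at this pc, mirroring the NULL return.
}
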
+void Code::SetNoStackCheckTable() {
+  // Indicate the absence of a stack-check table by a table start after the
+  // end of the instructions.  Table start must be aligned, so round up.
+  set_stack_check_table_start(RoundUp(instruction_size(), kIntSize));
+}
+
+
+Map* Code::FindFirstMap() {
+  ASSERT(is_inline_cache_stub());
+  AssertNoAllocation no_allocation;
+  int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+  for (RelocIterator it(this, mask); !it.done(); it.next()) {
+    RelocInfo* info = it.rinfo();
+    Object* object = info->target_object();
+    if (object->IsMap()) return Map::cast(object);
+  }
+  return NULL;
+}
+
+
 #ifdef ENABLE_DISASSEMBLER
+
+#ifdef DEBUG
+
+void DeoptimizationInputData::DeoptimizationInputDataPrint() {
+  disasm::NameConverter converter;
+  int deopt_count = DeoptCount();
+  PrintF("Deoptimization Input Data (deopt points = %d)\n", deopt_count);
+  if (0 == deopt_count) return;
+
+  PrintF("%6s  %6s  %6s  %12s\n", "index", "ast id", "argc", "commands");
+  for (int i = 0; i < deopt_count; i++) {
+    int command_count = 0;
+    PrintF("%6d  %6d  %6d",
+           i, AstId(i)->value(), ArgumentsStackHeight(i)->value());
+    int translation_index = TranslationIndex(i)->value();
+    TranslationIterator iterator(TranslationByteArray(), translation_index);
+    Translation::Opcode opcode =
+        static_cast<Translation::Opcode>(iterator.Next());
+    ASSERT(Translation::BEGIN == opcode);
+    int frame_count = iterator.Next();
+    if (FLAG_print_code_verbose) {
+      PrintF("  %s {count=%d}\n", Translation::StringFor(opcode), frame_count);
+    }
+
+    for (int i = 0; i < frame_count; ++i) {
+      opcode = static_cast<Translation::Opcode>(iterator.Next());
+      ASSERT(Translation::FRAME == opcode);
+      int ast_id = iterator.Next();
+      int function_id = iterator.Next();
+      JSFunction* function =
+          JSFunction::cast(LiteralArray()->get(function_id));
+      unsigned height = iterator.Next();
+      if (FLAG_print_code_verbose) {
+        PrintF("%24s  %s {ast_id=%d, function=",
+               "", Translation::StringFor(opcode), ast_id);
+        function->PrintName();
+        PrintF(", height=%u}\n", height);
+      }
+
+      // Size of translation is height plus all incoming arguments including
+      // receiver.
+      int size = height + function->shared()->formal_parameter_count() + 1;
+      command_count += size;
+      for (int j = 0; j < size; ++j) {
+        opcode = static_cast<Translation::Opcode>(iterator.Next());
+        if (FLAG_print_code_verbose) {
+          PrintF("%24s    %s ", "", Translation::StringFor(opcode));
+        }
+
+        if (opcode == Translation::DUPLICATE) {
+          opcode = static_cast<Translation::Opcode>(iterator.Next());
+          if (FLAG_print_code_verbose) {
+            PrintF("%s ", Translation::StringFor(opcode));
+          }
+          --j;  // Two commands share the same frame index.
+        }
+
+        switch (opcode) {
+          case Translation::BEGIN:
+          case Translation::FRAME:
+          case Translation::DUPLICATE:
+            UNREACHABLE();
+            break;
+
+          case Translation::REGISTER: {
+            int reg_code = iterator.Next();
+            if (FLAG_print_code_verbose)  {
+              PrintF("{input=%s}", converter.NameOfCPURegister(reg_code));
+            }
+            break;
+          }
+
+          case Translation::INT32_REGISTER: {
+            int reg_code = iterator.Next();
+            if (FLAG_print_code_verbose)  {
+              PrintF("{input=%s}", converter.NameOfCPURegister(reg_code));
+            }
+            break;
+          }
+
+          case Translation::DOUBLE_REGISTER: {
+            int reg_code = iterator.Next();
+            if (FLAG_print_code_verbose)  {
+              PrintF("{input=%s}",
+                     DoubleRegister::AllocationIndexToString(reg_code));
+            }
+            break;
+          }
+
+          case Translation::STACK_SLOT: {
+            int input_slot_index = iterator.Next();
+            if (FLAG_print_code_verbose)  {
+              PrintF("{input=%d}", input_slot_index);
+            }
+            break;
+          }
+
+          case Translation::INT32_STACK_SLOT: {
+            int input_slot_index = iterator.Next();
+            if (FLAG_print_code_verbose)  {
+              PrintF("{input=%d}", input_slot_index);
+            }
+            break;
+          }
+
+          case Translation::DOUBLE_STACK_SLOT: {
+            int input_slot_index = iterator.Next();
+            if (FLAG_print_code_verbose)  {
+              PrintF("{input=%d}", input_slot_index);
+            }
+            break;
+          }
+
+          case Translation::LITERAL: {
+            unsigned literal_index = iterator.Next();
+            if (FLAG_print_code_verbose)  {
+              PrintF("{literal_id=%u}", literal_index);
+            }
+            break;
+          }
+
+          case Translation::ARGUMENTS_OBJECT:
+            break;
+        }
+        if (FLAG_print_code_verbose) PrintF("\n");
+      }
+    }
+    if (!FLAG_print_code_verbose) PrintF("  %12d\n", command_count);
+  }
+}
+
+
+void DeoptimizationOutputData::DeoptimizationOutputDataPrint() {
+  PrintF("Deoptimization Output Data (deopt points = %d)\n",
+         this->DeoptPoints());
+  if (this->DeoptPoints() == 0) return;
+
+  PrintF("%6s  %8s  %s\n", "ast id", "pc", "state");
+  for (int i = 0; i < this->DeoptPoints(); i++) {
+    int pc_and_state = this->PcAndState(i)->value();
+    PrintF("%6d  %8d  %s\n",
+           this->AstId(i)->value(),
+           FullCodeGenerator::PcField::decode(pc_and_state),
+           FullCodeGenerator::State2String(
+               FullCodeGenerator::StateField::decode(pc_and_state)));
+  }
+}
+
+#endif  // DEBUG
+
+
 // Identify kind of code.
 const char* Code::Kind2String(Kind kind) {
   switch (kind) {
     case FUNCTION: return "FUNCTION";
+    case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION";
     case STUB: return "STUB";
     case BUILTIN: return "BUILTIN";
     case LOAD_IC: return "LOAD_IC";
@@ -5827,6 +6162,8 @@ const char* Code::Kind2String(Kind kind) {
     case CALL_IC: return "CALL_IC";
     case KEYED_CALL_IC: return "KEYED_CALL_IC";
     case BINARY_OP_IC: return "BINARY_OP_IC";
+    case TYPE_RECORDING_BINARY_OP_IC: return "TYPE_RECORDING_BINARY_OP_IC";
+    case COMPARE_IC: return "COMPARE_IC";
   }
   UNREACHABLE();
   return NULL;
@@ -5863,6 +6200,7 @@ const char* Code::PropertyType2String(PropertyType type) {
   return NULL;
 }
 
+
 void Code::Disassemble(const char* name) {
   PrintF("kind = %s\n", Kind2String(kind()));
   if (is_inline_cache_stub()) {
@@ -5875,14 +6213,64 @@ void Code::Disassemble(const char* name) {
   if ((name != NULL) && (name[0] != '\0')) {
     PrintF("name = %s\n", name);
   }
+  if (kind() == OPTIMIZED_FUNCTION) {
+    PrintF("stack_slots = %d\n", stack_slots());
+  }
 
   PrintF("Instructions (size = %d)\n", instruction_size());
   Disassembler::Decode(NULL, this);
   PrintF("\n");
 
+#ifdef DEBUG
+  if (kind() == FUNCTION) {
+    DeoptimizationOutputData* data =
+        DeoptimizationOutputData::cast(this->deoptimization_data());
+    data->DeoptimizationOutputDataPrint();
+  } else if (kind() == OPTIMIZED_FUNCTION) {
+    DeoptimizationInputData* data =
+        DeoptimizationInputData::cast(this->deoptimization_data());
+    data->DeoptimizationInputDataPrint();
+  }
+  PrintF("\n");
+#endif
+
+  if (kind() == OPTIMIZED_FUNCTION) {
+    SafepointTable table(this);
+    PrintF("Safepoints (size = %u)\n", table.size());
+    for (unsigned i = 0; i < table.length(); i++) {
+      unsigned pc_offset = table.GetPcOffset(i);
+      PrintF("%p  %4d  ", (instruction_start() + pc_offset), pc_offset);
+      table.PrintEntry(i);
+      PrintF(" (sp -> fp)");
+      int deoptimization_index = table.GetDeoptimizationIndex(i);
+      if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
+        PrintF("  %6d", deoptimization_index);
+      } else {
+        PrintF("  <none>");
+      }
+      PrintF("\n");
+    }
+    PrintF("\n");
+  } else if (kind() == FUNCTION) {
+    unsigned offset = stack_check_table_start();
+    // If there is no stack check table, the "table start" will be at or
+    // after (due to alignment) the end of the instruction stream.
+    if (static_cast<int>(offset) < instruction_size()) {
+      unsigned* address =
+          reinterpret_cast<unsigned*>(instruction_start() + offset);
+      unsigned length = address[0];
+      PrintF("Stack checks (size = %u)\n", length);
+      PrintF("ast_id  pc_offset\n");
+      for (unsigned i = 0; i < length; ++i) {
+        unsigned index = (2 * i) + 1;
+        PrintF("%6u  %9u\n", address[index], address[index + 1]);
+      }
+      PrintF("\n");
+    }
+  }
+
   PrintF("RelocInfo (size = %d)\n", relocation_size());
-  for (RelocIterator it(this); !it.done(); it.next())
-    it.rinfo()->Print();
+  for (RelocIterator it(this); !it.done(); it.next()) it.rinfo()->Print();
   PrintF("\n");
 }
 #endif  // ENABLE_DISASSEMBLER
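
// A standalone sketch of the stack check table layout decoded in
// Code::Disassemble above: a leading length word followed by
// (ast_id, pc_offset) pairs. Absence of the table is signalled by a table
// start at or past the end of the instructions (see SetNoStackCheckTable).
#include <cstdio>

static void PrintStackCheckTable(const unsigned* table) {
  unsigned length = table[0];
  std::printf("ast_id  pc_offset\n");
  for (unsigned i = 0; i < length; ++i) {
    unsigned index = (2 * i) + 1;
    std::printf("%6u  %9u\n", table[index], table[index + 1]);
  }
}

int main() {
  // Hypothetical table: two entries, (ast_id 3, pc 40) and (ast_id 7, pc 96).
  const unsigned table[] = { 2, 3, 40, 7, 96 };
  PrintStackCheckTable(table);
  return 0;
}
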
@@ -8304,11 +8692,10 @@ MaybeObject* ExternalFloatArray::SetValue(uint32_t index, Object* value) {
 }
 
 
-Object* GlobalObject::GetPropertyCell(LookupResult* result) {
+JSGlobalPropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) {
   ASSERT(!HasFastProperties());
   Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
-  ASSERT(value->IsJSGlobalPropertyCell());
-  return value;
+  return JSGlobalPropertyCell::cast(value);
 }
 
 
@@ -8564,6 +8951,20 @@ MaybeObject* CompilationCacheTable::PutRegExp(String* src,
 }
 
 
+void CompilationCacheTable::Remove(Object* value) {
+  for (int entry = 0, size = Capacity(); entry < size; entry++) {
+    int entry_index = EntryToIndex(entry);
+    int value_index = entry_index + 1;
+    if (get(value_index) == value) {
+      fast_set(this, entry_index, Heap::null_value());
+      fast_set(this, value_index, Heap::null_value());
+      ElementRemoved();
+    }
+  }
+  return;
+}
+
+
 // SymbolsKey used for HashTable where key is array of symbols.
 class SymbolsKey : public HashTableKey {
  public:
diff --git a/src/objects.cc.rej b/src/objects.cc.rej
new file mode 100644 (file)
index 0000000..a16b278
--- /dev/null
@@ -0,0 +1,85 @@
+--- src/objects.cc     (revision 757)
++++ src/objects.cc     (working copy)
+@@ -5377,19 +5377,26 @@
+     Address header = code->instruction_start() + code->safepoint_table_start();
+     length_ = Memory::uint32_at(header + kLengthOffset);
+     entry_size_ = Memory::uint32_at(header + kEntrySizeOffset);
+-    pc_offsets_ = header + kHeaderSize;
+-    entries_ = pc_offsets_ + (length_ * kPcOffsetSize);
++    fixed_entries_ = header + kHeaderSize;
++    entries_ = fixed_entries_ + (length_ * kFixedEntrySize);
+   }
+-  int size() const { return length_ * (entry_size_ + kPcOffsetSize); }
++  int size() const { return length_ * (entry_size_ + kFixedEntrySize); }
+   unsigned length() const { return length_; }
+   unsigned entry_size() const { return entry_size_; }
+   unsigned GetPcOffset(unsigned index) const {
+     ASSERT(index < length_);
+-    return Memory::uint32_at(pc_offsets_ + (index * kPcOffsetSize));
++    return Memory::uint32_at(
++        fixed_entries_ + kPcOffsetOffset + (index * kFixedEntrySize));
+   }
++  unsigned GetBailoutId(unsigned index) const {
++    ASSERT(index < length_);
++    return Memory::uint32_at(
++        fixed_entries_ + kBailoutIdOffset + (index * kFixedEntrySize));
++  }
++
+   uint8_t* GetEntry(unsigned index) const {
+     ASSERT(index < length_);
+     return &Memory::uint8_at(entries_ + (index * entry_size_));
+@@ -5408,7 +5415,9 @@
+   static const int kEntrySizeOffset = kLengthOffset + kIntSize;
+   static const int kHeaderSize = kEntrySizeOffset + kIntSize;
+-  static const int kPcOffsetSize = kIntSize;
++  static const int kPcOffsetOffset = 0;
++  static const int kBailoutIdOffset = kPcOffsetOffset + kIntSize;
++  static const int kFixedEntrySize = kBailoutIdOffset + kIntSize;
+   static void PrintBits(uint8_t byte, int digits) {
+     ASSERT(digits >= 0 && digits <= kBitsPerByte);
+@@ -5422,7 +5431,7 @@
+   unsigned length_;
+   unsigned entry_size_;
+-  Address pc_offsets_;
++  Address fixed_entries_;
+   Address entries_;
+ };
+@@ -5451,12 +5460,31 @@
+ }
++void Code::DeoptimizeNow() {
++  ASSERT(kind() == OPTIMIZED_FUNCTION);
++
++  SafepointTable table(this);
++  for (unsigned i = 0; i < table.length(); i++) {
++    Address pc = entry() + table.GetPcOffset(i);
++    unsigned id = table.GetBailoutId(i);
++    USE(pc);
++    USE(id);
++  }
++
++  // Done. Update the flags to change the code kind.
++  Flags flags = static_cast<Flags>(this->flags() & (~kFlagsKindMask));
++  flags = static_cast<Flags>(flags | (DEOPTIMIZED_FUNCTION << kFlagsKindShift));
++  ASSERT(kind() == DEOPTIMIZED_FUNCTION);
++}
++
++
+ #ifdef ENABLE_DISASSEMBLER
+ // Identify kind of code.
+ const char* Code::Kind2String(Kind kind) {
+   switch (kind) {
+     case FUNCTION: return "FUNCTION";
+     case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION";
++    case DEOPTIMIZED_FUNCTION: return "DEOPTIMIZED_FUNCTION";
+     case STUB: return "STUB";
+     case BUILTIN: return "BUILTIN";
+     case LOAD_IC: return "LOAD_IC";
index b52bac2..1827ab0 100644 (file)
@@ -585,6 +585,7 @@ struct ValueInfo : public Malloced {
 // A template-ized version of the IsXXX functions.
 template <class C> static inline bool Is(Object* obj);
 
+
 class MaybeObject BASE_EMBEDDED {
  public:
   inline bool IsFailure();
@@ -654,6 +655,8 @@ class Object : public MaybeObject {
   inline bool IsMap();
   inline bool IsFixedArray();
   inline bool IsDescriptorArray();
+  inline bool IsDeoptimizationInputData();
+  inline bool IsDeoptimizationOutputData();
   inline bool IsContext();
   inline bool IsCatchContext();
   inline bool IsGlobalContext();
@@ -2886,6 +2889,122 @@ class ExternalFloatArray: public ExternalArray {
 };
 
 
+// DeoptimizationInputData is a fixed array used to hold the deoptimization
+// data for code generated by the Hydrogen/Lithium compiler.  It also
+// contains information about functions that were inlined.  If N different
+// functions were inlined then first N elements of the literal array will
+// contain these functions.
+//
+// It can be empty.
+class DeoptimizationInputData: public FixedArray {
+ public:
+  // Layout description.  Indices in the array.
+  static const int kTranslationByteArrayIndex = 0;
+  static const int kInlinedFunctionCountIndex = 1;
+  static const int kLiteralArrayIndex = 2;
+  static const int kOsrAstIdIndex = 3;
+  static const int kOsrPcOffsetIndex = 4;
+  static const int kFirstDeoptEntryIndex = 5;
+
+  // Offsets of deopt entry elements relative to the start of the entry.
+  static const int kAstIdOffset = 0;
+  static const int kTranslationIndexOffset = 1;
+  static const int kArgumentsStackHeightOffset = 2;
+  static const int kDeoptEntrySize = 3;
+
+  // Simple element accessors.
+#define DEFINE_ELEMENT_ACCESSORS(name, type)      \
+  type* name() {                                  \
+    return type::cast(get(k##name##Index));       \
+  }                                               \
+  void Set##name(type* value) {                   \
+    set(k##name##Index, value);                   \
+  }
+
+  DEFINE_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
+  DEFINE_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
+  DEFINE_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
+  DEFINE_ELEMENT_ACCESSORS(OsrAstId, Smi)
+  DEFINE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
+
+  // Unchecked accessor to be used during GC.
+  FixedArray* UncheckedLiteralArray() {
+    return reinterpret_cast<FixedArray*>(get(kLiteralArrayIndex));
+  }
+
+#undef DEFINE_ELEMENT_ACCESSORS
+
+  // Accessors for elements of the ith deoptimization entry.
+#define DEFINE_ENTRY_ACCESSORS(name, type)                       \
+  type* name(int i) {                                            \
+    return type::cast(get(IndexForEntry(i) + k##name##Offset));  \
+  }                                                              \
+  void Set##name(int i, type* value) {                           \
+    set(IndexForEntry(i) + k##name##Offset, value);              \
+  }
+
+  DEFINE_ENTRY_ACCESSORS(AstId, Smi)
+  DEFINE_ENTRY_ACCESSORS(TranslationIndex, Smi)
+  DEFINE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
+
+#undef DEFINE_ENTRY_ACCESSORS
+
+  int DeoptCount() {
+    return (length() - kFirstDeoptEntryIndex) / kDeoptEntrySize;
+  }
+
+  // Allocates a DeoptimizationInputData.
+  MUST_USE_RESULT static MaybeObject* Allocate(int deopt_entry_count,
+                                               PretenureFlag pretenure);
+
+  // Casting.
+  static inline DeoptimizationInputData* cast(Object* obj);
+
+#ifdef DEBUG
+  void DeoptimizationInputDataPrint();
+#endif
+
+ private:
+  static int IndexForEntry(int i) {
+    return kFirstDeoptEntryIndex + (i * kDeoptEntrySize);
+  }
+
+  static int LengthFor(int entry_count) {
+    return IndexForEntry(entry_count);
+  }
+};
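
// Worked example of the layout above: deopt entry i occupies the
// kDeoptEntrySize (= 3) consecutive elements starting at index
// kFirstDeoptEntryIndex + i * kDeoptEntrySize. For entry 2 that puts the
// AstId at index 5 + 2 * 3 + 0 = 11, the TranslationIndex at 12 and the
// ArgumentsStackHeight at 13; a two-entry array therefore has length
// LengthFor(2) = 11, and DeoptCount() recovers (11 - 5) / 3 = 2.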
+
+
+// DeoptimizationOutputData is a fixed array used to hold the deoptimization
+// data for code generated by the full compiler.
+// The format of these objects is
+//   [i * 2]: Ast ID for ith deoptimization.
+//   [i * 2 + 1]: PC and state of ith deoptimization.
+class DeoptimizationOutputData: public FixedArray {
+ public:
+  int DeoptPoints() { return length() / 2; }
+  Smi* AstId(int index) { return Smi::cast(get(index * 2)); }
+  void SetAstId(int index, Smi* id) { set(index * 2, id); }
+  Smi* PcAndState(int index) { return Smi::cast(get(1 + index * 2)); }
+  void SetPcAndState(int index, Smi* offset) { set(1 + index * 2, offset); }
+
+  static int LengthOfFixedArray(int deopt_points) {
+    return deopt_points * 2;
+  }
+
+  // Allocates a DeoptimizationOutputData.
+  MUST_USE_RESULT static MaybeObject* Allocate(int number_of_deopt_points,
+                                               PretenureFlag pretenure);
+
+  // Casting.
+  static inline DeoptimizationOutputData* cast(Object* obj);
+
+#ifdef DEBUG
+  void DeoptimizationOutputDataPrint();
+#endif
+};
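
// A standalone sketch of the pair encoding above, using a plain vector in
// place of the real FixedArray of Smis.
#include <cassert>
#include <vector>

int main() {
  int deopt_points = 2;
  std::vector<int> data(deopt_points * 2);  // LengthOfFixedArray(2) == 4.
  data[0 * 2] = 17;       // SetAstId(0, ...): ast id of deopt point 0.
  data[0 * 2 + 1] = 340;  // SetPcAndState(0, ...): packed pc and state.
  data[1 * 2] = 23;       // SetAstId(1, ...).
  data[1 * 2 + 1] = 512;  // SetPcAndState(1, ...).
  assert(static_cast<int>(data.size()) / 2 == deopt_points);  // DeoptPoints().
  assert(data[1 * 2] == 23);                                  // AstId(1).
  return 0;
}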
+
+
 // Code describes objects with on-the-fly generated machine code.
 class Code: public HeapObject {
  public:
@@ -2900,6 +3019,7 @@ class Code: public HeapObject {
 
   enum Kind {
     FUNCTION,
+    OPTIMIZED_FUNCTION,
     STUB,
     BUILTIN,
     LOAD_IC,
@@ -2909,13 +3029,15 @@ class Code: public HeapObject {
     STORE_IC,
     KEYED_STORE_IC,
     BINARY_OP_IC,
+    TYPE_RECORDING_BINARY_OP_IC,
+    COMPARE_IC,
     // No more than 16 kinds. The value currently encoded in four bits in
     // Flags.
 
     // Pseudo-kinds.
     REGEXP = BUILTIN,
     FIRST_IC_KIND = LOAD_IC,
-    LAST_IC_KIND = BINARY_OP_IC
+    LAST_IC_KIND = COMPARE_IC
   };
 
   enum {
@@ -2936,9 +3058,14 @@ class Code: public HeapObject {
 
   // [relocation_info]: Code relocation information
   DECL_ACCESSORS(relocation_info, ByteArray)
+  void InvalidateRelocation();
 
-  // Unchecked accessor to be used during GC.
+  // [deoptimization_data]: Array containing data for deopt.
+  DECL_ACCESSORS(deoptimization_data, FixedArray)
+
+  // Unchecked accessors to be used during GC.
   inline ByteArray* unchecked_relocation_info();
+  inline FixedArray* unchecked_deoptimization_data();
 
   inline int relocation_size();
 
@@ -2961,10 +3088,77 @@ class Code: public HeapObject {
   inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
   inline bool is_call_stub() { return kind() == CALL_IC; }
   inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
+  inline bool is_binary_op_stub() { return kind() == BINARY_OP_IC; }
+  inline bool is_type_recording_binary_op_stub() {
+    return kind() == TYPE_RECORDING_BINARY_OP_IC;
+  }
+  inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; }
 
   // [major_key]: For kind STUB, BINARY_OP_IC, TYPE_RECORDING_BINARY_OP_IC,
   // or COMPARE_IC, the major key.
   inline int major_key();
-  inline void set_major_key(int major);
+  inline void set_major_key(int value);
+
+  // [optimizable]: For FUNCTION kind, tells if it is optimizable.
+  inline bool optimizable();
+  inline void set_optimizable(bool value);
+
+  // [has_deoptimization_support]: For FUNCTION kind, tells if it has
+  // deoptimization support.
+  inline bool has_deoptimization_support();
+  inline void set_has_deoptimization_support(bool value);
+
+  // [allow_osr_at_loop_nesting_level]: For FUNCTION kind, tells for
+  // how long the function has been marked for OSR and therefore which
+  // level of loop nesting we are willing to do on-stack replacement
+  // for.
+  inline void set_allow_osr_at_loop_nesting_level(int level);
+  inline int allow_osr_at_loop_nesting_level();
+
+  // [stack_slots]: For kind OPTIMIZED_FUNCTION, the number of stack slots
+  // reserved in the code prologue.
+  inline unsigned stack_slots();
+  inline void set_stack_slots(unsigned slots);
+
+  // [safepoint_table_start]: For kind OPTIMIZED_FUNCTION, the offset in
+  // the instruction stream where the safepoint table starts.
+  inline unsigned safepoint_table_start();
+  inline void set_safepoint_table_start(unsigned offset);
+
+  // [stack_check_table_start]: For kind FUNCTION, the offset in the
+  // instruction stream where the stack check table starts.
+  inline unsigned stack_check_table_start();
+  inline void set_stack_check_table_start(unsigned offset);
+
+  // [check type]: For kinds CALL_IC and KEYED_CALL_IC, tells how to check
+  // if the receiver is valid for the given call.
+  inline CheckType check_type();
+  inline void set_check_type(CheckType value);
+
+  // [binary op type]: For all BINARY_OP_IC.
+  inline byte binary_op_type();
+  inline void set_binary_op_type(byte value);
+
+  // [type-recording binary op type]: For all TYPE_RECORDING_BINARY_OP_IC.
+  inline byte type_recording_binary_op_type();
+  inline void set_type_recording_binary_op_type(byte value);
+  inline byte type_recording_binary_op_result_type();
+  inline void set_type_recording_binary_op_result_type(byte value);
+
+  // [compare state]: For COMPARE_IC stubs, tells what state the
+  // stub is in.
+  inline byte compare_state();
+  inline void set_compare_state(byte value);
+
+  // Get the safepoint entry for the given pc. Returns NULL for
+  // non-safepoint pcs.
+  uint8_t* GetSafepointEntry(Address pc);
+
+  // Mark this code object as not having a stack check table.  Assumes kind
+  // is FUNCTION.
+  void SetNoStackCheckTable();
+
+  // Find the first map in an IC stub.
+  Map* FindFirstMap();
 
   // Flags operations.
   static inline Flags ComputeFlags(Kind kind,
@@ -3052,18 +3246,45 @@ class Code: public HeapObject {
   void CodePrint();
   void CodeVerify();
 #endif
+
+  // Max loop nesting marker used to postpone OSR. We don't take loop
+  // nesting that is deeper than 5 levels into account.
+  static const int kMaxLoopNestingMarker = 6;
+
   // Layout description.
   static const int kInstructionSizeOffset = HeapObject::kHeaderSize;
   static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
-  static const int kFlagsOffset = kRelocationInfoOffset + kPointerSize;
+  static const int kDeoptimizationDataOffset =
+      kRelocationInfoOffset + kPointerSize;
+  static const int kFlagsOffset = kDeoptimizationDataOffset + kPointerSize;
   static const int kKindSpecificFlagsOffset  = kFlagsOffset + kIntSize;
+
+  static const int kKindSpecificFlagsSize = 2 * kIntSize;
+
+  static const int kHeaderPaddingStart = kKindSpecificFlagsOffset +
+      kKindSpecificFlagsSize;
+
   // Add padding to align the instruction start following right after
   // the Code object header.
   static const int kHeaderSize =
-      CODE_POINTER_ALIGN(kKindSpecificFlagsOffset + kIntSize);
+      (kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
 
   // Byte offsets within kKindSpecificFlagsOffset.
-  static const int kStubMajorKeyOffset = kKindSpecificFlagsOffset + 1;
+  static const int kStubMajorKeyOffset = kKindSpecificFlagsOffset;
+  static const int kOptimizableOffset = kKindSpecificFlagsOffset;
+  static const int kStackSlotsOffset = kKindSpecificFlagsOffset;
+  static const int kCheckTypeOffset = kKindSpecificFlagsOffset;
+
+  static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
+  static const int kBinaryOpTypeOffset = kStubMajorKeyOffset + 1;
+  static const int kHasDeoptimizationSupportOffset = kOptimizableOffset + 1;
+
+  static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
+  static const int kAllowOSRAtLoopNestingLevelOffset =
+      kHasDeoptimizationSupportOffset + 1;
+
+  static const int kSafepointTableStartOffset = kStackSlotsOffset + kIntSize;
+  static const int kStackCheckTableStartOffset = kStackSlotsOffset + kIntSize;
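
  // Note the deliberate aliasing above: kStubMajorKeyOffset,
  // kOptimizableOffset, kStackSlotsOffset and kCheckTypeOffset all name the
  // same byte, and kSafepointTableStartOffset equals
  // kStackCheckTableStartOffset. This is safe because each constant is used
  // only for a disjoint set of code kinds (stubs and ICs, FUNCTION,
  // OPTIMIZED_FUNCTION, and call stubs respectively), as the ASSERTs in the
  // accessors enforce.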
 
   // Flags layout.
   static const int kFlagsICStateShift        = 0;
@@ -3239,6 +3460,13 @@ class Map: public HeapObject {
   // [stub cache]: contains stubs compiled for this map.
   DECL_ACCESSORS(code_cache, Object)
 
+  // Lookup in the map's instance descriptors and fill out the result
+  // with the given holder if the name is found. The holder may be
+  // NULL when this function is used from the compiler.
+  void LookupInDescriptors(JSObject* holder,
+                           String* name,
+                           LookupResult* result);
+
   MUST_USE_RESULT MaybeObject* CopyDropDescriptors();
 
   MUST_USE_RESULT MaybeObject* CopyNormalized(PropertyNormalizationMode mode,
@@ -3486,6 +3714,25 @@ class Script: public Struct {
 };
 
 
+enum MathFunctionId {
+  kNotSpecialMathFunction = 0,
+  // These numbers must be kept in sync with the ones in math.js.
+  kMathFloor = 1,
+  kMathRound = 2,
+  kMathCeil = 3,
+  kMathAbs = 4,
+  kMathLog = 5,
+  kMathSin = 6,
+  kMathCos = 7,
+  kMathTan = 8,
+  kMathASin = 9,
+  kMathACos = 0xa,
+  kMathATan = 0xb,
+  kMathExp = 0xc,
+  kMathSqrt = 0xd
+};
+
+
 // SharedFunctionInfo describes the JSFunction information that can be
 // shared by multiple instances of the function.
 class SharedFunctionInfo: public HeapObject {
@@ -3687,6 +3934,11 @@ class SharedFunctionInfo: public HeapObject {
   inline int compiler_hints();
   inline void set_compiler_hints(int value);
 
+  // A counter used to determine when to stress the deoptimizer with a
+  // deopt.
+  inline Smi* deopt_counter();
+  inline void set_deopt_counter(Smi* counter);
+
   // Add information on assignments of the form this.x = ...;
   void SetThisPropertyAssignmentsInfo(
       bool has_only_simple_this_property_assignments,
@@ -3716,6 +3968,24 @@ class SharedFunctionInfo: public HeapObject {
   inline int code_age();
   inline void set_code_age(int age);
 
+  // Indicates whether optimizations have been disabled for this
+  // shared function info. If a function is repeatedly optimized or if
+  // we cannot optimize the function we disable optimization to avoid
+  // spending time attempting to optimize it again.
+  inline bool optimization_disabled();
+  inline void set_optimization_disabled(bool value);
+
+  // Indicates whether or not the code in the shared function info
+  // supports deoptimization.
+  inline bool has_deoptimization_support();
+
+  // Enable deoptimization support through recompiled code.
+  void EnableDeoptimizationSupport(Code* recompiled);
+
+  // Looks up the bailout ID and ASSERTs that it exists in the
+  // non-optimized code. Returns true if no assertion fired (i.e., always
+  // true when assertions are disabled).
+  bool VerifyBailoutId(int id);
 
   // Check whether an inlined constructor can be generated with the given
   // prototype.
@@ -3739,6 +4009,12 @@ class SharedFunctionInfo: public HeapObject {
   bool HasSourceCode();
   Object* GetSourceCode();
 
+  inline int opt_count();
+  inline void set_opt_count(int opt_count);
+
+  // Source size of this function.
+  int SourceSize();
+
   // Calculate the instance size.
   int CalculateInstanceSize();
 
@@ -3776,10 +4052,12 @@ class SharedFunctionInfo: public HeapObject {
       kInferredNameOffset + kPointerSize;
   static const int kThisPropertyAssignmentsOffset =
       kInitialMapOffset + kPointerSize;
+  static const int kDeoptCounterOffset =
+      kThisPropertyAssignmentsOffset + kPointerSize;
 #if V8_HOST_ARCH_32_BIT
   // Smi fields.
   static const int kLengthOffset =
-      kThisPropertyAssignmentsOffset + kPointerSize;
+      kDeoptCounterOffset + kPointerSize;
   static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
   static const int kExpectedNofPropertiesOffset =
       kFormalParameterCountOffset + kPointerSize;
@@ -3795,8 +4073,10 @@ class SharedFunctionInfo: public HeapObject {
       kFunctionTokenPositionOffset + kPointerSize;
   static const int kThisPropertyAssignmentsCountOffset =
       kCompilerHintsOffset + kPointerSize;
+  static const int kOptCountOffset =
+      kThisPropertyAssignmentsCountOffset + kPointerSize;
   // Total size.
-  static const int kSize = kThisPropertyAssignmentsCountOffset + kPointerSize;
+  static const int kSize = kOptCountOffset + kPointerSize;
 #else
   // The only reason to use smi fields instead of int fields
   // is to allow iteration without maps decoding during
@@ -3808,7 +4088,7 @@ class SharedFunctionInfo: public HeapObject {
   // word is not set and thus this word cannot be treated as pointer
   // to HeapObject during old space traversal.
   static const int kLengthOffset =
-      kThisPropertyAssignmentsOffset + kPointerSize;
+      kDeoptCounterOffset + kPointerSize;
   static const int kFormalParameterCountOffset =
       kLengthOffset + kIntSize;
 
@@ -3829,9 +4109,11 @@ class SharedFunctionInfo: public HeapObject {
 
   static const int kThisPropertyAssignmentsCountOffset =
       kCompilerHintsOffset + kIntSize;
+  static const int kOptCountOffset =
+      kThisPropertyAssignmentsCountOffset + kIntSize;
 
   // Total size.
-  static const int kSize = kThisPropertyAssignmentsCountOffset + kIntSize;
+  static const int kSize = kOptCountOffset + kIntSize;
 
 #endif
 
@@ -3848,6 +4130,12 @@ class SharedFunctionInfo: public HeapObject {
 
   static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
 
+  // Get/set a special tag on the functions from math.js so we can inline
+  // efficient versions of them in the code.
+  inline MathFunctionId math_function_id();
+  inline void set_math_function_id(int id);
+  static inline int max_math_id_number() { return kMathFunctionMask; }
+
   typedef FixedBodyDescriptor<kNameOffset,
                               kThisPropertyAssignmentsOffset + kPointerSize,
                               kSize> BodyDescriptor;
@@ -3865,9 +4153,12 @@ class SharedFunctionInfo: public HeapObject {
   static const int kHasOnlySimpleThisPropertyAssignments = 0;
   static const int kTryFullCodegen = 1;
   static const int kAllowLazyCompilation = 2;
-  static const int kLiveObjectsMayExist = 3;
-  static const int kCodeAgeShift = 4;
-  static const int kCodeAgeMask = 7;
+  static const int kMathFunctionShift = 3;
+  static const int kMathFunctionMask = 0xf;
+  static const int kLiveObjectsMayExist = 7;
+  static const int kCodeAgeShift = 8;
+  static const int kCodeAgeMask = 0x7;
+  static const int kOptimizationDisabled = 11;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
 };
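
// A standalone sketch of the compiler hints packing implied by the bit
// constants above: bits 0 through 2 hold boolean hints, the math function
// id sits in bits 3 through 6 (kMathFunctionShift/kMathFunctionMask), bit 7
// is kLiveObjectsMayExist, the code age occupies bits 8 through 10
// (kCodeAgeShift/kCodeAgeMask), and bit 11 is kOptimizationDisabled.
#include <cassert>

int main() {
  const int kMathFunctionShift = 3;
  const int kMathFunctionMask = 0xf;
  const int kCodeAgeShift = 8;
  const int kCodeAgeMask = 0x7;
  const int kOptimizationDisabled = 11;

  int hints = 0;
  hints |= (0xd & kMathFunctionMask) << kMathFunctionShift;  // kMathSqrt.
  hints |= (2 & kCodeAgeMask) << kCodeAgeShift;              // Code age 2.
  hints |= 1 << kOptimizationDisabled;                       // BooleanBit::set.

  assert(((hints >> kMathFunctionShift) & kMathFunctionMask) == 0xd);
  assert(((hints >> kCodeAgeShift) & kCodeAgeMask) == 2);
  assert(((hints >> kOptimizationDisabled) & 1) == 1);  // BooleanBit::get.
  return 0;
}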
@@ -3895,13 +4186,34 @@ class JSFunction: public JSObject {
   // [[Call]] and [[Construct]] description in ECMA-262, section
   // 8.6.2, page 27.
   inline Code* code();
-  inline void set_code(Code* value);
+  inline void set_code(Code* code);
+  inline void ReplaceCode(Code* code);
 
   inline Code* unchecked_code();
 
   // Tells whether this function is builtin.
   inline bool IsBuiltin();
 
+  // Tells whether or not the function needs arguments adaptation.
+  inline bool NeedsArgumentsAdaption();
+
+  // Tells whether or not this function has been optimized.
+  inline bool IsOptimized();
+
+  // Mark this function for lazy recompilation. The function will be
+  // recompiled the next time it is executed.
+  void MarkForLazyRecompilation();
+
+  // Tells whether or not the function is already marked for lazy
+  // recompilation.
+  inline bool IsMarkedForLazyRecompilation();
+
+  // Compute a hash code for the source code of this function.
+  uint32_t SourceHash();
+
+  // Check whether or not this function is inlineable.
+  bool IsInlineable();
+
   // [literals]: Fixed array holding the materialized literals.
   //
   // If the function contains object, regexp or array literals, the
@@ -3948,6 +4260,13 @@ class JSFunction: public JSObject {
   // Returns whether this function has been compiled to native code.
   inline bool is_compiled();
 
+  // [next_function_link]: Field for linking functions. This list is treated as
+  // a weak list by the GC.
+  DECL_ACCESSORS(next_function_link, Object)
+
+  // Prints the name of the function using PrintF.
+  void PrintName();
+
   // Casting.
   static inline JSFunction* cast(Object* obj);
 
@@ -3967,7 +4286,8 @@ class JSFunction: public JSObject {
   // Retrieve the global context from a function's literal array.
   static Context* GlobalContextFromLiterals(FixedArray* literals);
 
-  // Layout descriptors.
+  // Layout descriptors. The last property (from kNonWeakFieldsEndOffset to
+  // kSize) is weak and has special handling during garbage collection.
   static const int kCodeEntryOffset = JSObject::kHeaderSize;
   static const int kPrototypeOrInitialMapOffset =
       kCodeEntryOffset + kPointerSize;
@@ -3975,7 +4295,9 @@ class JSFunction: public JSObject {
       kPrototypeOrInitialMapOffset + kPointerSize;
   static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize;
   static const int kLiteralsOffset = kContextOffset + kPointerSize;
-  static const int kSize = kLiteralsOffset + kPointerSize;
+  static const int kNonWeakFieldsEndOffset = kLiteralsOffset + kPointerSize;
+  static const int kNextFunctionLinkOffset = kNonWeakFieldsEndOffset;
+  static const int kSize = kNextFunctionLinkOffset + kPointerSize;
 
   // Layout of the literals array.
   static const int kLiteralsPrefixSize = 1;
@@ -4020,6 +4342,7 @@ class JSGlobalProxy : public JSObject {
 
 // Forward declaration.
 class JSBuiltinsObject;
+class JSGlobalPropertyCell;
 
 // Common super class for JavaScript global objects and the special
 // builtins global objects.
@@ -4035,7 +4358,7 @@ class GlobalObject: public JSObject {
   DECL_ACCESSORS(global_receiver, JSObject)
 
   // Retrieve the property cell used to store a property.
-  Object* GetPropertyCell(LookupResult* result);
+  JSGlobalPropertyCell* GetPropertyCell(LookupResult* result);
 
   // This is like GetProperty, but is used when you know the lookup won't fail
   // by throwing an exception.  This is for the debug and builtins global
@@ -4297,6 +4620,9 @@ class CompilationCacheTable: public HashTable<CompilationCacheShape,
   MaybeObject* PutEval(String* src, Context* context, Object* value);
   MaybeObject* PutRegExp(String* src, JSRegExp::Flags flags, FixedArray* value);
 
+  // Remove given value from cache.
+  void Remove(Object* value);
+
   static inline CompilationCacheTable* cast(Object* obj);
 
  private:
@@ -5705,6 +6031,9 @@ class ObjectVisitor BASE_EMBEDDED {
   // Visits a code entry in a JS function.
   virtual void VisitCodeEntry(Address entry_address);
 
+  // Visits a global property cell reference in the instruction stream.
+  virtual void VisitGlobalPropertyCell(RelocInfo* rinfo);
+
   // Visits a runtime entry in the instruction stream.
   virtual void VisitRuntimeEntry(RelocInfo* rinfo) {}
 
index db36297..056332b 100644 (file)
@@ -595,6 +595,7 @@ Parser::Parser(Handle<Script> script,
       pre_data_(pre_data),
       fni_(NULL),
       stack_overflow_(false) {
+  AstNode::ResetIds();
 }
 
 
@@ -705,6 +706,9 @@ FunctionLiteral* Parser::ParseLazy(Handle<SharedFunctionInfo> info) {
   if (result == NULL) {
     Top::StackOverflow();
     zone_scope.DeleteOnExit();
+  } else {
+    Handle<String> inferred_name(info->inferred_name());
+    result->set_inferred_name(inferred_name);
   }
   return result;
 }
@@ -1794,7 +1798,7 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
     *default_seen_ptr = true;
   }
   Expect(Token::COLON, CHECK_OK);
-
+  int pos = scanner().location().beg_pos;
   ZoneList<Statement*>* statements = new ZoneList<Statement*>(5);
   while (peek() != Token::CASE &&
          peek() != Token::DEFAULT &&
@@ -1803,7 +1807,7 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
     statements->Add(stat);
   }
 
-  return new CaseClause(label, statements);
+  return new CaseClause(label, statements, pos);
 }
 
 
@@ -1875,7 +1879,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
   }
 
   Block* catch_block = NULL;
-  VariableProxy* catch_var = NULL;
+  Variable* catch_var = NULL;
   Block* finally_block = NULL;
 
   Token::Value tok = peek();
@@ -1905,7 +1909,8 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
       // executing the finally block.
       catch_var = top_scope_->NewTemporary(Factory::catch_var_symbol());
       Literal* name_literal = new Literal(name);
-      Expression* obj = new CatchExtensionObject(name_literal, catch_var);
+      VariableProxy* catch_var_use = new VariableProxy(catch_var);
+      Expression* obj = new CatchExtensionObject(name_literal, catch_var_use);
       { Target target(&this->target_stack_, &catch_collector);
         catch_block = WithHelper(obj, NULL, true, CHECK_OK);
       }
@@ -1929,8 +1934,9 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
   //   'try { try { } catch { } } finally { }'
 
   if (catch_block != NULL && finally_block != NULL) {
+    VariableProxy* catch_var_defn = new VariableProxy(catch_var);
     TryCatchStatement* statement =
-        new TryCatchStatement(try_block, catch_var, catch_block);
+        new TryCatchStatement(try_block, catch_var_defn, catch_block);
     statement->set_escaping_targets(collector.targets());
     try_block = new Block(NULL, 1, false);
     try_block->AddStatement(statement);
@@ -1940,7 +1946,8 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
   TryStatement* result = NULL;
   if (catch_block != NULL) {
     ASSERT(finally_block == NULL);
-    result = new TryCatchStatement(try_block, catch_var, catch_block);
+    VariableProxy* catch_var_defn = new VariableProxy(catch_var);
+    result = new TryCatchStatement(try_block, catch_var_defn, catch_block);
     result->set_escaping_targets(collector.targets());
   } else {
     ASSERT(finally_block != NULL);
@@ -2814,6 +2821,7 @@ bool Parser::IsBoilerplateProperty(ObjectLiteral::Property* property) {
 
 
 bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
+  if (expression->AsLiteral() != NULL) return true;
   MaterializedLiteral* lit = expression->AsMaterializedLiteral();
   return lit != NULL && lit->is_simple();
 }
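
With the added early return, a bare Literal now qualifies as a
compile-time value in addition to simple materialized literals. A
hedged illustration (handle allocation simplified for the sketch):

    Expression* one = new Literal(Handle<Object>(Smi::FromInt(1)));
    bool is_ctv = CompileTimeValue::IsCompileTimeValue(one);  // now true
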
index 0d89a16..b58d066 100644 (file)
@@ -53,6 +53,7 @@
 #include "v8.h"
 
 #include "platform.h"
+#include "vm-state-inl.h"
 
 
 namespace v8 {
@@ -616,10 +617,9 @@ class Sampler::PlatformData : public Malloced {
 };
 
 
-Sampler::Sampler(int interval, bool profiling)
+Sampler::Sampler(int interval)
     : interval_(interval),
-      profiling_(profiling),
-      synchronous_(profiling),
+      profiling_(false),
       active_(false),
       samples_taken_(0) {
   data_ = new PlatformData();
index cb8e919..6948653 100644 (file)
@@ -59,6 +59,7 @@
 #include "platform.h"
 #include "top.h"
 #include "v8threads.h"
+#include "vm-state-inl.h"
 
 
 namespace v8 {
@@ -651,6 +652,16 @@ class LinuxMutex : public Mutex {
     return result;
   }
 
+  virtual bool TryLock() {
+    int result = pthread_mutex_trylock(&mutex_);
+    // Return false if the lock is busy and locking failed.
+    if (result == EBUSY) {
+      return false;
+    }
+    ASSERT(result == 0);  // Verify no other errors.
+    return true;
+  }
+
  private:
   pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
 };
@@ -734,6 +745,7 @@ Semaphore* OS::CreateSemaphore(int count) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
 static Sampler* active_sampler_ = NULL;
+static int vm_tid_ = 0;
 
 
 #if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
@@ -762,50 +774,51 @@ enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
 #endif
 
 
+static int GetThreadID() {
+  // Glibc doesn't provide a wrapper for gettid(2).
+  return syscall(SYS_gettid);
+}
+
+
 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
 #ifndef V8_HOST_ARCH_MIPS
   USE(info);
   if (signal != SIGPROF) return;
-  if (active_sampler_ == NULL) return;
+  if (active_sampler_ == NULL || !active_sampler_->IsActive()) return;
+  if (vm_tid_ != GetThreadID()) return;
 
   TickSample sample_obj;
   TickSample* sample = CpuProfiler::TickSampleEvent();
   if (sample == NULL) sample = &sample_obj;
 
-  // We always sample the VM state.
-  sample->state = VMState::current_state();
-
-  // If profiling, we extract the current pc and sp.
-  if (active_sampler_->IsProfiling()) {
-    // Extracting the sample from the context is extremely machine dependent.
-    ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-    mcontext_t& mcontext = ucontext->uc_mcontext;
+  // Extracting the sample from the context is extremely machine dependent.
+  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+  mcontext_t& mcontext = ucontext->uc_mcontext;
+  sample->state = Top::current_vm_state();
 #if V8_HOST_ARCH_IA32
-    sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
-    sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
-    sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
+  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
+  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
+  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
 #elif V8_HOST_ARCH_X64
-    sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
-    sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
-    sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
+  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
+  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
+  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
 #elif V8_HOST_ARCH_ARM
 // An undefined macro evaluates to 0, so this applies to Android's Bionic also.
 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
-    sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
-    sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
-    sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
+  sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
+  sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
+  sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
 #else
-    sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
-    sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
-    sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
+  sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
+  sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
+  sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
 #endif
 #elif V8_HOST_ARCH_MIPS
-    // Implement this on MIPS.
-    UNIMPLEMENTED();
+  // Implement this on MIPS.
+  UNIMPLEMENTED();
 #endif
-    active_sampler_->SampleStack(sample);
-  }
-
+  active_sampler_->SampleStack(sample);
   active_sampler_->Tick(sample);
 #endif
 }
@@ -813,43 +826,64 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
 
 class Sampler::PlatformData : public Malloced {
  public:
+  enum SleepInterval {
+    FULL_INTERVAL,
+    HALF_INTERVAL
+  };
+
   explicit PlatformData(Sampler* sampler)
       : sampler_(sampler),
         signal_handler_installed_(false),
         vm_tgid_(getpid()),
-        // Glibc doesn't provide a wrapper for gettid(2).
-        vm_tid_(syscall(SYS_gettid)),
         signal_sender_launched_(false) {
   }
 
   void SignalSender() {
     while (sampler_->IsActive()) {
-      // Glibc doesn't provide a wrapper for tgkill(2).
-      syscall(SYS_tgkill, vm_tgid_, vm_tid_, SIGPROF);
-      // Convert ms to us and subtract 100 us to compensate delays
-      // occuring during signal delivery.
-      const useconds_t interval = sampler_->interval_ * 1000 - 100;
-      int result = usleep(interval);
-#ifdef DEBUG
-      if (result != 0 && errno != EINTR) {
-        fprintf(stderr,
-                "SignalSender usleep error; interval = %u, errno = %d\n",
-                interval,
-                errno);
-        ASSERT(result == 0 || errno == EINTR);
+      if (rate_limiter_.SuspendIfNecessary()) continue;
+      if (sampler_->IsProfiling() && RuntimeProfiler::IsEnabled()) {
+        SendProfilingSignal();
+        Sleep(HALF_INTERVAL);
+        RuntimeProfiler::NotifyTick();
+        Sleep(HALF_INTERVAL);
+      } else {
+        if (sampler_->IsProfiling()) SendProfilingSignal();
+        if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
+        Sleep(FULL_INTERVAL);
       }
-#endif
-      USE(result);
     }
   }
 
+  void SendProfilingSignal() {
+    // Glibc doesn't provide a wrapper for tgkill(2).
+    syscall(SYS_tgkill, vm_tgid_, vm_tid_, SIGPROF);
+  }
+
+  void Sleep(SleepInterval full_or_half) {
+    // Convert ms to us and subtract 100 us to compensate for delays
+    // occurring during signal delivery.
+    useconds_t interval = sampler_->interval_ * 1000 - 100;
+    if (full_or_half == HALF_INTERVAL) interval /= 2;
+    int result = usleep(interval);
+#ifdef DEBUG
+    if (result != 0 && errno != EINTR) {
+      fprintf(stderr,
+              "SignalSender usleep error; interval = %u, errno = %d\n",
+              interval,
+              errno);
+      ASSERT(result == 0 || errno == EINTR);
+    }
+#endif
+    USE(result);
+  }
+
   Sampler* sampler_;
   bool signal_handler_installed_;
   struct sigaction old_signal_handler_;
   int vm_tgid_;
-  int vm_tid_;
   bool signal_sender_launched_;
   pthread_t signal_sender_thread_;
+  RuntimeProfilerRateLimiter rate_limiter_;
 };
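
When both the CPU profiler and the runtime profiler are active, the
sender interleaves them on half intervals so that SIGPROF delivery and
NotifyTick are evenly spaced. A worked timing sketch for a 1 ms
sampling interval:

    // usleep interval = 1 * 1000 - 100 = 900 us; halves of 450 us.
    //   t = 0      -> SendProfilingSignal()
    //   t = 450 us -> RuntimeProfiler::NotifyTick()
    //   t = 900 us -> next SendProfilingSignal()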
 
 
@@ -861,10 +895,9 @@ static void* SenderEntry(void* arg) {
 }
 
 
-Sampler::Sampler(int interval, bool profiling)
+Sampler::Sampler(int interval)
     : interval_(interval),
-      profiling_(profiling),
-      synchronous_(profiling),
+      profiling_(false),
       active_(false),
       samples_taken_(0) {
   data_ = new PlatformData(this);
@@ -880,7 +913,8 @@ Sampler::~Sampler() {
 void Sampler::Start() {
   // There can only be one active sampler at the time on POSIX
   // platforms.
-  if (active_sampler_ != NULL) return;
+  ASSERT(!IsActive());
+  vm_tid_ = GetThreadID();
 
   // Request profiling signals.
   struct sigaction sa;
@@ -893,7 +927,7 @@ void Sampler::Start() {
   // Start a thread that sends SIGPROF signal to VM thread.
   // Sending the signal ourselves instead of relying on itimer provides
   // much better accuracy.
-  active_ = true;
+  SetActive(true);
   if (pthread_create(
           &data_->signal_sender_thread_, NULL, SenderEntry, data_) == 0) {
     data_->signal_sender_launched_ = true;
@@ -905,11 +939,12 @@ void Sampler::Start() {
 
 
 void Sampler::Stop() {
-  active_ = false;
+  SetActive(false);
 
   // Wait for signal sender termination (it will exit after setting
   // active_ to false).
   if (data_->signal_sender_launched_) {
+    Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
     pthread_join(data_->signal_sender_thread_, NULL);
     data_->signal_sender_launched_ = false;
   }
index c3f21dc..85c7088 100644 (file)
@@ -57,6 +57,7 @@
 #include "v8.h"
 
 #include "platform.h"
+#include "vm-state-inl.h"
 
 // Manually define these here as weak imports, rather than including execinfo.h.
 // This lets us launch on 10.4 which does not have these calls.
@@ -483,11 +484,20 @@ class MacOSMutex : public Mutex {
     pthread_mutex_init(&mutex_, &attr);
   }
 
-  ~MacOSMutex() { pthread_mutex_destroy(&mutex_); }
+  virtual ~MacOSMutex() { pthread_mutex_destroy(&mutex_); }
 
-  int Lock() { return pthread_mutex_lock(&mutex_); }
+  virtual int Lock() { return pthread_mutex_lock(&mutex_); }
+  virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }
 
-  int Unlock() { return pthread_mutex_unlock(&mutex_); }
+  virtual bool TryLock() {
+    int result = pthread_mutex_trylock(&mutex_);
+    // Return false if the lock is busy and locking failed.
+    if (result == EBUSY) {
+      return false;
+    }
+    ASSERT(result == 0);  // Verify no other errors.
+    return true;
+  }
 
  private:
   pthread_mutex_t mutex_;
@@ -554,40 +564,38 @@ class Sampler::PlatformData : public Malloced {
   mach_port_t task_self_;
   thread_act_t profiled_thread_;
   pthread_t sampler_thread_;
+  RuntimeProfilerRateLimiter rate_limiter_;
 
   // Sampler thread handler.
   void Runner() {
-    // Loop until the sampler is disengaged, keeping the specified
-    // sampling frequency.
-    for ( ; sampler_->IsActive(); OS::Sleep(sampler_->interval_)) {
+    while (sampler_->IsActive()) {
+      if (rate_limiter_.SuspendIfNecessary()) continue;
+      Sample();
+      OS::Sleep(sampler_->interval_);
+    }
+  }
+
+  void Sample() {
+    if (sampler_->IsProfiling()) {
       TickSample sample_obj;
       TickSample* sample = CpuProfiler::TickSampleEvent();
       if (sample == NULL) sample = &sample_obj;
 
-      // If the sampler runs in sync with the JS thread, we try to
-      // suspend it. If we fail, we skip the current sample.
-      if (sampler_->IsSynchronous()) {
-        if (KERN_SUCCESS != thread_suspend(profiled_thread_)) continue;
-      }
+      if (KERN_SUCCESS != thread_suspend(profiled_thread_)) return;
 
-      // We always sample the VM state.
-      sample->state = VMState::current_state();
-
-      // If profiling, we record the pc and sp of the profiled thread.
-      if (sampler_->IsProfiling()) {
 #if V8_HOST_ARCH_X64
-        thread_state_flavor_t flavor = x86_THREAD_STATE64;
-        x86_thread_state64_t state;
-        mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
+      thread_state_flavor_t flavor = x86_THREAD_STATE64;
+      x86_thread_state64_t state;
+      mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
 #if __DARWIN_UNIX03
 #define REGISTER_FIELD(name) __r ## name
 #else
 #define REGISTER_FIELD(name) r ## name
 #endif  // __DARWIN_UNIX03
 #elif V8_HOST_ARCH_IA32
-        thread_state_flavor_t flavor = i386_THREAD_STATE;
-        i386_thread_state_t state;
-        mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
+      thread_state_flavor_t flavor = i386_THREAD_STATE;
+      i386_thread_state_t state;
+      mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
 #if __DARWIN_UNIX03
 #define REGISTER_FIELD(name) __e ## name
 #else
@@ -597,24 +605,20 @@ class Sampler::PlatformData : public Malloced {
 #error Unsupported Mac OS X host architecture.
 #endif  // V8_HOST_ARCH
 
-        if (thread_get_state(profiled_thread_,
-                             flavor,
-                             reinterpret_cast<natural_t*>(&state),
-                             &count) == KERN_SUCCESS) {
-          sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
-          sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
-          sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
-          sampler_->SampleStack(sample);
-        }
+      if (thread_get_state(profiled_thread_,
+                           flavor,
+                           reinterpret_cast<natural_t*>(&state),
+                           &count) == KERN_SUCCESS) {
+        sample->state = Top::current_vm_state();
+        sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
+        sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
+        sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
+        sampler_->SampleStack(sample);
+        sampler_->Tick(sample);
       }
-
-      // Invoke tick handler with program counter and stack pointer.
-      sampler_->Tick(sample);
-
-      // If the sampler runs in sync with the JS thread, we have to
-      // remember to resume it.
-      if (sampler_->IsSynchronous()) thread_resume(profiled_thread_);
+      thread_resume(profiled_thread_);
     }
+    if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
   }
 };
 
@@ -630,10 +634,9 @@ static void* SamplerEntry(void* arg) {
 }
 
 
-Sampler::Sampler(int interval, bool profiling)
+Sampler::Sampler(int interval)
     : interval_(interval),
-      profiling_(profiling),
-      synchronous_(profiling),
+      profiling_(false),
       active_(false),
       samples_taken_(0) {
   data_ = new PlatformData(this);
@@ -646,11 +649,9 @@ Sampler::~Sampler() {
 
 
 void Sampler::Start() {
-  // If we are starting a synchronous sampler, we need to be able to
-  // access the calling thread.
-  if (IsSynchronous()) {
-    data_->profiled_thread_ = mach_thread_self();
-  }
+  // Do not start multiple threads for the same sampler.
+  ASSERT(!IsActive());
+  data_->profiled_thread_ = mach_thread_self();
 
   // Create sampler thread with high priority.
   // According to POSIX spec, when SCHED_FIFO policy is used, a thread
@@ -663,7 +664,7 @@ void Sampler::Start() {
   fifo_param.sched_priority = sched_get_priority_max(SCHED_FIFO);
   pthread_attr_setschedparam(&sched_attr, &fifo_param);
 
-  active_ = true;
+  SetActive(true);
   pthread_create(&data_->sampler_thread_, &sched_attr, SamplerEntry, data_);
 }
 
@@ -671,15 +672,14 @@ void Sampler::Start() {
 void Sampler::Stop() {
   // Setting active to false triggers termination of the sampler
   // thread.
-  active_ = false;
+  SetActive(false);
 
   // Wait for sampler thread to terminate.
+  Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
   pthread_join(data_->sampler_thread_, NULL);
 
   // Deallocate Mach port for thread.
-  if (IsSynchronous()) {
-    mach_port_deallocate(data_->task_self_, data_->profiled_thread_);
-  }
+  mach_port_deallocate(data_->task_self_, data_->profiled_thread_);
 }
 
 #endif  // ENABLE_LOGGING_AND_PROFILING
index b5caa5e..f34483d 100644 (file)
@@ -35,6 +35,7 @@
 #include "v8.h"
 
 #include "platform.h"
+#include "vm-state-inl.h"
 
 
 namespace v8 {
index 0751fc7..b698d16 100644 (file)
@@ -52,6 +52,7 @@
 #include "v8.h"
 
 #include "platform.h"
+#include "vm-state-inl.h"
 
 
 namespace v8 {
@@ -571,10 +572,9 @@ class Sampler::PlatformData : public Malloced {
 };
 
 
-Sampler::Sampler(int interval, bool profiling)
+Sampler::Sampler(int interval)
     : interval_(interval),
-      profiling_(profiling),
-      synchronous_(profiling),
+      profiling_(false),
       active_(false),
       samples_taken_(0) {
   data_ = new PlatformData();
index ff5d83b..f84e80d 100644 (file)
@@ -52,6 +52,7 @@
 #include "v8.h"
 
 #include "platform.h"
+#include "vm-state-inl.h"
 
 
 // It seems there is a bug in some Solaris distributions (experienced in
@@ -601,10 +602,9 @@ class Sampler::PlatformData : public Malloced {
 };
 
 
-Sampler::Sampler(int interval, bool profiling)
+Sampler::Sampler(int interval)
     : interval_(interval),
-      profiling_(profiling),
-      synchronous_(profiling),
+      profiling_(false),
       active_(false),
       samples_taken_(0) {
   data_ = new PlatformData();
index 7791d62..badc31b 100644 (file)
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Platform specific code for Win32.
-#ifndef WIN32_LEAN_AND_MEAN
-// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI.
-#define WIN32_LEAN_AND_MEAN
-#endif
-#ifndef NOMINMAX
-#define NOMINMAX
-#endif
-#ifndef NOKERNEL
-#define NOKERNEL
-#endif
-#ifndef NOUSER
-#define NOUSER
-#endif
-#ifndef NOSERVICE
-#define NOSERVICE
-#endif
-#ifndef NOSOUND
-#define NOSOUND
-#endif
-#ifndef NOMCX
-#define NOMCX
-#endif
-// Require Windows XP or higher (this is required for the RtlCaptureContext
-// function to be present).
-#ifndef _WIN32_WINNT
-#define _WIN32_WINNT 0x501
-#endif
 
-#include <windows.h>
-
-#include <time.h>  // For LocalOffset() implementation.
-#include <mmsystem.h>  // For timeGetTime().
-#ifdef __MINGW32__
-// Require Windows XP or higher when compiling with MinGW. This is for MinGW
-// header files to expose getaddrinfo.
-#undef _WIN32_WINNT
-#define _WIN32_WINNT 0x501
-#endif  // __MINGW32__
-#ifndef __MINGW32__
-#include <dbghelp.h>  // For SymLoadModule64 and al.
-#endif  // __MINGW32__
-#include <limits.h>  // For INT_MAX and al.
-#include <tlhelp32.h>  // For Module32First and al.
-
-// These additional WIN32 includes have to be right here as the #undef's below
-// makes it impossible to have them elsewhere.
-#include <winsock2.h>
-#include <ws2tcpip.h>
-#include <process.h>  // for _beginthreadex()
-#include <stdlib.h>
-
-#undef VOID
-#undef DELETE
-#undef IN
-#undef THIS
-#undef CONST
-#undef NAN
-#undef GetObject
-#undef CreateMutex
-#undef CreateSemaphore
+#define V8_WIN32_HEADERS_FULL
+#include "win32-headers.h"
 
 #include "v8.h"
 
 #include "platform.h"
+#include "vm-state-inl.h"
 
 // Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
 // refer to The Open Group Base Specification for specification of the correct
@@ -1162,7 +1106,7 @@ static bool LoadSymbols(HANDLE process_handle) {
   // Initialize the symbol engine.
   ok = _SymInitialize(process_handle,  // hProcess
                       NULL,            // UserSearchPath
-                      FALSE);          // fInvadeProcess
+                      false);          // fInvadeProcess
   if (!ok) return false;
 
   DWORD options = _SymGetOptions();
@@ -1422,7 +1366,7 @@ bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
   ASSERT(IsReserved());
-  return VirtualFree(address, size, MEM_DECOMMIT) != FALSE;
+  return VirtualFree(address, size, MEM_DECOMMIT) != false;
 }
 
 
@@ -1582,18 +1526,24 @@ class Win32Mutex : public Mutex {
 
   Win32Mutex() { InitializeCriticalSection(&cs_); }
 
-  ~Win32Mutex() { DeleteCriticalSection(&cs_); }
+  virtual ~Win32Mutex() { DeleteCriticalSection(&cs_); }
 
-  int Lock() {
+  virtual int Lock() {
     EnterCriticalSection(&cs_);
     return 0;
   }
 
-  int Unlock() {
+  virtual int Unlock() {
     LeaveCriticalSection(&cs_);
     return 0;
   }
 
+
+  virtual bool TryLock() {
+    // Returns true if the critical section was entered successfully.
+    return TryEnterCriticalSection(&cs_);
+  }
+
  private:
   CRITICAL_SECTION cs_;  // Critical section used for mutex
 };
@@ -1776,7 +1726,7 @@ int Win32Socket::Receive(char* data, int len) const {
 
 
 bool Win32Socket::SetReuseAddress(bool reuse_address) {
-  BOOL on = reuse_address ? TRUE : FALSE;
+  BOOL on = reuse_address ? true : false;
   int status = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR,
                           reinterpret_cast<char*>(&on), sizeof(on));
   return status == SOCKET_ERROR;
@@ -1846,53 +1796,48 @@ class Sampler::PlatformData : public Malloced {
   Sampler* sampler_;
   HANDLE sampler_thread_;
   HANDLE profiled_thread_;
+  RuntimeProfilerRateLimiter rate_limiter_;
 
   // Sampler thread handler.
   void Runner() {
-    // Context used for sampling the register state of the profiled thread.
-    CONTEXT context;
-    memset(&context, 0, sizeof(context));
-    // Loop until the sampler is disengaged, keeping the specified
-    // sampling frequency.
-    for ( ; sampler_->IsActive(); Sleep(sampler_->interval_)) {
+    while (sampler_->IsActive()) {
+      if (rate_limiter_.SuspendIfNecessary()) continue;
+      Sample();
+      Sleep(sampler_->interval_);
+    }
+  }
+
+  void Sample() {
+    if (sampler_->IsProfiling()) {
+      // Context used for sampling the register state of the profiled thread.
+      CONTEXT context;
+      memset(&context, 0, sizeof(context));
+
       TickSample sample_obj;
       TickSample* sample = CpuProfiler::TickSampleEvent();
       if (sample == NULL) sample = &sample_obj;
 
-      // If the sampler runs in sync with the JS thread, we try to
-      // suspend it. If we fail, we skip the current sample.
-      if (sampler_->IsSynchronous()) {
-        static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
-        if (SuspendThread(profiled_thread_) == kSuspendFailed) continue;
-      }
-
-      // We always sample the VM state.
-      sample->state = VMState::current_state();
+      static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
+      if (SuspendThread(profiled_thread_) == kSuspendFailed) return;
+      sample->state = Top::current_vm_state();
 
-      // If profiling, we record the pc and sp of the profiled thread.
-      if (sampler_->IsProfiling()) {
-        context.ContextFlags = CONTEXT_FULL;
-        if (GetThreadContext(profiled_thread_, &context) != 0) {
+      context.ContextFlags = CONTEXT_FULL;
+      if (GetThreadContext(profiled_thread_, &context) != 0) {
 #if V8_HOST_ARCH_X64
-          sample->pc = reinterpret_cast<Address>(context.Rip);
-          sample->sp = reinterpret_cast<Address>(context.Rsp);
-          sample->fp = reinterpret_cast<Address>(context.Rbp);
+        sample->pc = reinterpret_cast<Address>(context.Rip);
+        sample->sp = reinterpret_cast<Address>(context.Rsp);
+        sample->fp = reinterpret_cast<Address>(context.Rbp);
 #else
-          sample->pc = reinterpret_cast<Address>(context.Eip);
-          sample->sp = reinterpret_cast<Address>(context.Esp);
-          sample->fp = reinterpret_cast<Address>(context.Ebp);
+        sample->pc = reinterpret_cast<Address>(context.Eip);
+        sample->sp = reinterpret_cast<Address>(context.Esp);
+        sample->fp = reinterpret_cast<Address>(context.Ebp);
 #endif
-          sampler_->SampleStack(sample);
-        }
+        sampler_->SampleStack(sample);
+        sampler_->Tick(sample);
       }
-
-      // Invoke tick handler with program counter and stack pointer.
-      sampler_->Tick(sample);
-
-      // If the sampler runs in sync with the JS thread, we have to
-      // remember to resume it.
-      if (sampler_->IsSynchronous()) ResumeThread(profiled_thread_);
+      ResumeThread(profiled_thread_);
     }
+    if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
   }
 };
 
@@ -1907,10 +1852,9 @@ static unsigned int __stdcall SamplerEntry(void* arg) {
 
 
 // Initialize a profile sampler.
-Sampler::Sampler(int interval, bool profiling)
+Sampler::Sampler(int interval)
     : interval_(interval),
-      profiling_(profiling),
-      synchronous_(profiling),
+      profiling_(false),
       active_(false),
       samples_taken_(0) {
   data_ = new PlatformData(this);
@@ -1924,26 +1868,25 @@ Sampler::~Sampler() {
 
 // Start profiling.
 void Sampler::Start() {
-  // If we are starting a synchronous sampler, we need to be able to
-  // access the calling thread.
-  if (IsSynchronous()) {
-    // Get a handle to the calling thread. This is the thread that we are
-    // going to profile. We need to make a copy of the handle because we are
-    // going to use it in the sampler thread. Using GetThreadHandle() will
-    // not work in this case. We're using OpenThread because DuplicateHandle
-    // for some reason doesn't work in Chrome's sandbox.
-    data_->profiled_thread_ = OpenThread(THREAD_GET_CONTEXT |
-                                         THREAD_SUSPEND_RESUME |
-                                         THREAD_QUERY_INFORMATION,
-                                         FALSE,
-                                         GetCurrentThreadId());
-    BOOL ok = data_->profiled_thread_ != NULL;
-    if (!ok) return;
-  }
+  // Do not start multiple threads for the same sampler.
+  ASSERT(!IsActive());
+
+  // Get a handle to the calling thread. This is the thread that we are
+  // going to profile. We need to make a copy of the handle because we are
+  // going to use it in the sampler thread. Using GetThreadHandle() will
+  // not work in this case. We're using OpenThread because DuplicateHandle
+  // for some reason doesn't work in Chrome's sandbox.
+  data_->profiled_thread_ = OpenThread(THREAD_GET_CONTEXT |
+                                       THREAD_SUSPEND_RESUME |
+                                       THREAD_QUERY_INFORMATION,
+                                       false,
+                                       GetCurrentThreadId());
+  BOOL ok = data_->profiled_thread_ != NULL;
+  if (!ok) return;
 
   // Start sampler thread.
   unsigned int tid;
-  active_ = true;
+  SetActive(true);
   data_->sampler_thread_ = reinterpret_cast<HANDLE>(
       _beginthreadex(NULL, 0, SamplerEntry, data_, 0, &tid));
   // Set thread to high priority to increase sampling accuracy.
@@ -1955,9 +1898,10 @@ void Sampler::Start() {
 void Sampler::Stop() {
   // Setting active to false triggers termination of the sampler
   // thread.
-  active_ = false;
+  SetActive(false);
 
   // Wait for sampler thread to terminate.
+  Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
   WaitForSingleObject(data_->sampler_thread_, INFINITE);
 
   // Release the thread handles
index 49efc3c..68a2689 100644 (file)
@@ -113,6 +113,8 @@ int signbit(double x);
 
 #endif  // __GNUC__
 
+#include "atomicops.h"
+
 namespace v8 {
 namespace internal {
 
@@ -433,6 +435,10 @@ class Mutex {
   // Unlocks the given mutex. The mutex is assumed to be locked and owned by
   // the calling thread on entrance.
   virtual int Unlock() = 0;
+
+  // Tries to lock the given mutex. Returns whether the mutex was
+  // successfully locked.
+  virtual bool TryLock() = 0;
 };
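
A hedged usage sketch of the new non-blocking acquisition, assuming the
existing OS::CreateMutex factory; DoFallbackWork is a hypothetical
function used only for illustration:

    Mutex* mutex = OS::CreateMutex();
    if (mutex->TryLock()) {
      // ... critical section ...
      mutex->Unlock();
    } else {
      DoFallbackWork();  // hypothetical: lock was busy, don't block
    }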
 
 
@@ -554,7 +560,7 @@ class TickSample {
 class Sampler {
  public:
   // Initialize sampler.
-  Sampler(int interval, bool profiling);
+  explicit Sampler(int interval);
   virtual ~Sampler();
 
   // Performs stack sampling.
@@ -572,16 +578,12 @@ class Sampler {
   void Stop();
 
   // Is the sampler used for profiling?
-  bool IsProfiling() const { return profiling_; }
-
-  // Is the sampler running in sync with the JS thread? On platforms
-  // where the sampler is implemented with a thread that wakes up
-  // every now and then, having a synchronous sampler implies
-  // suspending/resuming the JS thread.
-  bool IsSynchronous() const { return synchronous_; }
+  bool IsProfiling() const { return NoBarrier_Load(&profiling_) > 0; }
+  void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
+  void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }
 
   // Whether the sampler is running (that is, consumes resources).
-  bool IsActive() const { return active_; }
+  bool IsActive() const { return NoBarrier_Load(&active_); }
 
   // Used in tests to make sure that stack sampling is performed.
   int samples_taken() const { return samples_taken_; }
@@ -593,12 +595,12 @@ class Sampler {
   virtual void DoSampleStack(TickSample* sample) = 0;
 
  private:
+  void SetActive(bool value) { NoBarrier_Store(&active_, value); }
   void IncSamplesTaken() { if (++samples_taken_ < 0) samples_taken_ = 0; }
 
   const int interval_;
-  const bool profiling_;
-  const bool synchronous_;
-  bool active_;
+  Atomic32 profiling_;
+  Atomic32 active_;
   PlatformData* data_;  // Platform specific data.
   int samples_taken_;  // Counts stack samples taken.
   DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
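
profiling_ is now an atomic depth counter instead of a construction
flag, so several clients can share one sampler. A sketch of the
counting idiom this enables:

    sampler->IncreaseProfilingDepth();  // client A attaches
    sampler->IncreaseProfilingDepth();  // client B attaches
    ASSERT(sampler->IsProfiling());     // depth == 2
    sampler->DecreaseProfilingDepth();  // A detaches; still profiling
    sampler->DecreaseProfilingDepth();  // B detaches; IsProfiling() false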
index dfff49a..c83de34 100644 (file)
@@ -51,6 +51,7 @@ class PrettyPrinter: public AstVisitor {
   // Print a node to stdout.
   static void PrintOut(AstNode* node);
 
+  virtual void VisitSlot(Slot* node);
   // Individual nodes
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
@@ -85,9 +86,11 @@ class AstPrinter: public PrettyPrinter {
   const char* PrintProgram(FunctionLiteral* program);
 
   // Individual nodes
+  virtual void VisitSlot(Slot* node);
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
+
  private:
   friend class IndentedScope;
   void PrintIndented(const char* txt);
@@ -160,6 +163,7 @@ class JsonAstBuilder: public PrettyPrinter {
   void AddAttribute(const char* name, bool value);
 
   // AST node visit functions.
+  virtual void VisitSlot(Slot* node);
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
index 1a6ea95..ff4661f 100644 (file)
@@ -603,8 +603,8 @@ CpuProfile* CpuProfilesCollection::GetProfile(int security_token_id,
   }
   List<CpuProfile*>* list = GetProfilesList(security_token_id);
   if (list->at(index) == NULL) {
-      list->at(index) =
-          unabridged_list->at(index)->FilteredClone(security_token_id);
+    (*list)[index] =
+        unabridged_list->at(index)->FilteredClone(security_token_id);
   }
   return list->at(index);
 }
@@ -653,7 +653,7 @@ List<CpuProfile*>* CpuProfilesCollection::Profiles(int security_token_id) {
   const int current_count = unabridged_list->length();
   for (int i = 0; i < current_count; ++i) {
     if (list->at(i) == NULL) {
-      list->at(i) = unabridged_list->at(i)->FilteredClone(security_token_id);
+      (*list)[i] = unabridged_list->at(i)->FilteredClone(security_token_id);
     }
   }
   return list;
@@ -1403,7 +1403,7 @@ void HeapSnapshot::FillReversePostorderIndexes(Vector<HeapEntry*>* entries) {
     }
     if (!has_new_edges) {
       entry->set_ordered_index(current_entry);
-      entries->at(current_entry++) = entry;
+      (*entries)[current_entry++] = entry;
       nodes_to_visit.RemoveLast();
     }
   }
@@ -1427,8 +1427,8 @@ void HeapSnapshot::BuildDominatorTree(const Vector<HeapEntry*>& entries,
                                       Vector<HeapEntry*>* dominators) {
   if (entries.length() == 0) return;
   const int root_index = entries.length() - 1;
-  for (int i = 0; i < root_index; ++i) dominators->at(i) = NULL;
-  dominators->at(root_index) = entries[root_index];
+  for (int i = 0; i < root_index; ++i) (*dominators)[i] = NULL;
+  (*dominators)[root_index] = entries[root_index];
   bool changed = true;
   while (changed) {
     changed = false;
@@ -1454,7 +1454,7 @@ void HeapSnapshot::BuildDominatorTree(const Vector<HeapEntry*>& entries,
         }
       }
       if (new_idom != NULL && dominators->at(i) != new_idom) {
-        dominators->at(i) = new_idom;
+        (*dominators)[i] = new_idom;
         changed = true;
       }
     }
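
These hunks consistently route writes through (*vec)[i] while keeping
at(i) for reads. A hedged reading of the convention, assuming at() is
the (checked) read accessor on these containers:

    HeapEntry* current = dominators->at(i);  // read access
    (*dominators)[i] = new_idom;             // writable slot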
index 4715a72..537b4e6 100644 (file)
@@ -266,12 +266,26 @@ class LookupResult BASE_EMBEDDED {
     return Map::cast(GetValue());
   }
 
+  Map* GetTransitionMapFromMap(Map* map) {
+    ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
+    ASSERT(type() == MAP_TRANSITION);
+    return Map::cast(map->instance_descriptors()->GetValue(number_));
+  }
+
   int GetFieldIndex() {
     ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
     ASSERT(type() == FIELD);
     return Descriptor::IndexFromValue(GetValue());
   }
 
+  int GetLocalFieldIndexFromMap(Map* map) {
+    ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
+    ASSERT(type() == FIELD);
+    return Descriptor::IndexFromValue(
+        map->instance_descriptors()->GetValue(number_)) -
+        map->inobject_properties();
+  }
+
   int GetDictionaryEntry() {
     ASSERT(lookup_type_ == DICTIONARY_TYPE);
     return number_;
@@ -282,6 +296,12 @@ class LookupResult BASE_EMBEDDED {
     return JSFunction::cast(GetValue());
   }
 
+  JSFunction* GetConstantFunctionFromMap(Map* map) {
+    ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
+    ASSERT(type() == CONSTANT_FUNCTION);
+    return JSFunction::cast(map->instance_descriptors()->GetValue(number_));
+  }
+
   Object* GetCallbackObject() {
     if (lookup_type_ == CONSTANT_TYPE) {
       // For now we only have the __proto__ as constant type.
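
The new *FromMap variants recompute a LookupResult's answer against an
explicit map, letting compiled code reuse a lookup captured at compile
time without holding a live object. A hedged sketch (the descriptor
lookup call is assumed, not confirmed by this hunk):

    LookupResult lookup;
    map->LookupInDescriptors(NULL, name, &lookup);  // assumed API
    if (lookup.type() == FIELD) {
      int index = lookup.GetLocalFieldIndexFromMap(map);  // in-object index
    }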
index b6f8240..3d737a4 100644 (file)
@@ -222,11 +222,6 @@ void AstOptimizer::VisitConditional(Conditional* node) {
 }
 
 
-void AstOptimizer::VisitSlot(Slot* node) {
-  USE(node);
-}
-
-
 void AstOptimizer::VisitVariableProxy(VariableProxy* node) {
   Variable* var = node->AsVariable();
   if (var != NULL) {
@@ -686,7 +681,7 @@ void AstOptimizer::VisitThisFunction(ThisFunction* node) {
 
 class Processor: public AstVisitor {
  public:
-  explicit Processor(VariableProxy* result)
+  explicit Processor(Variable* result)
       : result_(result),
         result_assigned_(false),
         is_set_(false),
@@ -697,7 +692,7 @@ class Processor: public AstVisitor {
   bool result_assigned() const { return result_assigned_; }
 
  private:
-  VariableProxy* result_;
+  Variable* result_;
 
   // We are not tracking result usage via the result_'s use
   // counts (we leave the accurate computation to the
@@ -714,7 +709,8 @@ class Processor: public AstVisitor {
 
   Expression* SetResult(Expression* value) {
     result_assigned_ = true;
-    return new Assignment(Token::ASSIGN, result_, value,
+    VariableProxy* result_proxy = new VariableProxy(result_);
+    return new Assignment(Token::ASSIGN, result_proxy, value,
                           RelocInfo::kNoPosition);
   }
 
@@ -869,12 +865,6 @@ void Processor::VisitConditional(Conditional* node) {
 }
 
 
-void Processor::VisitSlot(Slot* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
 void Processor::VisitVariableProxy(VariableProxy* node) {
   USE(node);
   UNREACHABLE();
@@ -999,12 +989,15 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
 
   ZoneList<Statement*>* body = function->body();
   if (!body->is_empty()) {
-    VariableProxy* result = scope->NewTemporary(Factory::result_symbol());
+    Variable* result = scope->NewTemporary(Factory::result_symbol());
     Processor processor(result);
     processor.Process(body);
     if (processor.HasStackOverflow()) return false;
 
-    if (processor.result_assigned()) body->Add(new ReturnStatement(result));
+    if (processor.result_assigned()) {
+      VariableProxy* result_proxy = new VariableProxy(result);
+      body->Add(new ReturnStatement(result_proxy));
+    }
   }
 
   return true;
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
new file mode 100644 (file)
index 0000000..4b135a6
--- /dev/null
@@ -0,0 +1,385 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "runtime-profiler.h"
+
+#include "assembler.h"
+#include "code-stubs.h"
+#include "compilation-cache.h"
+#include "deoptimizer.h"
+#include "execution.h"
+#include "global-handles.h"
+#include "scopeinfo.h"
+#include "top.h"
+
+namespace v8 {
+namespace internal {
+
+
+class PendingListNode : public Malloced {
+ public:
+  explicit PendingListNode(JSFunction* function);
+  ~PendingListNode() { Destroy(); }
+
+  PendingListNode* next() const { return next_; }
+  void set_next(PendingListNode* node) { next_ = node; }
+  Handle<JSFunction> function() { return Handle<JSFunction>::cast(function_); }
+
+  // If the function is garbage collected before we've had the chance
+  // to optimize it, the weak handle will be null.
+  bool IsValid() { return !function_.is_null(); }
+
+  // Returns the number of microseconds this node has been pending.
+  int Delay() const { return static_cast<int>(OS::Ticks() - start_); }
+
+ private:
+  void Destroy();
+  static void WeakCallback(v8::Persistent<v8::Value> object, void* data);
+
+  PendingListNode* next_;
+  Handle<Object> function_;  // Weak handle.
+  int64_t start_;
+};
+
+
+// Optimization sampler constants.
+static const int kSamplerFrameCount = 2;
+static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
+static const int kSamplerWindowSize = 16;
+
+static const int kSamplerTicksDelta = 32;
+
+static const int kSamplerThresholdInit = 3;
+static const int kSamplerThresholdMin = 1;
+static const int kSamplerThresholdDelta = 1;
+
+static const int kSamplerThresholdSizeFactorInit = 3;
+static const int kSamplerThresholdSizeFactorMin = 1;
+static const int kSamplerThresholdSizeFactorDelta = 1;
+
+static const int kSizeLimit = 1500;
+
+static int sampler_threshold = kSamplerThresholdInit;
+static int sampler_threshold_size_factor = kSamplerThresholdSizeFactorInit;
+
+
+// The JSFunctions in the sampler window are not GC safe. Old-space
+// pointers are not cleared during mark-sweep collection and therefore
+// the window might contain stale pointers. The window is updated on
+// scavenges and (parts of it) cleared on mark-sweep and
+// mark-sweep-compact.
+static JSFunction* sampler_window[kSamplerWindowSize] = { NULL, };
+static int sampler_window_position = 0;
+static int sampler_window_weight[kSamplerWindowSize] = { 0, };
+
+
+// Support for pending 'optimize soon' requests.
+static PendingListNode* optimize_soon_list = NULL;
+
+
+PendingListNode::PendingListNode(JSFunction* function) : next_(NULL) {
+  function_ = GlobalHandles::Create(function);
+  start_ = OS::Ticks();
+  GlobalHandles::MakeWeak(function_.location(), this, &WeakCallback);
+}
+
+
+void PendingListNode::Destroy() {
+  if (!IsValid()) return;
+  GlobalHandles::Destroy(function_.location());
+  function_ = Handle<Object>::null();
+}
+
+
+void PendingListNode::WeakCallback(v8::Persistent<v8::Value>, void* data) {
+  reinterpret_cast<PendingListNode*>(data)->Destroy();
+}
+
+
+static bool IsOptimizable(JSFunction* function) {
+  Code* code = function->code();
+  return code->kind() == Code::FUNCTION && code->optimizable();
+}
+
+
+static void Optimize(JSFunction* function, bool eager, int delay) {
+  ASSERT(IsOptimizable(function));
+  if (FLAG_trace_opt) {
+    PrintF("[marking (%s) ", eager ? "eagerly" : "lazily");
+    function->PrintName();
+    PrintF(" for recompilation");
+    if (delay > 0) {
+      PrintF(" (delayed %0.3f ms)", static_cast<double>(delay) / 1000);
+    }
+    PrintF("]\n");
+  }
+
+  // The next call to the function will trigger optimization.
+  function->MarkForLazyRecompilation();
+}
+
+
+static void AttemptOnStackReplacement(JSFunction* function) {
+  // See AlwaysFullCompiler (in compiler.cc) comment on why we need
+  // Debug::has_break_points().
+  ASSERT(function->IsMarkedForLazyRecompilation());
+  if (!FLAG_use_osr || Debug::has_break_points() || function->IsBuiltin()) {
+    return;
+  }
+
+  SharedFunctionInfo* shared = function->shared();
+  // If the code is not optimizable, don't try OSR.
+  if (!shared->code()->optimizable()) return;
+
+  // We are not prepared to do OSR for a function that already has an
+  // allocated arguments object.  The optimized code would bypass it for
+  // arguments accesses, which is unsound.  Don't try OSR.
+  if (shared->scope_info()->HasArgumentsShadow()) return;
+
+  // We're using on-stack replacement: patch the unoptimized code so that
+  // any back edge in any unoptimized frame will trigger on-stack
+  // replacement for that frame.
+  if (FLAG_trace_osr) {
+    PrintF("[patching stack checks in ");
+    function->PrintName();
+    PrintF(" for on-stack replacement]\n");
+  }
+
+  // Get the stack check stub code object to match against.  We aren't
+  // prepared to generate it, but we don't expect to have to.
+  StackCheckStub check_stub;
+  Object* check_code;
+  MaybeObject* maybe_check_code = check_stub.TryGetCode();
+  if (maybe_check_code->ToObject(&check_code)) {
+    Code* replacement_code = Builtins::builtin(Builtins::OnStackReplacement);
+    Code* unoptimized_code = shared->code();
+    // Iterate the unoptimized code and patch every stack check except at
+    // the function entry.  This code assumes the function entry stack
+    // check appears first, i.e., is not deferred or otherwise reordered.
+    bool first = true;
+    for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask);
+         !it.done();
+         it.next()) {
+      RelocInfo* rinfo = it.rinfo();
+      if (rinfo->target_address() == Code::cast(check_code)->entry()) {
+        if (first) {
+          first = false;
+        } else {
+          Deoptimizer::PatchStackCheckCode(rinfo, replacement_code);
+        }
+      }
+    }
+  }
+}
+
+
+static void ClearSampleBuffer() {
+  for (int i = 0; i < kSamplerWindowSize; i++) {
+    sampler_window[i] = NULL;
+    sampler_window_weight[i] = 0;
+  }
+}
+
+
+static void ClearSampleBufferNewSpaceEntries() {
+  for (int i = 0; i < kSamplerWindowSize; i++) {
+    if (Heap::InNewSpace(sampler_window[i])) {
+      sampler_window[i] = NULL;
+      sampler_window_weight[i] = 0;
+    }
+  }
+}
+
+
+static int LookupSample(JSFunction* function) {
+  int weight = 0;
+  for (int i = 0; i < kSamplerWindowSize; i++) {
+    JSFunction* sample = sampler_window[i];
+    if (sample != NULL) {
+      if (function == sample) {
+        weight += sampler_window_weight[i];
+      }
+    }
+  }
+  return weight;
+}
+
+
+static void AddSample(JSFunction* function, int weight) {
+  ASSERT(IsPowerOf2(kSamplerWindowSize));
+  sampler_window[sampler_window_position] = function;
+  sampler_window_weight[sampler_window_position] = weight;
+  sampler_window_position = (sampler_window_position + 1) &
+      (kSamplerWindowSize - 1);
+}
+
+
+void RuntimeProfiler::OptimizeNow() {
+  HandleScope scope;
+  PendingListNode* current = optimize_soon_list;
+  while (current != NULL) {
+    PendingListNode* next = current->next();
+    if (current->IsValid()) {
+      Handle<JSFunction> function = current->function();
+      int delay = current->Delay();
+      if (IsOptimizable(*function)) {
+        Optimize(*function, true, delay);
+      }
+    }
+    delete current;
+    current = next;
+  }
+  optimize_soon_list = NULL;
+
+  // Run through the JavaScript frames and collect them. If we already
+  // have a sample of the function, we mark it for optimization
+  // (eagerly or lazily).
+  JSFunction* samples[kSamplerFrameCount];
+  int count = 0;
+  for (JavaScriptFrameIterator it;
+       count < kSamplerFrameCount && !it.done();
+       it.Advance()) {
+    JavaScriptFrame* frame = it.frame();
+    JSFunction* function = JSFunction::cast(frame->function());
+    int function_size = function->shared()->SourceSize();
+    int threshold_size_factor;
+    if (function_size > kSizeLimit) {
+      threshold_size_factor = sampler_threshold_size_factor;
+    } else {
+      threshold_size_factor = 1;
+    }
+
+    int threshold = sampler_threshold * threshold_size_factor;
+    samples[count++] = function;
+    if (function->IsMarkedForLazyRecompilation()) {
+      Code* unoptimized = function->shared()->code();
+      int nesting = unoptimized->allow_osr_at_loop_nesting_level();
+      if (nesting == 0) AttemptOnStackReplacement(function);
+      int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
+      unoptimized->set_allow_osr_at_loop_nesting_level(new_nesting);
+    } else if (LookupSample(function) >= threshold) {
+      if (IsOptimizable(function)) {
+        Optimize(function, false, 0);
+        CompilationCache::MarkForEagerOptimizing(Handle<JSFunction>(function));
+      }
+    }
+  }
+
+  // Add the collected functions as samples. It's important not to do
+  // this as part of collecting them because this will interfere with
+  // the sample lookup in case of recursive functions.
+  for (int i = 0; i < count; i++) {
+    AddSample(samples[i], kSamplerFrameWeight[i]);
+  }
+}
+
+
+void RuntimeProfiler::OptimizeSoon(JSFunction* function) {
+  if (!IsOptimizable(function)) return;
+  PendingListNode* node = new PendingListNode(function);
+  node->set_next(optimize_soon_list);
+  optimize_soon_list = node;
+}
+
+
+void RuntimeProfiler::NotifyTick() {
+  StackGuard::RequestRuntimeProfilerTick();
+}
+
+
+void RuntimeProfiler::MarkCompactPrologue(bool is_compacting) {
+  if (is_compacting) {
+    // Clear all samples before mark-sweep-compact because every
+    // function might move.
+    ClearSampleBuffer();
+  } else {
+    // Clear only new space entries on mark-sweep since none of the
+    // old-space functions will move.
+    ClearSampleBufferNewSpaceEntries();
+  }
+}
+
+
+bool IsEqual(void* first, void* second) {
+  return first == second;
+}
+
+
+void RuntimeProfiler::Setup() {
+  ClearSampleBuffer();
+  // If the ticker hasn't already started, make sure to do so to get
+  // the ticks for the runtime profiler.
+  if (IsEnabled()) Logger::EnsureTickerStarted();
+}
+
+
+void RuntimeProfiler::Reset() {
+  sampler_threshold = kSamplerThresholdInit;
+  sampler_threshold_size_factor = kSamplerThresholdSizeFactorInit;
+}
+
+
+void RuntimeProfiler::TearDown() {
+  // Nothing to do.
+}
+
+
+Object** RuntimeProfiler::SamplerWindowAddress() {
+  return reinterpret_cast<Object**>(sampler_window);
+}
+
+
+int RuntimeProfiler::SamplerWindowSize() {
+  return kSamplerWindowSize;
+}
+
+
+bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
+  static const int kNonJSTicksThreshold = 100;
+  // We suspend the runtime profiler thread when not running
+  // JavaScript. If the CPU profiler is active we must not do this
+  // because it samples both JavaScript and C++ code.
+  if (RuntimeProfiler::IsEnabled() &&
+      !CpuProfiler::is_profiling() &&
+      !(FLAG_prof && FLAG_prof_auto)) {
+    if (Top::IsInJSState()) {
+      non_js_ticks_ = 0;
+    } else {
+      if (non_js_ticks_ < kNonJSTicksThreshold) {
+        ++non_js_ticks_;
+      } else {
+        if (Top::WaitForJSState()) return true;
+      }
+    }
+  }
+  return false;
+}
+
+
+} }  // namespace v8::internal
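
A hedged arithmetic sketch of the heuristic in OptimizeNow: with the
default constants, each tick adds weight 2 for the top frame and 1 for
its caller, and a function is optimized once its summed window weight
reaches sampler_threshold (3), scaled by sampler_threshold_size_factor
(3) when the source exceeds kSizeLimit (1500) characters:

    // Mirrors the decision in OptimizeNow above; sketch only.
    static bool ShouldOptimize(JSFunction* f, int source_size) {
      int factor =
          source_size > kSizeLimit ? sampler_threshold_size_factor : 1;
      return LookupSample(f) >= sampler_threshold * factor;
    }

So a small function on top of the stack on two consecutive ticks has
accumulated weight 4 >= 3 and is marked on the third tick.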
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h
new file mode 100644 (file)
index 0000000..e041c05
--- /dev/null
@@ -0,0 +1,76 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_RUNTIME_PROFILER_H_
+#define V8_RUNTIME_PROFILER_H_
+
+#include "v8.h"
+#include "allocation.h"
+
+namespace v8 {
+namespace internal {
+
+class RuntimeProfiler : public AllStatic {
+ public:
+  static bool IsEnabled() { return V8::UseCrankshaft() && FLAG_opt; }
+
+  static void OptimizeNow();
+  static void OptimizeSoon(JSFunction* function);
+
+  static void NotifyTick();
+
+  static void Setup();
+  static void Reset();
+  static void TearDown();
+
+  static void MarkCompactPrologue(bool is_compacting);
+  static Object** SamplerWindowAddress();
+  static int SamplerWindowSize();
+};
+
+
+// Rate limiter intended to be used in the profiler thread.
+class RuntimeProfilerRateLimiter BASE_EMBEDDED {
+ public:
+  RuntimeProfilerRateLimiter() : non_js_ticks_(0) { }
+
+  // Suspends the current thread when not executing JavaScript to
+  // minimize CPU usage. Returns whether this thread was suspended
+  // (and so might have to check whether profiling is still active).
+  //
+  // Does nothing when runtime profiling is not enabled.
+  bool SuspendIfNecessary();
+
+ private:
+  int non_js_ticks_;
+
+  DISALLOW_COPY_AND_ASSIGN(RuntimeProfilerRateLimiter);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_RUNTIME_PROFILER_H_
index 4477ed9..c747046 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 #include "api.h"
 #include "arguments.h"
 #include "codegen.h"
+#include "compilation-cache.h"
 #include "compiler.h"
 #include "cpu.h"
 #include "dateparser-inl.h"
 #include "debug.h"
+#include "deoptimizer.h"
 #include "execution.h"
 #include "jsregexp.h"
 #include "liveedit.h"
 #include "parser.h"
 #include "platform.h"
 #include "runtime.h"
+#include "runtime-profiler.h"
 #include "scopeinfo.h"
 #include "smart-pointer.h"
 #include "stub-cache.h"
@@ -611,6 +614,22 @@ static MaybeObject* Runtime_SetHiddenPrototype(Arguments args) {
 }
 
 
+// Sets the magic number that identifies a function as one of the special
+// math functions that can be inlined.
+static MaybeObject* Runtime_SetMathFunctionId(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSFunction, function, args[0]);
+  CONVERT_CHECKED(Smi, id, args[1]);
+  RUNTIME_ASSERT(id->value() >= 0);
+  RUNTIME_ASSERT(id->value() < SharedFunctionInfo::max_math_id_number());
+
+  function->shared()->set_math_function_id(id->value());
+
+  return Heap::undefined_value();
+}
+
+
 static MaybeObject* Runtime_IsConstructCall(Arguments args) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 0);
@@ -1640,14 +1659,13 @@ static MaybeObject* Runtime_FunctionGetScriptSourcePosition(Arguments args) {
 static MaybeObject* Runtime_FunctionGetPositionForOffset(Arguments args) {
   ASSERT(args.length() == 2);
 
-  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  CONVERT_CHECKED(Code, code, args[0]);
   CONVERT_NUMBER_CHECKED(int, offset, Int32, args[1]);
 
-  Code* code = fun->code();
   RUNTIME_ASSERT(0 <= offset && offset < code->Size());
 
   Address pc = code->address() + offset;
-  return Smi::FromInt(fun->code()->SourcePosition(pc));
+  return Smi::FromInt(code->SourcePosition(pc));
 }
 
 
@@ -1724,10 +1742,14 @@ static MaybeObject* Runtime_SetCode(Arguments args) {
     if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
       return Failure::Exception();
     }
+    // Since we don't store the source for this function, it should
+    // never be optimized.
+    shared->code()->set_optimizable(false);
+
     // Set the code, scope info, formal parameter count,
     // and the length of the target function.
     target->shared()->set_code(shared->code());
-    target->set_code(shared->code());
+    target->ReplaceCode(shared->code());
     target->shared()->set_scope_info(shared->scope_info());
     target->shared()->set_length(shared->length());
     target->shared()->set_formal_parameter_count(
@@ -1757,6 +1779,7 @@ static MaybeObject* Runtime_SetCode(Arguments args) {
     // It's okay to skip the write barrier here because the literals
     // are guaranteed to be in old space.
     target->set_literals(*literals, SKIP_WRITE_BARRIER);
+    target->set_next_function_link(Heap::undefined_value());
   }
 
   target->set_context(*context);
@@ -5339,6 +5362,13 @@ static MaybeObject* Runtime_NumberToSmi(Arguments args) {
 }
 
 
+static MaybeObject* Runtime_AllocateHeapNumber(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 0);
+  return Heap::AllocateHeapNumber(0);
+}
+
+
 static MaybeObject* Runtime_NumberAdd(Arguments args) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
@@ -6611,9 +6641,12 @@ static MaybeObject* Runtime_NewObject(Arguments args) {
     }
   }
 
-  // The function should be compiled for the optimization hints to be available.
+  // The function should be compiled for the optimization hints to be
+  // available. We cannot use EnsureCompiled because that forces a
+  // compilation through the shared function info, which makes it
+  // impossible for us to optimize.
   Handle<SharedFunctionInfo> shared(function->shared());
-  EnsureCompiled(shared, CLEAR_EXCEPTION);
+  if (!function->is_compiled()) CompileLazy(function, CLEAR_EXCEPTION);
 
   if (!function->has_initial_map() &&
       shared->IsInobjectSlackTrackingInProgress()) {
@@ -6657,7 +6690,7 @@ static MaybeObject* Runtime_LazyCompile(Arguments args) {
 #ifdef DEBUG
   if (FLAG_trace_lazy && !function->shared()->is_compiled()) {
     PrintF("[lazy: ");
-    function->shared()->name()->Print();
+    function->PrintName();
     PrintF("]\n");
   }
 #endif
@@ -6674,10 +6707,236 @@ static MaybeObject* Runtime_LazyCompile(Arguments args) {
     return Failure::Exception();
   }
 
+  // All done. Return the compiled code.
+  ASSERT(function->is_compiled());
   return function->code();
 }
 
 
+static MaybeObject* Runtime_LazyRecompile(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  Handle<JSFunction> function = args.at<JSFunction>(0);
+  // If the function is not optimizable or the debugger is active,
+  // continue using the code from the full compiler.
+  if (!function->shared()->code()->optimizable() ||
+      Debug::has_break_points()) {
+    function->ReplaceCode(function->shared()->code());
+    return function->code();
+  }
+  if (CompileOptimized(function, AstNode::kNoNumber)) {
+    return function->code();
+  }
+  function->ReplaceCode(function->shared()->code());
+  return Failure::Exception();
+}
+
+
+static MaybeObject* Runtime_NotifyDeoptimized(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  RUNTIME_ASSERT(args[0]->IsSmi());
+  Deoptimizer::BailoutType type =
+      static_cast<Deoptimizer::BailoutType>(Smi::cast(args[0])->value());
+  Deoptimizer* deoptimizer = Deoptimizer::Grab();
+  ASSERT(Heap::IsAllocationAllowed());
+  int frames = deoptimizer->output_count();
+
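+  // Match each materialized output frame with its JavaScript frame on
+  // the stack: output frame (frames - i - 1) corresponds to the i'th
+  // frame from the top.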
+  JavaScriptFrameIterator it;
+  JavaScriptFrame* frame = NULL;
+  for (int i = 0; i < frames; i++) {
+    if (i != 0) it.Advance();
+    frame = it.frame();
+    deoptimizer->InsertHeapNumberValues(frames - i - 1, frame);
+  }
+  delete deoptimizer;
+
+  RUNTIME_ASSERT(frame->function()->IsJSFunction());
+  Handle<JSFunction> function(JSFunction::cast(frame->function()));
+  Handle<Object> arguments;
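+  // Expression slots holding the hole stand in for the arguments
+  // object; materialize it and fill those slots in.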
+  for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
+    if (frame->GetExpression(i) == Heap::the_hole_value()) {
+      if (arguments.is_null()) {
+        // FunctionGetArguments can't throw an exception, so cast away the
+        // doubt with an assert.
+        arguments = Handle<Object>(
+            Accessors::FunctionGetArguments(*function,
+                                            NULL)->ToObjectUnchecked());
+        ASSERT(*arguments != Heap::null_value());
+        ASSERT(*arguments != Heap::undefined_value());
+      }
+      frame->SetExpression(i, *arguments);
+    }
+  }
+
+  CompilationCache::MarkForLazyOptimizing(function);
+  if (type == Deoptimizer::EAGER) {
+    RUNTIME_ASSERT(function->IsOptimized());
+  } else {
+    RUNTIME_ASSERT(!function->IsOptimized());
+  }
+
+  // Avoid doing too much work when running with --always-opt and keep
+  // the optimized code around.
+  if (FLAG_always_opt || type == Deoptimizer::LAZY) {
+    return Heap::undefined_value();
+  }
+
+  // Count the number of optimized activations of the function.
+  int activations = 0;
+  while (!it.done()) {
+    JavaScriptFrame* frame = it.frame();
+    if (frame->is_optimized() && frame->function() == *function) {
+      activations++;
+    }
+    it.Advance();
+  }
+
+  // TODO(kasperl): For now, we cannot support removing the optimized
+  // code when we have recursive invocations of the same function.
+  if (activations == 0) {
+    if (FLAG_trace_deopt) {
+      PrintF("[removing optimized code for: ");
+      function->PrintName();
+      PrintF("]\n");
+    }
+    function->ReplaceCode(function->shared()->code());
+  }
+  return Heap::undefined_value();
+}
+
+
+static MaybeObject* Runtime_NotifyOSR(Arguments args) {
+  Deoptimizer* deoptimizer = Deoptimizer::Grab();
+  delete deoptimizer;
+  return Heap::undefined_value();
+}
+
+
+static MaybeObject* Runtime_DeoptimizeFunction(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  if (!function->IsOptimized()) return Heap::undefined_value();
+
+  Deoptimizer::DeoptimizeFunction(*function);
+
+  return Heap::undefined_value();
+}
+
+
+static MaybeObject* Runtime_CompileForOnStackReplacement(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+
+  // We're not prepared to handle a function with an arguments object.
+  ASSERT(!function->shared()->scope_info()->HasArgumentsShadow());
+
+  // We have hit a back edge in an unoptimized frame for a function that was
+  // selected for on-stack replacement.  Find the unoptimized code object.
+  Handle<Code> unoptimized(function->shared()->code());
+  // Keep track of whether we've succeeded in optimizing.
+  bool succeeded = unoptimized->optimizable();
+  if (succeeded) {
+    // If we are trying to do OSR when there are already optimized
+    // activations of the function, it means (a) the function is directly or
+    // indirectly recursive and (b) an optimized invocation has been
+    // deoptimized so that we are currently in an unoptimized activation.
+    // Check for optimized activations of this function.
+    JavaScriptFrameIterator it;
+    while (succeeded && !it.done()) {
+      JavaScriptFrame* frame = it.frame();
+      succeeded = !frame->is_optimized() || frame->function() != *function;
+      it.Advance();
+    }
+  }
+
+  int ast_id = AstNode::kNoNumber;
+  if (succeeded) {
+    // The top JS function is this one, the PC is somewhere in the
+    // unoptimized code.
+    JavaScriptFrameIterator it;
+    JavaScriptFrame* frame = it.frame();
+    ASSERT(frame->function() == *function);
+    ASSERT(frame->code() == *unoptimized);
+    ASSERT(unoptimized->contains(frame->pc()));
+
+    // Use linear search of the unoptimized code's stack check table to find
+    // the AST id matching the PC.
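+    // The table is a uint32 length word followed by that many entries,
+    // each an (AST id, pc offset) pair of uint32 words.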
+    Address start = unoptimized->instruction_start();
+    unsigned target_pc_offset = frame->pc() - start;
+    Address table_cursor = start + unoptimized->stack_check_table_start();
+    uint32_t table_length = Memory::uint32_at(table_cursor);
+    table_cursor += kIntSize;
+    for (unsigned i = 0; i < table_length; ++i) {
+      // Table entries are (AST id, pc offset) pairs.
+      uint32_t pc_offset = Memory::uint32_at(table_cursor + kIntSize);
+      if (pc_offset == target_pc_offset) {
+        ast_id = static_cast<int>(Memory::uint32_at(table_cursor));
+        break;
+      }
+      table_cursor += 2 * kIntSize;
+    }
+    ASSERT(ast_id != AstNode::kNoNumber);
+    if (FLAG_trace_osr) {
+      PrintF("[replacing on-stack at AST id %d in ", ast_id);
+      function->PrintName();
+      PrintF("]\n");
+    }
+
+    // Try to compile the optimized code.  A true return value from
+    // CompileOptimized means that compilation succeeded, not necessarily
+    // that optimization succeeded.
+    if (CompileOptimized(function, ast_id) && function->IsOptimized()) {
+      DeoptimizationInputData* data = DeoptimizationInputData::cast(
+          function->code()->deoptimization_data());
+      if (FLAG_trace_osr) {
+        PrintF("[on-stack replacement offset %d in optimized code]\n",
+               data->OsrPcOffset()->value());
+      }
+      ASSERT(data->OsrAstId()->value() == ast_id);
+      ASSERT(data->OsrPcOffset()->value() >= 0);
+    } else {
+      succeeded = false;
+    }
+  }
+
+  // Revert to the original stack checks in the original unoptimized code.
+  if (FLAG_trace_osr) {
+    PrintF("[restoring original stack checks in ");
+    function->PrintName();
+    PrintF("]\n");
+  }
+  StackCheckStub check_stub;
+  Handle<Code> check_code = check_stub.GetCode();
+  Handle<Code> replacement_code(
+      Builtins::builtin(Builtins::OnStackReplacement));
+  // Iterate the unoptimized code and revert all the patched stack checks.
+  for (RelocIterator it(*unoptimized, RelocInfo::kCodeTargetMask);
+       !it.done();
+       it.next()) {
+    RelocInfo* rinfo = it.rinfo();
+    if (rinfo->target_address() == replacement_code->entry()) {
+      Deoptimizer::RevertStackCheckCode(rinfo, *check_code);
+    }
+  }
+
+  // Allow OSR only at nesting level zero again.
+  unoptimized->set_allow_osr_at_loop_nesting_level(0);
+
+  // If the optimization attempt succeeded, return the AST id tagged as a
+  // smi. This tells the builtin that we need to translate the unoptimized
+  // frame to an optimized one.
+  if (succeeded) {
+    ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
+    return Smi::FromInt(ast_id);
+  } else {
+    return Smi::FromInt(-1);
+  }
+}
+
+
 static MaybeObject* Runtime_GetFunctionDelegate(Arguments args) {
   HandleScope scope;
   ASSERT(args.length() == 1);
@@ -7955,7 +8214,7 @@ static MaybeObject* Runtime_GetArrayKeys(Arguments args) {
     int keys_length = keys->length();
     for (int i = 0; i < keys_length; i++) {
       Object* key = keys->get(i);
-      uint32_t index;
+      uint32_t index = 0;
       if (!key->ToArrayIndex(&index) || index >= length) {
         // Zap invalid keys.
         keys->set_undefined(i);
@@ -8082,6 +8341,7 @@ static MaybeObject* DebugLookupResultValue(Object* receiver, String* name,
         MaybeObject* maybe_value = receiver->GetPropertyWithCallback(
             receiver, structure, name, result->holder());
         if (!maybe_value->ToObject(&value)) {
+          if (maybe_value->IsRetryAfterGC()) return maybe_value;
           ASSERT(maybe_value->IsException());
           maybe_value = Top::pending_exception();
           Top::clear_pending_exception();
@@ -8382,6 +8642,9 @@ static MaybeObject* Runtime_GetFrameDetails(Arguments args) {
   }
   if (it.done()) return Heap::undefined_value();
 
+  bool is_optimized_frame =
+      it.frame()->code()->kind() == Code::OPTIMIZED_FUNCTION;
+
   // Traverse the saved contexts chain to find the active context for the
   // selected frame.
   SaveContext* save = Top::save_context();
@@ -8413,18 +8676,28 @@ static MaybeObject* Runtime_GetFrameDetails(Arguments args) {
   // (e.g. .result)?  For users of the debugger, they will probably be
   // confusing.
   Handle<FixedArray> locals = Factory::NewFixedArray(info.NumberOfLocals() * 2);
+
+  // Fill in the names of the locals.
   for (int i = 0; i < info.NumberOfLocals(); i++) {
-    // Name of the local.
     locals->set(i * 2, *info.LocalName(i));
+  }
 
-    // Fetch the value of the local - either from the stack or from a
-    // heap-allocated context.
-    if (i < info.number_of_stack_slots()) {
+  // Fill in the values of the locals.
+  for (int i = 0; i < info.NumberOfLocals(); i++) {
+    if (is_optimized_frame) {
+      // If we are inspecting an optimized frame, use undefined as the
+      // value for all locals.
+      //
+      // TODO(3141533): We should be able to get the correct values
+      // for locals in optimized frames.
+      locals->set(i * 2 + 1, Heap::undefined_value());
+    } else if (i < info.number_of_stack_slots()) {
+      // Get the value from the stack.
       locals->set(i * 2 + 1, it.frame()->GetExpression(i));
     } else {
-      Handle<String> name = info.LocalName(i);
       // Traverse the context chain to the function context as all local
       // variables stored in the context will be on the function context.
+      Handle<String> name = info.LocalName(i);
       while (!context->is_function_context()) {
         context = Handle<Context>(context->previous());
       }
@@ -8434,8 +8707,12 @@ static MaybeObject* Runtime_GetFrameDetails(Arguments args) {
     }
   }
 
-  // Check whether this frame is positioned at return.
-  int at_return = (index == 0) ? Debug::IsBreakAtReturn(it.frame()) : false;
+  // Check whether this frame is positioned at return. If it is not the
+  // top frame or if the frame is optimized, it cannot be at a return.
+  bool at_return = false;
+  if (!is_optimized_frame && index == 0) {
+    at_return = Debug::IsBreakAtReturn(it.frame());
+  }
 
   // If positioned just before return find the value to be returned and add it
   // to the frame information.
@@ -8529,8 +8806,13 @@ static MaybeObject* Runtime_GetFrameDetails(Arguments args) {
       details->set(details_index++, Heap::undefined_value());
     }
 
-    // Parameter value.
-    if (i < it.frame()->GetProvidedParametersCount()) {
+    // Parameter value. If we are inspecting an optimized frame, use
+    // undefined as the value.
+    //
+    // TODO(3141533): We should be able to get the actual parameter
+    // value for optimized frames.
+    if (!is_optimized_frame &&
+        (i < it.frame()->GetProvidedParametersCount())) {
       details->set(details_index++, it.frame()->GetParameter(i));
     } else {
       details->set(details_index++, Heap::undefined_value());
@@ -9124,7 +9406,7 @@ Object* Runtime::FindSharedFunctionInfoInScript(Handle<Script> script,
   // Iterate the heap looking for SharedFunctionInfo generated from the
   // script. The inner most SharedFunctionInfo containing the source position
   // for the requested break point is found.
-  // NOTE: This might reqire several heap iterations. If the SharedFunctionInfo
+  // NOTE: This might require several heap iterations. If the SharedFunctionInfo
   // which is found is not compiled it is compiled and the heap is iterated
   // again as the compilation might create inner functions from the newly
   // compiled function and the actual requested break point might be in one of
@@ -9946,6 +10228,15 @@ static MaybeObject* Runtime_LiveEditReplaceScript(Arguments args) {
   }
 }
 
+
+static MaybeObject* Runtime_LiveEditFunctionSourceUpdated(Arguments args) {
+  ASSERT(args.length() == 1);
+  HandleScope scope;
+  CONVERT_ARG_CHECKED(JSArray, shared_info, 0);
+  return LiveEdit::FunctionSourceUpdated(shared_info);
+}
+
+
 // Replaces the code of a SharedFunctionInfo with a new one.
 static MaybeObject* Runtime_LiveEditReplaceFunctionCode(Arguments args) {
   ASSERT(args.length() == 2);
@@ -10048,6 +10339,11 @@ static MaybeObject* Runtime_GetFunctionCodePositionFromSource(Arguments args) {
 
   Handle<Code> code(function->code());
 
+  if (code->kind() != Code::FUNCTION &&
+      code->kind() != Code::OPTIMIZED_FUNCTION) {
+    return Heap::undefined_value();
+  }
+
   RelocIterator it(*code, RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION));
   int closest_pc = 0;
   int distance = kMaxInt;
@@ -10202,9 +10498,9 @@ static bool ShowFrameInStackTrace(StackFrame* raw_frame, Object* caller,
 }
 
 
-// Collect the raw data for a stack trace.  Returns an array of three
-// element segments each containing a receiver, function and native
-// code offset.
+// Collect the raw data for a stack trace.  Returns an array of
+// four-element segments, each containing a receiver, function, code
+// object and native code offset.
 static MaybeObject* Runtime_CollectStackTrace(Arguments args) {
   ASSERT_EQ(args.length(), 2);
   Handle<Object> caller = args.at<Object>(0);
@@ -10214,7 +10510,7 @@ static MaybeObject* Runtime_CollectStackTrace(Arguments args) {
 
   limit = Max(limit, 0);  // Ensure that limit is not negative.
   int initial_size = Min(limit, 10);
-  Handle<JSArray> result = Factory::NewJSArray(initial_size * 3);
+  Handle<JSArray> result = Factory::NewJSArray(initial_size * 4);
 
   StackFrameIterator iter;
   // If the caller parameter is a function we skip frames until we're
@@ -10227,23 +10523,25 @@ static MaybeObject* Runtime_CollectStackTrace(Arguments args) {
     if (ShowFrameInStackTrace(raw_frame, *caller, &seen_caller)) {
       frames_seen++;
       JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
-      Object* recv = frame->receiver();
-      Object* fun = frame->function();
-      Address pc = frame->pc();
-      Address start = frame->code()->address();
-      Smi* offset = Smi::FromInt(static_cast<int>(pc - start));
-      FixedArray* elements = FixedArray::cast(result->elements());
-      if (cursor + 2 < elements->length()) {
-        elements->set(cursor++, recv);
-        elements->set(cursor++, fun);
-        elements->set(cursor++, offset);
-      } else {
-        HandleScope scope;
-        Handle<Object> recv_handle(recv);
-        Handle<Object> fun_handle(fun);
-        SetElement(result, cursor++, recv_handle);
-        SetElement(result, cursor++, fun_handle);
-        SetElement(result, cursor++, Handle<Smi>(offset));
+      List<FrameSummary> frames(3);  // Max 2 levels of inlining, so 3 frames.
+      frame->Summarize(&frames);
+      for (int i = frames.length() - 1; i >= 0; i--) {
+        Handle<Object> recv = frames[i].receiver();
+        Handle<JSFunction> fun = frames[i].function();
+        Handle<Code> code = frames[i].code();
+        Handle<Smi> offset(Smi::FromInt(frames[i].offset()));
+        FixedArray* elements = FixedArray::cast(result->elements());
+        if (cursor + 3 < elements->length()) {
+          elements->set(cursor++, *recv);
+          elements->set(cursor++, *fun);
+          elements->set(cursor++, *code);
+          elements->set(cursor++, *offset);
+        } else {
+          SetElement(result, cursor++, recv);
+          SetElement(result, cursor++, fun);
+          SetElement(result, cursor++, code);
+          SetElement(result, cursor++, offset);
+        }
       }
     }
     iter.Advance();
index 8b6f988..5c841fc 100644 (file)
@@ -66,6 +66,7 @@ namespace internal {
   \
   F(IsInPrototypeChain, 2, 1) \
   F(SetHiddenPrototype, 2, 1) \
+  F(SetMathFunctionId, 2, 1) \
   \
   F(IsConstructCall, 0, 1) \
   \
@@ -79,6 +80,11 @@ namespace internal {
   F(GetConstructorDelegate, 1, 1) \
   F(NewArgumentsFast, 3, 1) \
   F(LazyCompile, 1, 1) \
+  F(LazyRecompile, 1, 1) \
+  F(NotifyDeoptimized, 1, 1) \
+  F(NotifyOSR, 0, 1) \
+  F(DeoptimizeFunction, 1, 1) \
+  F(CompileForOnStackReplacement, 1, 1) \
   F(SetNewFunctionAttributes, 1, 1) \
   F(AllocateInNewSpace, 1, 1) \
   \
@@ -109,6 +115,7 @@ namespace internal {
   F(NumberToJSUint32, 1, 1) \
   F(NumberToJSInt32, 1, 1) \
   F(NumberToSmi, 1, 1) \
+  F(AllocateHeapNumber, 0, 1) \
   \
   /* Arithmetic operations */ \
   F(NumberAdd, 2, 1) \
@@ -350,6 +357,7 @@ namespace internal {
   F(LiveEditGatherCompileInfo, 2, 1) \
   F(LiveEditReplaceScript, 3, 1) \
   F(LiveEditReplaceFunctionCode, 2, 1) \
+  F(LiveEditFunctionSourceUpdated, 1, 1) \
   F(LiveEditFunctionSetScript, 2, 1) \
   F(LiveEditReplaceRefToNestedFunction, 3, 1) \
   F(LiveEditPatchFunctionPositions, 2, 1) \
diff --git a/src/safepoint-table.cc b/src/safepoint-table.cc
new file mode 100644 (file)
index 0000000..b9468a5
--- /dev/null
@@ -0,0 +1,210 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "safepoint-table.h"
+#include "disasm.h"
+
+namespace v8 {
+namespace internal {
+
+SafepointTable::SafepointTable(Code* code) {
+  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+  code_ = code;
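+  // Table layout: a two-word header (length and entry size in bytes),
+  // then length (pc offset, encoded deoptimization index) pairs, then
+  // length bitmap entries of entry_size_ bytes each.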
+  Address header = code->instruction_start() + code->safepoint_table_start();
+  length_ = Memory::uint32_at(header + kLengthOffset);
+  entry_size_ = Memory::uint32_at(header + kEntrySizeOffset);
+  pc_and_deoptimization_indexes_ = header + kHeaderSize;
+  entries_ = pc_and_deoptimization_indexes_ +
+            (length_ * kPcAndDeoptimizationIndexSize);
+  ASSERT(entry_size_ > 0);
+  ASSERT_EQ(DeoptimizationIndexField::max(), Safepoint::kNoDeoptimizationIndex);
+}
+
+
+bool SafepointTable::HasRegisters(uint8_t* entry) {
+  ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
+  const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
+  for (int i = 0; i < num_reg_bytes; i++) {
+    if (entry[i] != kNoRegisters) return true;
+  }
+  return false;
+}
+
+
+bool SafepointTable::HasRegisterAt(uint8_t* entry, int reg_index) {
+  ASSERT(reg_index >= 0 && reg_index < kNumSafepointRegisters);
+  int byte_index = reg_index >> kBitsPerByteLog2;
+  int bit_index = reg_index & (kBitsPerByte - 1);
+  return (entry[byte_index] & (1 << bit_index)) != 0;
+}
+
+
+void SafepointTable::PrintEntry(unsigned index) const {
+  disasm::NameConverter converter;
+  uint8_t* entry = GetEntry(index);
+
+  // Print the stack slot bits.
+  if (entry_size_ > 0) {
+    ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
+    const int first = kNumSafepointRegisters >> kBitsPerByteLog2;
+    int last = entry_size_ - 1;
+    for (int i = first; i < last; i++) PrintBits(entry[i], kBitsPerByte);
+    int last_bits = code_->stack_slots() - ((last - first) * kBitsPerByte);
+    PrintBits(entry[last], last_bits);
+
+    // Print the registers (if any).
+    if (!HasRegisters(entry)) return;
+    for (int j = 0; j < kNumSafepointRegisters; j++) {
+      if (HasRegisterAt(entry, j)) {
+        PrintF(" | %s", converter.NameOfCPURegister(j));
+      }
+    }
+  }
+}
+
+
+void SafepointTable::PrintBits(uint8_t byte, int digits) {
+  ASSERT(digits >= 0 && digits <= kBitsPerByte);
+  for (int i = 0; i < digits; i++) {
+    PrintF("%c", ((byte & (1 << i)) == 0) ? '0' : '1');
+  }
+}
+
+
+Safepoint SafepointTableBuilder::DefineSafepoint(Assembler* assembler,
+                                                 int deoptimization_index) {
+  ASSERT(deoptimization_index != -1);
+  DeoptimizationInfo pc_and_deoptimization_index;
+  pc_and_deoptimization_index.pc = assembler->pc_offset();
+  pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
+  pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
+  deoptimization_info_.Add(pc_and_deoptimization_index);
+  indexes_.Add(new ZoneList<int>(8));
+  registers_.Add(NULL);
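+  // A NULL register list marks a safepoint without live registers;
+  // Emit fills its register bytes with kNoRegisters.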
+  return Safepoint(indexes_.last(), registers_.last());
+}
+
+
+Safepoint SafepointTableBuilder::DefineSafepointWithRegisters(
+    Assembler* assembler, int arguments, int deoptimization_index) {
+  ASSERT(deoptimization_index != -1);
+  ASSERT(arguments == 0);  // Only case that works for now.
+  DeoptimizationInfo pc_and_deoptimization_index;
+  pc_and_deoptimization_index.pc = assembler->pc_offset();
+  pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
+  pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
+  deoptimization_info_.Add(pc_and_deoptimization_index);
+  indexes_.Add(new ZoneList<int>(8));
+  registers_.Add(new ZoneList<int>(4));
+  return Safepoint(indexes_.last(), registers_.last());
+}
+
+
+unsigned SafepointTableBuilder::GetCodeOffset() const {
+  ASSERT(emitted_);
+  return offset_;
+}
+
+
+void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
+  // Make sure the safepoint table is properly aligned. Pad with nops.
+  assembler->Align(kIntSize);
+  assembler->RecordComment(";;; Safepoint table.");
+  offset_ = assembler->pc_offset();
+
+  // Take the register bits into account.
+  bits_per_entry += kNumSafepointRegisters;
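+  // The register bits occupy the low kNumSafepointRegisters bits of
+  // each entry; the stack slot bits fill the remaining bits starting
+  // from the most significant one.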
+
+  // Compute the number of bytes per safepoint entry.
+  int bytes_per_entry =
+      RoundUp(bits_per_entry, kBitsPerByte) >> kBitsPerByteLog2;
+
+  // Emit the table header.
+  int length = deoptimization_info_.length();
+  assembler->dd(length);
+  assembler->dd(bytes_per_entry);
+
+  // Emit sorted table of pc offsets together with deoptimization indexes and
+  // pc after gap information.
+  for (int i = 0; i < length; i++) {
+    assembler->dd(deoptimization_info_[i].pc);
+    assembler->dd(EncodeDeoptimizationIndexAndGap(deoptimization_info_[i]));
+  }
+
+  // Emit table of bitmaps.
+  ZoneList<uint8_t> bits(bytes_per_entry);
+  for (int i = 0; i < length; i++) {
+    ZoneList<int>* indexes = indexes_[i];
+    ZoneList<int>* registers = registers_[i];
+    bits.Clear();
+    bits.AddBlock(0, bytes_per_entry);
+
+    // Run through the registers (if any).
+    ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
+    if (registers == NULL) {
+      const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
+      for (int j = 0; j < num_reg_bytes; j++) {
+        bits[j] = SafepointTable::kNoRegisters;
+      }
+    } else {
+      for (int j = 0; j < registers->length(); j++) {
+        int index = registers->at(j);
+        ASSERT(index >= 0 && index < kNumSafepointRegisters);
+        int byte_index = index >> kBitsPerByteLog2;
+        int bit_index = index & (kBitsPerByte - 1);
+        bits[byte_index] |= (1 << bit_index);
+      }
+    }
+
+    // Run through the indexes and build a bitmap.
+    for (int j = 0; j < indexes->length(); j++) {
+      int index = bits_per_entry - 1 - indexes->at(j);
+      int byte_index = index >> kBitsPerByteLog2;
+      int bit_index = index & (kBitsPerByte - 1);
+      bits[byte_index] |= (1U << bit_index);
+    }
+
+    // Emit the bitmap for the current entry.
+    for (int k = 0; k < bytes_per_entry; k++) {
+      assembler->db(bits[k]);
+    }
+  }
+  emitted_ = true;
+}
+
+
+uint32_t SafepointTableBuilder::EncodeDeoptimizationIndexAndGap(
+    DeoptimizationInfo info) {
+  unsigned index = info.deoptimization_index;
+  unsigned gap_size = info.pc_after_gap - info.pc;
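+  // Pack the gap code size into bits 0-7 and the deoptimization index
+  // into bits 8-31 (see the BitField definitions in the header).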
+  uint32_t encoding = SafepointTable::DeoptimizationIndexField::encode(index);
+  encoding |= SafepointTable::GapCodeSizeField::encode(gap_size);
+  return encoding;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/safepoint-table.h b/src/safepoint-table.h
new file mode 100644 (file)
index 0000000..010ac57
--- /dev/null
@@ -0,0 +1,189 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SAFEPOINT_TABLE_H_
+#define V8_SAFEPOINT_TABLE_H_
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "zone.h"
+#include "zone-inl.h"
+
+namespace v8 {
+namespace internal {
+
+class SafepointTable BASE_EMBEDDED {
+ public:
+  explicit SafepointTable(Code* code);
+
+  int size() const {
+    return kHeaderSize +
+           (length_ * (kPcAndDeoptimizationIndexSize + entry_size_));
+  }
+  unsigned length() const { return length_; }
+  unsigned entry_size() const { return entry_size_; }
+
+  unsigned GetPcOffset(unsigned index) const {
+    ASSERT(index < length_);
+    return Memory::uint32_at(GetPcOffsetLocation(index));
+  }
+
+  int GetDeoptimizationIndex(unsigned index) const {
+    ASSERT(index < length_);
+    unsigned value = Memory::uint32_at(GetDeoptimizationLocation(index));
+    return DeoptimizationIndexField::decode(value);
+  }
+
+  unsigned GetGapCodeSize(unsigned index) const {
+    ASSERT(index < length_);
+    unsigned value = Memory::uint32_at(GetDeoptimizationLocation(index));
+    return GapCodeSizeField::decode(value);
+  }
+
+  uint8_t* GetEntry(unsigned index) const {
+    ASSERT(index < length_);
+    return &Memory::uint8_at(entries_ + (index * entry_size_));
+  }
+
+  class GapCodeSizeField: public BitField<unsigned, 0, 8> {};
+  class DeoptimizationIndexField: public BitField<int, 8, 24> {};
+
+  static bool HasRegisters(uint8_t* entry);
+  static bool HasRegisterAt(uint8_t* entry, int reg_index);
+
+  void PrintEntry(unsigned index) const;
+
+ private:
+  static const uint8_t kNoRegisters = 0xFF;
+
+  static const int kLengthOffset = 0;
+  static const int kEntrySizeOffset = kLengthOffset + kIntSize;
+  static const int kHeaderSize = kEntrySizeOffset + kIntSize;
+
+  static const int kPcSize = kIntSize;
+  static const int kDeoptimizationIndexSize = kIntSize;
+  static const int kPcAndDeoptimizationIndexSize =
+      kPcSize + kDeoptimizationIndexSize;
+
+  Address GetPcOffsetLocation(unsigned index) const {
+    return pc_and_deoptimization_indexes_ +
+           (index * kPcAndDeoptimizationIndexSize);
+  }
+
+  Address GetDeoptimizationLocation(unsigned index) const {
+    return GetPcOffsetLocation(index) + kPcSize;
+  }
+
+  static void PrintBits(uint8_t byte, int digits);
+
+  AssertNoAllocation no_allocation_;
+  Code* code_;
+  unsigned length_;
+  unsigned entry_size_;
+
+  Address pc_and_deoptimization_indexes_;
+  Address entries_;
+
+  friend class SafepointTableBuilder;
+};
+
+
+class Safepoint BASE_EMBEDDED {
+ public:
+  static const int kNoDeoptimizationIndex = 0x00ffffff;
+
+  void DefinePointerSlot(int index) { indexes_->Add(index); }
+  void DefinePointerRegister(Register reg) { registers_->Add(reg.code()); }
+
+ private:
+  Safepoint(ZoneList<int>* indexes, ZoneList<int>* registers) :
+      indexes_(indexes), registers_(registers) { }
+  ZoneList<int>* indexes_;
+  ZoneList<int>* registers_;
+
+  friend class SafepointTableBuilder;
+};
+
+
+class SafepointTableBuilder BASE_EMBEDDED {
+ public:
+  SafepointTableBuilder()
+      : deoptimization_info_(32),
+        indexes_(32),
+        registers_(32),
+        emitted_(false) { }
+
+  // Get the offset of the emitted safepoint table in the code.
+  unsigned GetCodeOffset() const;
+
+  // Define a new safepoint for the current position in the body.
+  Safepoint DefineSafepoint(
+      Assembler* assembler,
+      int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
+
+  // Define a new safepoint with registers on the stack for the
+  // current position in the body and take the number of arguments on
+  // top of the registers into account.
+  Safepoint DefineSafepointWithRegisters(
+      Assembler* assembler,
+      int arguments,
+      int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
+
+  // Update the last safepoint with the size of the code generated for the gap
+  // following it.
+  void SetPcAfterGap(int pc) {
+    ASSERT(!deoptimization_info_.is_empty());
+    int index = deoptimization_info_.length() - 1;
+    deoptimization_info_[index].pc_after_gap = pc;
+  }
+
+  // Emit the safepoint table after the body. The number of bits per
+  // entry must be enough to hold all the pointer indexes.
+  void Emit(Assembler* assembler, int bits_per_entry);
+
+ private:
+  struct DeoptimizationInfo {
+    unsigned pc;
+    unsigned deoptimization_index;
+    unsigned pc_after_gap;
+  };
+
+  uint32_t EncodeDeoptimizationIndexAndGap(DeoptimizationInfo info);
+
+  ZoneList<DeoptimizationInfo> deoptimization_info_;
+  ZoneList<ZoneList<int>*> indexes_;
+  ZoneList<ZoneList<int>*> registers_;
+
+  bool emitted_;
+  unsigned offset_;
+
+  DISALLOW_COPY_AND_ASSIGN(SafepointTableBuilder);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_SAFEPOINT_TABLE_H_
index b210ae7..dd49a4e 100644 (file)
@@ -109,9 +109,14 @@ class SerializedScopeInfo : public FixedArray {
     return reinterpret_cast<SerializedScopeInfo*>(object);
   }
 
-  // Does this scope call eval.
+  // Does this scope call eval?
   bool CallsEval();
 
+  // Does this scope have an arguments shadow?
+  bool HasArgumentsShadow() {
+    return StackSlotIndex(Heap::arguments_shadow_symbol()) >= 0;
+  }
+
   // Return the number of stack slots for code.
   int NumberOfStackSlots();
 
index 5ff250f..3565e11 100644 (file)
@@ -291,13 +291,11 @@ void Scope::RemoveUnresolved(VariableProxy* var) {
 }
 
 
-VariableProxy* Scope::NewTemporary(Handle<String> name) {
-  Variable* var = new Variable(this, name, Variable::TEMPORARY, true,
-                               Variable::NORMAL);
-  VariableProxy* tmp = new VariableProxy(name, false, false);
-  tmp->BindTo(var);
+Variable* Scope::NewTemporary(Handle<String> name) {
+  Variable* var =
+      new Variable(this, name, Variable::TEMPORARY, true, Variable::NORMAL);
   temps_.Add(var);
-  return tmp;
+  return var;
 }
 
 
@@ -861,11 +859,13 @@ void Scope::AllocateParameterLocals() {
           // allocated.
           arguments_shadow_->is_accessed_from_inner_scope_ = true;
         }
-        var->rewrite_ =
+        Property* rewrite =
             new Property(new VariableProxy(arguments_shadow_),
                          new Literal(Handle<Object>(Smi::FromInt(i))),
                          RelocInfo::kNoPosition,
                          Property::SYNTHETIC);
+        rewrite->set_is_arguments_access(true);
+        var->rewrite_ = rewrite;
       }
     }
 
index 526c3d3..d909b81 100644 (file)
@@ -105,7 +105,7 @@ class Scope: public ZoneObject {
   static bool Analyze(CompilationInfo* info);
 
   // The scope name is only used for printing/debugging.
-  void SetScopeName(Handle<String> scope_name)  { scope_name_ = scope_name; }
+  void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; }
 
   virtual void Initialize(bool inside_with);
 
@@ -156,11 +156,11 @@ class Scope: public ZoneObject {
   // such a variable again if it was added; otherwise this is a no-op.
   void RemoveUnresolved(VariableProxy* var);
 
-  // Creates a new temporary variable in this scope and binds a proxy to it.
-  // The name is only used for printing and cannot be used to find the variable.
-  // In particular, the only way to get hold of the temporary is by keeping the
-  // VariableProxy* around.
-  virtual VariableProxy* NewTemporary(Handle<String> name);
+  // Creates a new temporary variable in this scope.  The name is only used
+  // for printing and cannot be used to find the variable.  In particular,
+  // the only way to get hold of the temporary is by keeping the Variable*
+  // around.
+  virtual Variable* NewTemporary(Handle<String> name);
 
   // Adds the specific declaration node to the list of declarations in
   // this scope. The declarations are processed as part of entering
@@ -188,10 +188,10 @@ class Scope: public ZoneObject {
   // Scope-specific info.
 
   // Inform the scope that the corresponding code contains a with statement.
-  void RecordWithStatement()  { scope_contains_with_ = true; }
+  void RecordWithStatement() { scope_contains_with_ = true; }
 
   // Inform the scope that the corresponding code contains an eval call.
-  void RecordEvalCall()  { scope_calls_eval_ = true; }
+  void RecordEvalCall() { scope_calls_eval_ = true; }
 
 
   // ---------------------------------------------------------------------------
@@ -423,7 +423,7 @@ class DummyScope : public Scope {
     return NULL;
   }
 
-  virtual VariableProxy* NewTemporary(Handle<String> name)  { return NULL; }
+  virtual Variable* NewTemporary(Handle<String> name) { return NULL; }
 
   virtual bool HasTrivialOuterContext() const {
     return (nesting_level_ == 0 || inside_with_level_ <= 0);
index 15fed44..6edc4fa 100644 (file)
@@ -470,6 +470,22 @@ void ExternalReferenceTable::PopulateTable() {
       UNCLASSIFIED,
       32,
       "HandleScope::level");
+  Add(ExternalReference::new_deoptimizer_function().address(),
+      UNCLASSIFIED,
+      33,
+      "Deoptimizer::New()");
+  Add(ExternalReference::compute_output_frames_function().address(),
+      UNCLASSIFIED,
+      34,
+      "Deoptimizer::ComputeOutputFrames()");
+  Add(ExternalReference::address_of_min_int().address(),
+      UNCLASSIFIED,
+      35,
+      "LDoubleConstant::min_int");
+  Add(ExternalReference::address_of_one_half().address(),
+      UNCLASSIFIED,
+      36,
+      "LDoubleConstant::one_half");
 }
 
 
@@ -1370,6 +1386,13 @@ void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
 }
 
 
+void Serializer::ObjectSerializer::VisitGlobalPropertyCell(RelocInfo* rinfo) {
+  // We shouldn't have any global property cell references in code
+  // objects in the snapshot.
+  UNREACHABLE();
+}
+
+
 void Serializer::ObjectSerializer::VisitExternalAsciiString(
     v8::String::ExternalAsciiStringResource** resource_pointer) {
   Address references_start = reinterpret_cast<Address>(resource_pointer);
index 92a5149..e80c302 100644 (file)
@@ -449,6 +449,7 @@ class Serializer : public SerializerDeserializer {
     void VisitExternalReferences(Address* start, Address* end);
     void VisitCodeTarget(RelocInfo* target);
     void VisitCodeEntry(Address entry_address);
+    void VisitGlobalPropertyCell(RelocInfo* rinfo);
     void VisitRuntimeEntry(RelocInfo* reloc);
    // Used for serializing the external strings that hold the natives source.
     void VisitExternalAsciiString(
index 7806223..ad09ae2 100644 (file)
@@ -412,6 +412,14 @@ bool PagedSpace::Contains(Address addr) {
 }
 
 
+bool PagedSpace::SafeContains(Address addr) {
+  if (!MemoryAllocator::SafeIsInAPageChunk(addr)) return false;
+  Page* p = Page::FromAddress(addr);
+  if (!p->is_valid()) return false;
+  return MemoryAllocator::IsPageInSpace(p, this);
+}
+
+
 // Try linear allocation in the page of alloc_info's allocation top.  Does
 // not contain slow case logic (e.g., move to the next page or try free list
 // allocation) so it can be used by all the allocation functions and for all
@@ -460,14 +468,18 @@ MaybeObject* PagedSpace::MCAllocateRaw(int size_in_bytes) {
 // -----------------------------------------------------------------------------
 // LargeObjectChunk
 
-HeapObject* LargeObjectChunk::GetObject() {
+Address LargeObjectChunk::GetStartAddress() {
   // Round the chunk address up to the nearest page-aligned address
   // and return the heap object in that page.
   Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize));
-  return HeapObject::FromAddress(page->ObjectAreaStart());
+  return page->ObjectAreaStart();
 }
 
 
+void LargeObjectChunk::Free(Executability executable) {
+  MemoryAllocator::FreeRawMemory(address(), size(), executable);
+}
+
 // -----------------------------------------------------------------------------
 // LargeObjectSpace
 
index 239c9cd..369eb6f 100644 (file)
@@ -333,6 +333,11 @@ bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
 }
 
 
+bool MemoryAllocator::SafeIsInAPageChunk(Address addr) {
+  return InInitialChunk(addr) || InAllocatedChunks(addr);
+}
+
+
 void MemoryAllocator::TearDown() {
   for (int i = 0; i < max_nof_chunks_; i++) {
     if (chunks_[i].address() != NULL) DeleteChunk(i);
@@ -346,6 +351,10 @@ void MemoryAllocator::TearDown() {
     initial_chunk_ = NULL;
   }
 
+  FreeChunkTables(&chunk_table_[0],
+                  kChunkTableTopLevelEntries,
+                  kChunkTableLevels);
+
   ASSERT(top_ == max_nof_chunks_);  // all chunks are free
   top_ = 0;
   capacity_ = 0;
@@ -355,6 +364,22 @@ void MemoryAllocator::TearDown() {
 }
 
 
+void MemoryAllocator::FreeChunkTables(AtomicWord* array, int len, int level) {
+  for (int i = 0; i < len; i++) {
+    if (array[i] != kUnusedChunkTableEntry) {
+      AtomicWord* subarray = reinterpret_cast<AtomicWord*>(array[i]);
+      if (level > 1) {
+        Release_Store(&array[i], kUnusedChunkTableEntry);
+        FreeChunkTables(subarray, 1 << kChunkTableBitsPerLevel, level - 1);
+      } else {
+        Release_Store(&array[i], kUnusedChunkTableEntry);
+      }
+      delete[] subarray;
+    }
+  }
+}
+
+
 void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                          size_t* allocated,
                                          Executability executable) {
@@ -488,25 +513,19 @@ static int PagesInChunk(Address start, size_t size) {
 }
 
 
-Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
+Page* MemoryAllocator::AllocatePages(int requested_pages,
+                                     int* allocated_pages,
                                      PagedSpace* owner) {
   if (requested_pages <= 0) return Page::FromAddress(NULL);
   size_t chunk_size = requested_pages * Page::kPageSize;
 
-  // There is not enough space to guarantee the desired number pages can be
-  // allocated.
-  if (size_ + static_cast<int>(chunk_size) > capacity_) {
-    // Request as many pages as we can.
-    chunk_size = capacity_ - size_;
-    requested_pages = static_cast<int>(chunk_size >> kPageSizeBits);
-
-    if (requested_pages <= 0) return Page::FromAddress(NULL);
-  }
   void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
   if (chunk == NULL) return Page::FromAddress(NULL);
   LOG(NewEvent("PagedChunk", chunk, chunk_size));
 
   *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
+  // We may 'lose' a page due to alignment.
+  ASSERT(*allocated_pages >= kPagesPerChunk - 1);
   if (*allocated_pages == 0) {
     FreeRawMemory(chunk, chunk_size, owner->executable());
     LOG(DeleteEvent("PagedChunk", chunk));
@@ -518,7 +537,11 @@ Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
 
   ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
   PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
-  return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
+  Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
+
+  AddToAllocatedChunks(static_cast<Address>(chunk), chunk_size);
+
+  return new_pages;
 }
 
 
@@ -675,6 +698,7 @@ void MemoryAllocator::DeleteChunk(int chunk_id) {
     initial_chunk_->Uncommit(c.address(), c.size());
     Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
   } else {
+    RemoveFromAllocatedChunks(c.address(), c.size());
     LOG(DeleteEvent("PagedChunk", c.address()));
     ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner()->identity());
     size_t size = c.size();
@@ -788,6 +812,126 @@ Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
 }
 
 
+void MemoryAllocator::AddToAllocatedChunks(Address addr, intptr_t size) {
+  ASSERT(size == kChunkSize);
+  uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
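+  // Register the chunk under both its first and its last address: the
+  // chunk need not be aligned to kChunkSize, so it may straddle two
+  // fine-grained table slots.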
+  AddChunkUsingAddress(int_address, int_address);
+  AddChunkUsingAddress(int_address, int_address + size - 1);
+}
+
+
+void MemoryAllocator::AddChunkUsingAddress(uintptr_t chunk_start,
+                                           uintptr_t chunk_index_base) {
+  AtomicWord* fine_grained = AllocatedChunksFinder(
+      chunk_table_,
+      chunk_index_base,
+      kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
+      kCreateTablesAsNeeded);
+  int index = FineGrainedIndexForAddress(chunk_index_base);
+  if (fine_grained[index] != kUnusedChunkTableEntry) index++;
+  ASSERT(fine_grained[index] == kUnusedChunkTableEntry);
+  Release_Store(&fine_grained[index], chunk_start);
+}
+
+
+void MemoryAllocator::RemoveFromAllocatedChunks(Address addr, intptr_t size) {
+  ASSERT(size == kChunkSize);
+  uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
+  RemoveChunkFoundUsingAddress(int_address, int_address);
+  RemoveChunkFoundUsingAddress(int_address, int_address + size - 1);
+}
+
+
+void MemoryAllocator::RemoveChunkFoundUsingAddress(
+    uintptr_t chunk_start,
+    uintptr_t chunk_index_base) {
+  AtomicWord* fine_grained = AllocatedChunksFinder(
+      chunk_table_,
+      chunk_index_base,
+      kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
+      kDontCreateTables);
+  // Can't remove an entry that's not there.
+  ASSERT(fine_grained != kUnusedChunkTableEntry);
+  int index = FineGrainedIndexForAddress(chunk_index_base);
+  ASSERT(fine_grained[index] != kUnusedChunkTableEntry);
+  if (fine_grained[index] != static_cast<AtomicWord>(chunk_start)) {
+    index++;
+    ASSERT(fine_grained[index] == static_cast<AtomicWord>(chunk_start));
+    Release_Store(&fine_grained[index], kUnusedChunkTableEntry);
+  } else {
+    Release_Store(&fine_grained[index], fine_grained[index + 1]);
+    // Here for a moment the two entries are duplicates, but the reader can
+    // handle that.
+    NoBarrier_Store(&fine_grained[index + 1], kUnusedChunkTableEntry);
+  }
+}
+
+
+bool MemoryAllocator::InAllocatedChunks(Address addr) {
+  uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
+  AtomicWord* fine_grained = AllocatedChunksFinder(
+      chunk_table_,
+      int_address,
+      kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
+      kDontCreateTables);
+  if (fine_grained == NULL) return false;
+  int index = FineGrainedIndexForAddress(int_address);
+  if (fine_grained[index] == kUnusedChunkTableEntry) return false;
+  uintptr_t entry = static_cast<uintptr_t>(fine_grained[index]);
+  if (entry <= int_address && entry + kChunkSize > int_address) return true;
+  index++;
+  if (fine_grained[index] == kUnusedChunkTableEntry) return false;
+  entry = static_cast<uintptr_t>(fine_grained[index]);
+  // At this point it would seem that we must have a hit, but there is a small
+  // window during RemoveChunkFoundUsingAddress where the two entries are
+  // duplicates and we have to handle that.
+  if (entry <= int_address && entry + kChunkSize > int_address) return true;
+  return false;
+}
+
+
+AtomicWord* MemoryAllocator::AllocatedChunksFinder(
+    AtomicWord* table,
+    uintptr_t address,
+    int bit_position,
+    CreateTables create_as_needed) {
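+  // This walks one level of the chunk table, a radix tree keyed on the
+  // address bits: each level consumes kChunkTableBitsPerLevel bits
+  // until only the kChunkSizeLog2 in-chunk offset bits remain.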
+  if (bit_position == kChunkSizeLog2) {
+    return table;
+  }
+  ASSERT(bit_position >= kChunkSizeLog2 + kChunkTableBitsPerLevel);
+  int index =
+      ((address >> bit_position) &
+       ((V8_INTPTR_C(1) << kChunkTableBitsPerLevel) - 1));
+  uintptr_t more_fine_grained_address =
+      address & ((V8_INTPTR_C(1) << bit_position) - 1);
+  ASSERT((table == chunk_table_ && index < kChunkTableTopLevelEntries) ||
+         (table != chunk_table_ && index < 1 << kChunkTableBitsPerLevel));
+  AtomicWord* more_fine_grained_table =
+      reinterpret_cast<AtomicWord*>(table[index]);
+  if (more_fine_grained_table == kUnusedChunkTableEntry) {
+    if (create_as_needed == kDontCreateTables) return NULL;
+    int words_needed = 1 << kChunkTableBitsPerLevel;
+    if (bit_position == kChunkTableBitsPerLevel + kChunkSizeLog2) {
+      words_needed =
+          (1 << kChunkTableBitsPerLevel) * kChunkTableFineGrainedWordsPerEntry;
+    }
+    more_fine_grained_table = new AtomicWord[words_needed];
+    for (int i = 0; i < words_needed; i++) {
+      more_fine_grained_table[i] = NULL;
+    }
+    Release_Store(&table[index],
+                  reinterpret_cast<AtomicWord>(more_fine_grained_table));
+  }
+  return AllocatedChunksFinder(
+      more_fine_grained_table,
+      more_fine_grained_address,
+      bit_position - kChunkTableBitsPerLevel,
+      create_as_needed);
+}
+
+
+AtomicWord MemoryAllocator::chunk_table_[kChunkTableTopLevelEntries];
+
 
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
@@ -1010,7 +1154,10 @@ bool PagedSpace::Expand(Page* last_page) {
 
   int available_pages =
       static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
-  if (available_pages <= 0) return false;
+  // We don't want to have to handle small chunks near the end, so if
+  // fewer than kPagesPerChunk pages are available without exceeding the
+  // max capacity, act as if memory has run out.
+  if (available_pages < MemoryAllocator::kPagesPerChunk) return false;
 
   int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
   Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
@@ -1544,6 +1691,7 @@ static void ReportCodeKindStatistics() {
   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
     switch (static_cast<Code::Kind>(i)) {
       CASE(FUNCTION);
+      CASE(OPTIMIZED_FUNCTION);
       CASE(STUB);
       CASE(BUILTIN);
       CASE(LOAD_IC);
@@ -1553,6 +1701,8 @@ static void ReportCodeKindStatistics() {
       CASE(CALL_IC);
       CASE(KEYED_CALL_IC);
       CASE(BINARY_OP_IC);
+      CASE(TYPE_RECORDING_BINARY_OP_IC);
+      CASE(COMPARE_IC);
     }
   }
 
@@ -2697,32 +2847,40 @@ HeapObject* LargeObjectIterator::next() {
 // LargeObjectChunk
 
 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
-                                        size_t* chunk_size,
                                         Executability executable) {
   size_t requested = ChunkSizeFor(size_in_bytes);
-  void* mem = MemoryAllocator::AllocateRawMemory(requested,
-                                                 chunk_size,
-                                                 executable);
+  size_t size;
+  void* mem = MemoryAllocator::AllocateRawMemory(requested, &size, executable);
   if (mem == NULL) return NULL;
-  LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
-  if (*chunk_size < requested) {
-    MemoryAllocator::FreeRawMemory(mem, *chunk_size, executable);
+
+  // The start of the chunk may be overlayed with a page so we have to
+  // make sure that the page flags fit in the size field.
+  ASSERT((size & Page::kPageFlagMask) == 0);
+
+  LOG(NewEvent("LargeObjectChunk", mem, size));
+  if (size < requested) {
+    MemoryAllocator::FreeRawMemory(mem, size, executable);
     LOG(DeleteEvent("LargeObjectChunk", mem));
     return NULL;
   }
-  ObjectSpace space =
-      (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace;
-  MemoryAllocator::PerformAllocationCallback(space,
-                                             kAllocationActionAllocate,
-                                             *chunk_size);
-  return reinterpret_cast<LargeObjectChunk*>(mem);
+
+  ObjectSpace space = (executable == EXECUTABLE)
+      ? kObjectSpaceCodeSpace
+      : kObjectSpaceLoSpace;
+  MemoryAllocator::PerformAllocationCallback(
+      space, kAllocationActionAllocate, size);
+
+  LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
+  chunk->size_ = size;
+  return chunk;
 }
 
 
 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
   int os_alignment = static_cast<int>(OS::AllocateAlignment());
-  if (os_alignment < Page::kPageSize)
+  if (os_alignment < Page::kPageSize) {
     size_in_bytes += (Page::kPageSize - os_alignment);
+  }
   return size_in_bytes + Page::kObjectStartOffset;
 }
 
@@ -2803,27 +2961,24 @@ MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
     return Failure::RetryAfterGC(identity());
   }
 
-  size_t chunk_size;
-  LargeObjectChunk* chunk =
-      LargeObjectChunk::New(requested_size, &chunk_size, executable);
+  LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable);
   if (chunk == NULL) {
     return Failure::RetryAfterGC(identity());
   }
 
-  size_ += static_cast<int>(chunk_size);
+  size_ += static_cast<int>(chunk->size());
   objects_size_ += requested_size;
   page_count_++;
   chunk->set_next(first_chunk_);
-  chunk->set_size(chunk_size);
   first_chunk_ = chunk;
 
   // Initialize page header.
   Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
   Address object_address = page->ObjectAreaStart();
+
   // Clear the low order bit of the second word in the page to flag it as a
   // large object page.  If the chunk_size happened to be written there, its
   // low order bit should already be clear.
-  ASSERT((chunk_size & 0x1) == 0);
   page->SetIsLargeObjectPage(true);
   page->SetIsPageExecutable(executable);
   page->SetRegionMarks(Page::kAllRegionsCleanMarks);
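The size_ field of a LargeObjectChunk now does double duty: since a Page may overlay the start of the chunk, the low bits of the word holding the size are reserved for page flags, and the size() accessor masks them off. A minimal self-contained sketch of that masking, with a hypothetical kPageFlagMask value standing in for Page::kPageFlagMask:

#include <cassert>
#include <cstddef>

const size_t kPageFlagMask = 0x7;  // Hypothetical low-bit page flags.

struct Chunk {
  size_t size_;
  // Mask the flag bits off so they never leak into the reported size.
  size_t size() const { return size_ & ~kPageFlagMask; }
};

int main() {
  Chunk c;
  c.size_ = 0x20000 | 0x1;      // Aligned chunk size with one flag bit set.
  assert(c.size() == 0x20000);  // The flag bit is not part of the size.
  return 0;
}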
index 60068c3..2e85003 100644 (file)
@@ -28,6 +28,7 @@
 #ifndef V8_SPACES_H_
 #define V8_SPACES_H_
 
+#include "atomicops.h"
 #include "list-inl.h"
 #include "log.h"
 
@@ -609,6 +610,9 @@ class MemoryAllocator : public AllStatic {
     return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
   }
 
+  // Sanity check on a pointer.
+  static bool SafeIsInAPageChunk(Address addr);
+
   // Links two pages.
   static inline void SetNextPage(Page* prev, Page* next);
 
@@ -650,23 +654,50 @@ class MemoryAllocator : public AllStatic {
   static void ReportStatistics();
 #endif
 
+  static void AddToAllocatedChunks(Address addr, intptr_t size);
+  static void RemoveFromAllocatedChunks(Address addr, intptr_t size);
+  // Note: This only checks the regular chunks, not the odd-sized initial
+  // chunk.
+  static bool InAllocatedChunks(Address addr);
+
   // Due to encoding limitation, we can only have 8K chunks.
   static const int kMaxNofChunks = 1 << kPageSizeBits;
   // If a chunk has at least 16 pages, the maximum heap size is about
   // 8K * 8K * 16 = 1G bytes.
 #ifdef V8_TARGET_ARCH_X64
   static const int kPagesPerChunk = 32;
+  // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
+  static const int kPagesPerChunkLog2 = 5;
+  static const int kChunkTableLevels = 4;
+  static const int kChunkTableBitsPerLevel = 12;
 #else
   static const int kPagesPerChunk = 16;
+  // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
+  static const int kPagesPerChunkLog2 = 4;
+  static const int kChunkTableLevels = 2;
+  static const int kChunkTableBitsPerLevel = 8;
 #endif
-  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
 
  private:
+  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
+  static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits;
+  static const int kChunkTableTopLevelEntries =
+      1 << (sizeof(intptr_t) * kBitsPerByte - kChunkSizeLog2 -
+          (kChunkTableLevels - 1) * kChunkTableBitsPerLevel);
+
+  // The chunks are not chunk-size aligned, so a given chunk-sized area of
+  // memory can be covered by two different chunks.
+  static const int kChunkTableFineGrainedWordsPerEntry = 2;
+  static const AtomicWord kUnusedChunkTableEntry = 0;
+
   // Maximum space size in bytes.
   static intptr_t capacity_;
   // Maximum subset of capacity_ that can be executable
   static intptr_t capacity_executable_;
 
+  // Top level table to track whether memory is part of a chunk or not.
+  static AtomicWord chunk_table_[kChunkTableTopLevelEntries];
+
   // Allocated space size in bytes.
   static intptr_t size_;
   // Allocated executable space size in bytes.
@@ -725,6 +756,28 @@ class MemoryAllocator : public AllStatic {
   // Frees a chunk.
   static void DeleteChunk(int chunk_id);
 
+  // Helpers to maintain and query the chunk tables.
+  static void AddChunkUsingAddress(
+      uintptr_t chunk_start,        // Where the chunk starts.
+      uintptr_t chunk_index_base);  // Used to place the chunk in the tables.
+  static void RemoveChunkFoundUsingAddress(
+      uintptr_t chunk_start,        // Where the chunk starts.
+      uintptr_t chunk_index_base);  // Used to locate the entry in the tables.
+  // Controls whether the lookup creates intermediate levels of tables as
+  // needed.
+  enum CreateTables { kDontCreateTables, kCreateTablesAsNeeded };
+  static AtomicWord* AllocatedChunksFinder(AtomicWord* table,
+                                           uintptr_t address,
+                                           int bit_position,
+                                           CreateTables create_as_needed);
+  static void FreeChunkTables(AtomicWord* array, int length, int level);
+  static int FineGrainedIndexForAddress(uintptr_t address) {
+    int index = ((address >> kChunkSizeLog2) &
+        ((1 << kChunkTableBitsPerLevel) - 1));
+    return index * kChunkTableFineGrainedWordsPerEntry;
+  }
+
+
   // Basic check whether a chunk id is in the valid range.
   static inline bool IsValidChunkId(int chunk_id);
 
@@ -1019,6 +1072,8 @@ class PagedSpace : public Space {
   // Checks whether an object/address is in this space.
   inline bool Contains(Address a);
   bool Contains(HeapObject* o) { return Contains(o->address()); }
+  // Never crashes even if a is not a valid pointer.
+  inline bool SafeContains(Address a);
 
   // Given an address occupied by a live object, return that object if it is
   // in this space, or Failure::Exception() if it is not. The implementation
@@ -2132,10 +2187,10 @@ class LargeObjectChunk {
   // Allocates a new LargeObjectChunk that contains a large object page
   // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
   // object) bytes after the object area start of that page.
-  // The allocated chunk size is set in the output parameter chunk_size.
-  static LargeObjectChunk* New(int size_in_bytes,
-                               size_t* chunk_size,
-                               Executability executable);
+  static LargeObjectChunk* New(int size_in_bytes, Executability executable);
+
+  // Free the memory associated with the chunk.
+  inline void Free(Executability executable);
 
   // Interpret a raw address as a large object chunk.
   static LargeObjectChunk* FromAddress(Address address) {
@@ -2148,12 +2203,13 @@ class LargeObjectChunk {
   // Accessors for the fields of the chunk.
   LargeObjectChunk* next() { return next_; }
   void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
-
   size_t size() { return size_ & ~Page::kPageFlagMask; }
-  void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }
+
+  // Compute the start address in the chunk.
+  inline Address GetStartAddress();
 
   // Returns the object in this chunk.
-  inline HeapObject* GetObject();
+  HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
 
   // Given a requested size returns the physical size of a chunk to be
   // allocated.
@@ -2170,7 +2226,7 @@ class LargeObjectChunk {
   // A pointer to the next large object chunk in the space or NULL.
   LargeObjectChunk* next_;
 
-  // The size of this chunk.
+  // The total size of this chunk.
   size_t size_;
 
  public:
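To make the chunk-table constants above concrete, here is a self-contained sketch of how a 32-bit address decomposes into table indices under this scheme. It assumes kPageSizeBits == 13 (8K pages), consistent with the 8K-chunk encoding limit noted above; the constants mirror the patch, but the walk is illustrative, not the real AllocatedChunksFinder.

#include <cstdint>
#include <cstdio>

const int kPageSizeBits = 13;          // Assumed: 8K pages.
const int kPagesPerChunkLog2 = 4;      // 16 pages per chunk (32-bit case).
const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits;  // 17
const int kChunkTableLevels = 2;
const int kChunkTableBitsPerLevel = 8;
const int kChunkTableFineGrainedWordsPerEntry = 2;

int main() {
  uintptr_t address = 0x12345678u;
  // The top level consumes whatever bits remain above the lower levels:
  // 32 - 17 - 8 = 7 bits, i.e. 128 top-level entries.
  int bit_position =
      kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel;
  int top_index = static_cast<int>(address >> bit_position);
  // The bottom level indexes by the next 8 bits and stores two words per
  // entry because an unaligned chunk can straddle two chunk-sized regions.
  int fine_index = static_cast<int>(
      ((address >> kChunkSizeLog2) & ((1 << kChunkTableBitsPerLevel) - 1)) *
      kChunkTableFineGrainedWordsPerEntry);
  printf("top-level index: %d, fine-grained index: %d\n",
         top_index, fine_index);
  return 0;
}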
index 323a6d6..b74c793 100644 (file)
@@ -142,6 +142,7 @@ class StringStream {
   void Log();
   Handle<String> ToString();
   SmartPointer<const char> ToCString() const;
+  int length() const { return length_; }
 
   // Object printing support.
   void PrintName(Object* o);
index 5cc009f..651f018 100644 (file)
@@ -31,6 +31,7 @@
 #include "arguments.h"
 #include "ic-inl.h"
 #include "stub-cache.h"
+#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -425,6 +426,27 @@ MaybeObject* StubCache::ComputeKeyedLoadFunctionPrototype(
 }
 
 
+MaybeObject* StubCache::ComputeKeyedLoadSpecialized(JSObject* receiver) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, NORMAL);
+  String* name = Heap::KeyedLoadSpecialized_symbol();
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedLoadStubCompiler compiler;
+    { MaybeObject* maybe_code = compiler.CompileLoadSpecialized(receiver);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
+  return code;
+}
+
+
 MaybeObject* StubCache::ComputeStoreField(String* name,
                                           JSObject* receiver,
                                           int field_index,
@@ -449,6 +471,27 @@ MaybeObject* StubCache::ComputeStoreField(String* name,
 }
 
 
+MaybeObject* StubCache::ComputeKeyedStoreSpecialized(JSObject* receiver) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL);
+  String* name = Heap::KeyedStoreSpecialized_symbol();
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedStoreStubCompiler compiler;
+    { MaybeObject* maybe_code = compiler.CompileStoreSpecialized(receiver);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), 0));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
+  return code;
+}
+
+
 MaybeObject* StubCache::ComputeStoreNormal() {
   return Builtins::builtin(Builtins::StoreIC_Normal);
 }
@@ -561,13 +604,13 @@ MaybeObject* StubCache::ComputeCallConstant(int argc,
   JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
 
   // Compute check type based on receiver/holder.
-  StubCompiler::CheckType check = StubCompiler::RECEIVER_MAP_CHECK;
+  CheckType check = RECEIVER_MAP_CHECK;
   if (object->IsString()) {
-    check = StubCompiler::STRING_CHECK;
+    check = STRING_CHECK;
   } else if (object->IsNumber()) {
-    check = StubCompiler::NUMBER_CHECK;
+    check = NUMBER_CHECK;
   } else if (object->IsBoolean()) {
-    check = StubCompiler::BOOLEAN_CHECK;
+    check = BOOLEAN_CHECK;
   }
 
   Code::Flags flags =
@@ -589,6 +632,7 @@ MaybeObject* StubCache::ComputeCallConstant(int argc,
           compiler.CompileCallConstant(object, holder, function, name, check);
       if (!maybe_code->ToObject(&code)) return maybe_code;
     }
+    Code::cast(code)->set_check_type(check);
     ASSERT_EQ(flags, Code::cast(code)->flags());
     PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
                             Code::cast(code), name));
@@ -953,6 +997,48 @@ void StubCache::Clear() {
 }
 
 
+void StubCache::CollectMatchingMaps(ZoneMapList* types,
+                                    String* name,
+                                    Code::Flags flags) {
+  for (int i = 0; i < kPrimaryTableSize; i++) {
+    if (primary_[i].key == name) {
+      Map* map = primary_[i].value->FindFirstMap();
+      // Map can be NULL if the stub is a constant function call
+      // with a primitive receiver.
+      if (map == NULL) continue;
+
+      int offset = PrimaryOffset(name, flags, map);
+      if (entry(primary_, offset) == &primary_[i]) {
+        types->Add(Handle<Map>(map));
+      }
+    }
+  }
+
+  for (int i = 0; i < kSecondaryTableSize; i++) {
+    if (secondary_[i].key == name) {
+      Map* map = secondary_[i].value->FindFirstMap();
+      // Map can be NULL if the stub is a constant function call
+      // with a primitive receiver.
+      if (map == NULL) continue;
+
+      // Lookup in primary table and skip duplicates.
+      int primary_offset = PrimaryOffset(name, flags, map);
+      Entry* primary_entry = entry(primary_, primary_offset);
+      if (primary_entry->key == name) {
+        Map* primary_map = primary_entry->value->FindFirstMap();
+        if (map == primary_map) continue;
+      }
+
+      // Lookup in secondary table and add matches.
+      int offset = SecondaryOffset(name, flags, primary_offset);
+      if (entry(secondary_, offset) == &secondary_[i]) {
+        types->Add(Handle<Map>(map));
+      }
+    }
+  }
+}
+
+
 // ------------------------------------------------------------------------
 // StubCompiler implementation.
 
@@ -970,9 +1056,7 @@ MaybeObject* LoadCallbackProperty(Arguments args) {
   {
     // Leaving JavaScript.
     VMState state(EXTERNAL);
-#ifdef ENABLE_LOGGING_AND_PROFILING
-    state.set_external_callback(getter_address);
-#endif
+    ExternalCallbackScope call_scope(getter_address);
     result = fun(v8::Utils::ToLocal(args.at<String>(4)), info);
   }
   RETURN_IF_SCHEDULED_EXCEPTION();
@@ -996,9 +1080,7 @@ MaybeObject* StoreCallbackProperty(Arguments args) {
   {
     // Leaving JavaScript.
     VMState state(EXTERNAL);
-#ifdef ENABLE_LOGGING_AND_PROFILING
-    state.set_external_callback(setter_address);
-#endif
+    ExternalCallbackScope call_scope(setter_address);
     fun(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
   }
   RETURN_IF_SCHEDULED_EXCEPTION();
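Both ComputeKeyedLoadSpecialized and ComputeKeyedStoreSpecialized above follow the same look-up-or-compile pattern: probe the receiver map's code cache under a fixed symbol, compile the specialized stub only on a miss, then record it so later requests hit. A minimal sketch of that pattern with stand-in types (CodeCache and Code here are illustrative, not V8's):

#include <cstddef>
#include <map>
#include <string>

struct Code { std::string body; };

class CodeCache {
 public:
  const Code* Find(const std::string& name) const {
    std::map<std::string, Code>::const_iterator it = cache_.find(name);
    return it == cache_.end() ? NULL : &it->second;
  }
  const Code* Update(const std::string& name, const Code& code) {
    return &(cache_[name] = code);
  }
 private:
  std::map<std::string, Code> cache_;
};

const Code* ComputeSpecializedStub(CodeCache* cache) {
  const std::string name = "KeyedLoadSpecialized";
  const Code* hit = cache->Find(name);
  if (hit != NULL) return hit;                 // Fast path: already compiled.
  Code compiled = { "...generated code..." };  // Compile once on a miss.
  return cache->Update(name, compiled);        // Record for later lookups.
}

int main() {
  CodeCache cache;
  const Code* first = ComputeSpecializedStub(&cache);
  const Code* second = ComputeSpecializedStub(&cache);
  return first == second ? 0 : 1;  // The second call hits the cache.
}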
index cef5481..7a6d400 100644 (file)
@@ -29,6 +29,7 @@
 #define V8_STUB_CACHE_H_
 
 #include "macro-assembler.h"
+#include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -44,6 +45,7 @@ namespace internal {
 
 class SCTableReference;
 
+
 class StubCache : public AllStatic {
  public:
   struct Entry {
@@ -76,9 +78,10 @@ class StubCache : public AllStatic {
                                                           JSObject* holder,
                                                           Object* value);
 
-  MUST_USE_RESULT static MaybeObject* ComputeLoadInterceptor(String* name,
-                                                             JSObject* receiver,
-                                                             JSObject* holder);
+  MUST_USE_RESULT static MaybeObject* ComputeLoadInterceptor(
+      String* name,
+      JSObject* receiver,
+      JSObject* holder);
 
   MUST_USE_RESULT static MaybeObject* ComputeLoadNormal();
 
@@ -127,6 +130,9 @@ class StubCache : public AllStatic {
       String* name,
       JSFunction* receiver);
 
+  MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadSpecialized(
+      JSObject* receiver);
+
   // ---
 
   MUST_USE_RESULT static MaybeObject* ComputeStoreField(String* name,
@@ -158,6 +164,9 @@ class StubCache : public AllStatic {
       int field_index,
       Map* transition = NULL);
 
+  MUST_USE_RESULT static MaybeObject* ComputeKeyedStoreSpecialized(
+      JSObject* receiver);
+
   // ---
 
   MUST_USE_RESULT static MaybeObject* ComputeCallField(int argc,
@@ -244,6 +253,11 @@ class StubCache : public AllStatic {
   // Clear the lookup table (@ mark compact collection).
   static void Clear();
 
+  // Collect all maps that match the name and flags.
+  static void CollectMatchingMaps(ZoneMapList* types,
+                                  String* name,
+                                  Code::Flags flags);
+
   // Generate code for probing the stub cache table.
   // Arguments extra and extra2 may be used to pass additional scratch
   // registers. Set to no_reg if not needed.
@@ -366,13 +380,6 @@ MaybeObject* KeyedLoadPropertyWithInterceptor(Arguments args);
 // The stub compiler compiles stubs for the stub cache.
 class StubCompiler BASE_EMBEDDED {
  public:
-  enum CheckType {
-    RECEIVER_MAP_CHECK,
-    STRING_CHECK,
-    NUMBER_CHECK,
-    BOOLEAN_CHECK
-  };
-
   StubCompiler() : scope_(), masm_(NULL, 256), failure_(NULL) { }
 
   MUST_USE_RESULT MaybeObject* CompileCallInitialize(Code::Flags flags);
@@ -564,7 +571,7 @@ class LoadStubCompiler: public StubCompiler {
                                                  bool is_dont_delete);
 
  private:
-  MaybeObject* GetCode(PropertyType type, String* name);
+  MUST_USE_RESULT MaybeObject* GetCode(PropertyType type, String* name);
 };
 
 
@@ -593,6 +600,8 @@ class KeyedLoadStubCompiler: public StubCompiler {
   MUST_USE_RESULT MaybeObject* CompileLoadStringLength(String* name);
   MUST_USE_RESULT MaybeObject* CompileLoadFunctionPrototype(String* name);
 
+  MUST_USE_RESULT MaybeObject* CompileLoadSpecialized(JSObject* receiver);
+
  private:
   MaybeObject* GetCode(PropertyType type, String* name);
 };
@@ -604,6 +613,7 @@ class StoreStubCompiler: public StubCompiler {
                                                  int index,
                                                  Map* transition,
                                                  String* name);
+
   MUST_USE_RESULT MaybeObject* CompileStoreCallback(JSObject* object,
                                                     AccessorInfo* callbacks,
                                                     String* name);
@@ -615,16 +625,18 @@ class StoreStubCompiler: public StubCompiler {
 
 
  private:
-  MUST_USE_RESULT MaybeObject* GetCode(PropertyType type, String* name);
+  MaybeObject* GetCode(PropertyType type, String* name);
 };
 
 
 class KeyedStoreStubCompiler: public StubCompiler {
  public:
-  MaybeObject* CompileStoreField(JSObject* object,
-                                 int index,
-                                 Map* transition,
-                                 String* name);
+  MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
+                                                 int index,
+                                                 Map* transition,
+                                                 String* name);
+
+  MUST_USE_RESULT MaybeObject* CompileStoreSpecialized(JSObject* receiver);
 
  private:
   MaybeObject* GetCode(PropertyType type, String* name);
index 74d9539..2f5ca1b 100644 (file)
@@ -238,6 +238,40 @@ class Token {
     return EQ <= op && op <= IN;
   }
 
+  static bool IsOrderedCompareOp(Value op) {
+    return op == LT || op == LTE || op == GT || op == GTE;
+  }
+
+  static Value NegateCompareOp(Value op) {
+    ASSERT(IsCompareOp(op));
+    switch (op) {
+      case EQ: return NE;
+      case NE: return EQ;
+      case EQ_STRICT: return NE_STRICT;
+      case LT: return GTE;
+      case GT: return LTE;
+      case LTE: return GT;
+      case GTE: return LT;
+      default:
+        return op;
+    }
+  }
+
+  static Value InvertCompareOp(Value op) {
+    ASSERT(IsCompareOp(op));
+    switch (op) {
+      case EQ: return NE;
+      case NE: return EQ;
+      case EQ_STRICT: return NE_STRICT;
+      case LT: return GT;
+      case GT: return LT;
+      case LTE: return GTE;
+      case GTE: return LTE;
+      default:
+        return op;
+    }
+  }
+
   static bool IsBitOp(Value op) {
     return (BIT_OR <= op && op <= SHR) || op == BIT_NOT;
   }
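The two new helpers differ in a way that is easy to mix up: NegateCompareOp produces the logical complement of the comparison (useful when a branch condition is negated), while InvertCompareOp produces the comparison with its operands swapped. A tiny standalone check of the two identities for LT:

#include <cassert>

int main() {
  int a = 1, b = 2;
  // Negate: !(a < b) is the same predicate as (a >= b).
  assert((!(a < b)) == (a >= b));
  // Invert: (a < b) is the same predicate as (b > a).
  assert((a < b) == (b > a));
  return 0;
}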
index 1f0d159..6187ef0 100644 (file)
 #include "platform.h"
 #include "simulator.h"
 #include "string-stream.h"
+#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
 
+Semaphore* Top::runtime_profiler_semaphore_ = NULL;
 ThreadLocalTop Top::thread_local_;
 Mutex* Top::break_access_ = OS::CreateMutex();
 
@@ -74,10 +76,12 @@ void ThreadLocalTop::Initialize() {
 #endif
 #endif
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  js_entry_sp_ = 0;
+  js_entry_sp_ = NULL;
+  external_callback_ = NULL;
 #endif
 #ifdef ENABLE_VMSTATE_TRACKING
-  current_vm_state_ = NULL;
+  current_vm_state_ = EXTERNAL;
+  runtime_profiler_state_ = Top::PROF_NOT_IN_JS;
 #endif
   try_catch_handler_address_ = NULL;
   context_ = NULL;
@@ -273,6 +277,9 @@ static bool initialized = false;
 void Top::Initialize() {
   CHECK(!initialized);
 
+  ASSERT(runtime_profiler_semaphore_ == NULL);
+  runtime_profiler_semaphore_ = OS::CreateSemaphore(0);
+
   InitializeThreadLocal();
 
   // Only preallocate on the first initialization.
@@ -290,6 +297,9 @@ void Top::Initialize() {
 
 void Top::TearDown() {
   if (initialized) {
+    delete runtime_profiler_semaphore_;
+    runtime_profiler_semaphore_ = NULL;
+
     // Remove the external reference to the preallocated stack memory.
     if (preallocated_message_space != NULL) {
       delete preallocated_message_space;
@@ -376,79 +386,85 @@ Handle<JSArray> Top::CaptureCurrentStackTrace(
   StackTraceFrameIterator it;
   int frames_seen = 0;
   while (!it.done() && (frames_seen < limit)) {
-    // Create a JSObject to hold the information for the StackFrame.
-    Handle<JSObject> stackFrame = Factory::NewJSObject(object_function());
-
     JavaScriptFrame* frame = it.frame();
-    Handle<JSFunction> fun(JSFunction::cast(frame->function()));
-    Handle<Script> script(Script::cast(fun->shared()->script()));
-
-    if (options & StackTrace::kLineNumber) {
-      int script_line_offset = script->line_offset()->value();
-      int position = frame->code()->SourcePosition(frame->pc());
-      int line_number = GetScriptLineNumber(script, position);
-      // line_number is already shifted by the script_line_offset.
-      int relative_line_number = line_number - script_line_offset;
-      if (options & StackTrace::kColumnOffset && relative_line_number >= 0) {
-        Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
-        int start = (relative_line_number == 0) ? 0 :
-            Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
-        int column_offset = position - start;
-        if (relative_line_number == 0) {
-          // For the case where the code is on the same line as the script tag.
-          column_offset += script->column_offset()->value();
+
+    List<FrameSummary> frames(3);  // Max 2 levels of inlining.
+    frame->Summarize(&frames);
+    for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
+      // Create a JSObject to hold the information for the StackFrame.
+      Handle<JSObject> stackFrame = Factory::NewJSObject(object_function());
+
+      Handle<JSFunction> fun = frames[i].function();
+      Handle<Script> script(Script::cast(fun->shared()->script()));
+
+      if (options & StackTrace::kLineNumber) {
+        int script_line_offset = script->line_offset()->value();
+        int position = frames[i].code()->SourcePosition(frames[i].pc());
+        int line_number = GetScriptLineNumber(script, position);
+        // line_number is already shifted by the script_line_offset.
+        int relative_line_number = line_number - script_line_offset;
+        if (options & StackTrace::kColumnOffset && relative_line_number >= 0) {
+          Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
+          int start = (relative_line_number == 0) ? 0 :
+              Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
+          int column_offset = position - start;
+          if (relative_line_number == 0) {
+            // For the case where the code is on the same line as the script
+            // tag.
+            column_offset += script->column_offset()->value();
+          }
+          SetProperty(stackFrame, column_key,
+                      Handle<Smi>(Smi::FromInt(column_offset + 1)), NONE);
         }
-        SetProperty(stackFrame, column_key,
-                    Handle<Smi>(Smi::FromInt(column_offset + 1)), NONE);
+        SetProperty(stackFrame, line_key,
+                    Handle<Smi>(Smi::FromInt(line_number + 1)), NONE);
       }
-      SetProperty(stackFrame, line_key,
-                  Handle<Smi>(Smi::FromInt(line_number + 1)), NONE);
-    }
 
-    if (options & StackTrace::kScriptName) {
-      Handle<Object> script_name(script->name());
-      SetProperty(stackFrame, script_key, script_name, NONE);
-    }
+      if (options & StackTrace::kScriptName) {
+        Handle<Object> script_name(script->name());
+        SetProperty(stackFrame, script_key, script_name, NONE);
+      }
 
-    if (options & StackTrace::kScriptNameOrSourceURL) {
-      Handle<Object> script_name(script->name());
-      Handle<JSValue> script_wrapper = GetScriptWrapper(script);
-      Handle<Object> property = GetProperty(script_wrapper,
-                                            name_or_source_url_key);
-      ASSERT(property->IsJSFunction());
-      Handle<JSFunction> method = Handle<JSFunction>::cast(property);
-      bool caught_exception;
-      Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
-                                                 NULL, &caught_exception);
-      if (caught_exception) {
-        result = Factory::undefined_value();
+      if (options & StackTrace::kScriptNameOrSourceURL) {
+        Handle<Object> script_name(script->name());
+        Handle<JSValue> script_wrapper = GetScriptWrapper(script);
+        Handle<Object> property = GetProperty(script_wrapper,
+                                              name_or_source_url_key);
+        ASSERT(property->IsJSFunction());
+        Handle<JSFunction> method = Handle<JSFunction>::cast(property);
+        bool caught_exception;
+        Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
+                                                   NULL, &caught_exception);
+        if (caught_exception) {
+          result = Factory::undefined_value();
+        }
+        SetProperty(stackFrame, script_name_or_source_url_key, result, NONE);
       }
-      SetProperty(stackFrame, script_name_or_source_url_key, result, NONE);
-    }
 
-    if (options & StackTrace::kFunctionName) {
-      Handle<Object> fun_name(fun->shared()->name());
-      if (fun_name->ToBoolean()->IsFalse()) {
-        fun_name = Handle<Object>(fun->shared()->inferred_name());
+      if (options & StackTrace::kFunctionName) {
+        Handle<Object> fun_name(fun->shared()->name());
+        if (fun_name->ToBoolean()->IsFalse()) {
+          fun_name = Handle<Object>(fun->shared()->inferred_name());
+        }
+        SetProperty(stackFrame, function_key, fun_name, NONE);
       }
-      SetProperty(stackFrame, function_key, fun_name, NONE);
-    }
 
-    if (options & StackTrace::kIsEval) {
-      int type = Smi::cast(script->compilation_type())->value();
-      Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
-          Factory::true_value() : Factory::false_value();
-      SetProperty(stackFrame, eval_key, is_eval, NONE);
-    }
+      if (options & StackTrace::kIsEval) {
+        int type = Smi::cast(script->compilation_type())->value();
+        Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
+            Factory::true_value() : Factory::false_value();
+        SetProperty(stackFrame, eval_key, is_eval, NONE);
+      }
 
-    if (options & StackTrace::kIsConstructor) {
-      Handle<Object> is_constructor = (frame->IsConstructor()) ?
-          Factory::true_value() : Factory::false_value();
-      SetProperty(stackFrame, constructor_key, is_constructor, NONE);
-    }
+      if (options & StackTrace::kIsConstructor) {
+        Handle<Object> is_constructor = (frames[i].is_constructor()) ?
+            Factory::true_value() : Factory::false_value();
+        SetProperty(stackFrame, constructor_key, is_constructor, NONE);
+      }
 
-    FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
-    frames_seen++;
+      FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
+      frames_seen++;
+    }
     it.Advance();
   }
 
@@ -1079,15 +1095,4 @@ char* Top::RestoreThread(char* from) {
   return from + sizeof(thread_local_);
 }
 
-
-ExecutionAccess::ExecutionAccess() {
-  Top::break_access_->Lock();
-}
-
-
-ExecutionAccess::~ExecutionAccess() {
-  Top::break_access_->Unlock();
-}
-
-
 } }  // namespace v8::internal
index bc3a85e..e485de1 100644 (file)
--- a/src/top.h
+++ b/src/top.h
 #ifndef V8_TOP_H_
 #define V8_TOP_H_
 
+#include "atomicops.h"
+#include "compilation-cache.h"
 #include "frames-inl.h"
+#include "runtime-profiler.h"
 #include "simulator.h"
 
 namespace v8 {
@@ -114,10 +117,15 @@ class ThreadLocalTop BASE_EMBEDDED {
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
   Address js_entry_sp_;  // the stack pointer of the bottom js entry frame
+  Address external_callback_;  // the external callback we're currently in
 #endif
 
 #ifdef ENABLE_VMSTATE_TRACKING
-  VMState* current_vm_state_;
+  StateTag current_vm_state_;
+
+  // Used for communication with the runtime profiler thread.
+  // Possible values are specified in RuntimeProfilerState.
+  Atomic32 runtime_profiler_state_;
 #endif
 
   // Generated code scratch locations.
@@ -267,16 +275,72 @@ class Top {
   static inline Address* js_entry_sp_address() {
     return &thread_local_.js_entry_sp_;
   }
+
+  static Address external_callback() {
+    return thread_local_.external_callback_;
+  }
+  static void set_external_callback(Address callback) {
+    thread_local_.external_callback_ = callback;
+  }
 #endif
 
 #ifdef ENABLE_VMSTATE_TRACKING
-  static VMState* current_vm_state() {
+  static StateTag current_vm_state() {
     return thread_local_.current_vm_state_;
   }
 
-  static void set_current_vm_state(VMState* state) {
+  static void SetCurrentVMState(StateTag state) {
+    if (RuntimeProfiler::IsEnabled()) {
+      if (state == JS) {
+        // JS or non-JS -> JS transition.
+        RuntimeProfilerState old_state = SwapRuntimeProfilerState(PROF_IN_JS);
+        if (old_state == PROF_NOT_IN_JS_WAITING_FOR_JS) {
+          // If the runtime profiler was waiting, we reset the eager
+          // optimizing data in the compilation cache to get a fresh
+          // start after not running JavaScript code for a while and
+          // signal the runtime profiler so it can resume.
+          CompilationCache::ResetEagerOptimizingData();
+          runtime_profiler_semaphore_->Signal();
+        }
+      } else if (thread_local_.current_vm_state_ == JS) {
+        // JS -> non-JS transition. Update the runtime profiler state.
+        ASSERT(IsInJSState());
+        SetRuntimeProfilerState(PROF_NOT_IN_JS);
+      }
+    }
     thread_local_.current_vm_state_ = state;
   }
+
+  // Called in the runtime profiler thread.
+  // Returns whether the current VM state is set to JS.
+  static bool IsInJSState() {
+    ASSERT(RuntimeProfiler::IsEnabled());
+    return static_cast<RuntimeProfilerState>(
+        NoBarrier_Load(&thread_local_.runtime_profiler_state_)) == PROF_IN_JS;
+  }
+
+  // Called in the runtime profiler thread.
+  // Waits for the VM state to transition from non-JS to JS. Returns
+  // true when notified of the transition, false when the current
+  // state is not the expected non-JS state.
+  static bool WaitForJSState() {
+    ASSERT(RuntimeProfiler::IsEnabled());
+    // Try to switch to waiting state.
+    RuntimeProfilerState old_state = CompareAndSwapRuntimeProfilerState(
+        PROF_NOT_IN_JS, PROF_NOT_IN_JS_WAITING_FOR_JS);
+    if (old_state == PROF_NOT_IN_JS) {
+      runtime_profiler_semaphore_->Wait();
+      return true;
+    }
+    return false;
+  }
+
+  // When shutting down we join the profiler thread. Doing so while
+  // it's waiting on a semaphore will cause a deadlock, so we have to
+  // wake it up first.
+  static void WakeUpRuntimeProfilerThreadBeforeShutdown() {
+    runtime_profiler_semaphore_->Signal();
+  }
 #endif
 
   // Generated code scratch locations.
@@ -386,6 +450,51 @@ class Top {
   static const char* kStackOverflowMessage;
 
  private:
+#ifdef ENABLE_VMSTATE_TRACKING
+  // Set of states used when communicating with the runtime profiler.
+  //
+  // The set of possible transitions is divided between the VM and the
+  // profiler threads.
+  //
+  // The VM thread can perform these transitions:
+  //   o IN_JS -> NOT_IN_JS
+  //   o NOT_IN_JS -> IN_JS
+  //   o NOT_IN_JS_WAITING_FOR_JS -> IN_JS notifying the profiler thread
+  //     using the semaphore.
+  // All the above transitions are caused by VM state changes.
+  //
+  // The profiler thread can only perform a single transition
+  // NOT_IN_JS -> NOT_IN_JS_WAITING_FOR_JS before it starts waiting on
+  // the semaphore.
+  enum RuntimeProfilerState {
+    PROF_NOT_IN_JS,
+    PROF_NOT_IN_JS_WAITING_FOR_JS,
+    PROF_IN_JS
+  };
+
+  static void SetRuntimeProfilerState(RuntimeProfilerState state) {
+    NoBarrier_Store(&thread_local_.runtime_profiler_state_, state);
+  }
+
+  static RuntimeProfilerState SwapRuntimeProfilerState(
+      RuntimeProfilerState state) {
+    return static_cast<RuntimeProfilerState>(
+        NoBarrier_AtomicExchange(&thread_local_.runtime_profiler_state_,
+                                 state));
+  }
+
+  static RuntimeProfilerState CompareAndSwapRuntimeProfilerState(
+      RuntimeProfilerState old_state,
+      RuntimeProfilerState state) {
+    return static_cast<RuntimeProfilerState>(
+        NoBarrier_CompareAndSwap(&thread_local_.runtime_profiler_state_,
+                                 old_state,
+                                 state));
+  }
+
+  static Semaphore* runtime_profiler_semaphore_;
+#endif  // ENABLE_VMSTATE_TRACKING
+
   // The context that initiated this JS execution.
   static ThreadLocalTop thread_local_;
   static void InitializeThreadLocal();
@@ -402,6 +511,7 @@ class Top {
   friend class SaveContext;
   friend class AssertNoContextChange;
   friend class ExecutionAccess;
+  friend class ThreadLocalTop;
 
   static void FillCache();
 };
@@ -471,8 +581,15 @@ class AssertNoContextChange BASE_EMBEDDED {
 
 class ExecutionAccess BASE_EMBEDDED {
  public:
-  ExecutionAccess();
-  ~ExecutionAccess();
+  ExecutionAccess() { Lock(); }
+  ~ExecutionAccess() { Unlock(); }
+
+  static void Lock() { Top::break_access_->Lock(); }
+  static void Unlock() { Top::break_access_->Unlock(); }
+
+  static bool TryLock() {
+    return Top::break_access_->TryLock();
+  }
 };
 
 } }  // namespace v8::internal
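A self-contained sketch of the VM/profiler handshake these primitives implement, using std::atomic, std::thread, and a tiny semaphore class in place of V8's atomicops and OS::CreateSemaphore so the example compiles on its own (the real code predates C++11; names mirror the patch, but this is illustrative, not the actual RuntimeProfiler):

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

enum RuntimeProfilerState {
  PROF_NOT_IN_JS,
  PROF_NOT_IN_JS_WAITING_FOR_JS,
  PROF_IN_JS
};

class Semaphore {
 public:
  void Signal() {
    std::lock_guard<std::mutex> lock(mutex_);
    ++count_;
    cv_.notify_one();
  }
  void Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return count_ > 0; });
    --count_;
  }
 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int count_ = 0;
};

std::atomic<int> state(PROF_NOT_IN_JS);
Semaphore semaphore;

// VM thread: entering JS wakes a profiler that parked itself.
void EnterJS() {
  if (state.exchange(PROF_IN_JS) == PROF_NOT_IN_JS_WAITING_FOR_JS) {
    semaphore.Signal();
  }
}

// Profiler thread: try to move NOT_IN_JS -> WAITING, then block.
bool WaitForJSState() {
  int expected = PROF_NOT_IN_JS;
  if (state.compare_exchange_strong(expected,
                                    PROF_NOT_IN_JS_WAITING_FOR_JS)) {
    semaphore.Wait();
    return true;
  }
  return false;  // The VM was already (back) in JS; caller retries.
}

int main() {
  std::thread profiler([] {
    if (WaitForJSState()) std::printf("profiler: woken by JS entry\n");
  });
  std::this_thread::sleep_for(std::chrono::milliseconds(10));
  EnterJS();  // VM thread enters JavaScript and signals the profiler.
  profiler.join();
  return 0;
}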
index 3fc929d..f9ee47d 100644 (file)
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "v8.h"
+
+#include "ast.h"
+#include "compiler.h"
+#include "ic.h"
+#include "macro-assembler.h"
+#include "stub-cache.h"
 #include "type-info.h"
+
+#include "ic-inl.h"
 #include "objects-inl.h"
 
 namespace v8 {
@@ -50,4 +58,293 @@ TypeInfo TypeInfo::TypeFromValue(Handle<Object> value) {
 }
 
 
+TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code) {
+  Initialize(code);
+}
+
+
+void TypeFeedbackOracle::Initialize(Handle<Code> code) {
+  ASSERT(map_.is_null());  // Only initialize once.
+  map_ = Factory::NewJSObject(Top::object_function());
+  PopulateMap(code);
+}
+
+
+bool TypeFeedbackOracle::LoadIsMonomorphic(Property* expr) {
+  return IsMonomorphic(expr->position());
+}
+
+
+bool TypeFeedbackOracle::StoreIsMonomorphic(Assignment* expr) {
+  return IsMonomorphic(expr->position());
+}
+
+
+bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
+  return IsMonomorphic(expr->position());
+}
+
+
+Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
+  ASSERT(LoadIsMonomorphic(expr));
+  return Handle<Map>::cast(GetElement(map_, expr->position()));
+}
+
+
+Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Assignment* expr) {
+  ASSERT(StoreIsMonomorphic(expr));
+  return Handle<Map>::cast(GetElement(map_, expr->position()));
+}
+
+
+Handle<Map> TypeFeedbackOracle::CallMonomorphicReceiverType(Call* expr) {
+  ASSERT(CallIsMonomorphic(expr));
+  return Handle<Map>::cast(GetElement(map_, expr->position()));
+}
+
+
+ZoneMapList* TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
+                                                   Handle<String> name) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
+  return CollectReceiverTypes(expr->position(), name, flags);
+}
+
+
+ZoneMapList* TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr,
+                                                    Handle<String> name) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, NORMAL);
+  return CollectReceiverTypes(expr->position(), name, flags);
+}
+
+
+ZoneMapList* TypeFeedbackOracle::CallReceiverTypes(Call* expr,
+                                                   Handle<String> name) {
+  int arity = expr->arguments()->length();
+  Code::Flags flags = Code::ComputeMonomorphicFlags(
+      Code::CALL_IC, NORMAL, OWN_MAP, NOT_IN_LOOP, arity);
+  return CollectReceiverTypes(expr->position(), name, flags);
+}
+
+
+bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
+  Handle<Object> object = GetElement(map_, expr->position());
+  return *object == Builtins::builtin(id);
+}
+
+
+TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr, Side side) {
+  Handle<Object> object = GetElement(map_, expr->position());
+  TypeInfo unknown = TypeInfo::Unknown();
+  if (!object->IsCode()) return unknown;
+  Handle<Code> code = Handle<Code>::cast(object);
+  if (!code->is_compare_ic_stub()) return unknown;
+
+  CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
+  switch (state) {
+    case CompareIC::UNINITIALIZED:
+    case CompareIC::SMIS:
+      return TypeInfo::Smi();
+    case CompareIC::HEAP_NUMBERS:
+      return TypeInfo::Number();
+    case CompareIC::OBJECTS:
+      // TODO(kasperl): We really need a type for JS objects here.
+      return TypeInfo::NonPrimitive();
+    case CompareIC::GENERIC:
+    default:
+      return unknown;
+  }
+}
+
+
+TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr, Side side) {
+  Handle<Object> object = GetElement(map_, expr->position());
+  TypeInfo unknown = TypeInfo::Unknown();
+  if (!object->IsCode()) return unknown;
+  Handle<Code> code = Handle<Code>::cast(object);
+  if (code->is_binary_op_stub()) {
+    BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
+        code->binary_op_type());
+    switch (type) {
+      case BinaryOpIC::UNINIT_OR_SMI:
+        return TypeInfo::Smi();
+      case BinaryOpIC::DEFAULT:
+        return (expr->op() == Token::DIV || expr->op() == Token::MUL)
+            ? TypeInfo::Double()
+            : TypeInfo::Integer32();
+      case BinaryOpIC::HEAP_NUMBERS:
+        return TypeInfo::Double();
+      default:
+        return unknown;
+    }
+  } else if (code->is_type_recording_binary_op_stub()) {
+    TRBinaryOpIC::TypeInfo type = static_cast<TRBinaryOpIC::TypeInfo>(
+        code->type_recording_binary_op_type());
+    TRBinaryOpIC::TypeInfo result_type = static_cast<TRBinaryOpIC::TypeInfo>(
+        code->type_recording_binary_op_result_type());
+
+    switch (type) {
+      case TRBinaryOpIC::UNINITIALIZED:
+      case TRBinaryOpIC::SMI:
+        switch (result_type) {
+          case TRBinaryOpIC::UNINITIALIZED:
+          case TRBinaryOpIC::SMI:
+            return TypeInfo::Smi();
+          case TRBinaryOpIC::INT32:
+            return TypeInfo::Integer32();
+          case TRBinaryOpIC::HEAP_NUMBER:
+            return TypeInfo::Double();
+          default:
+            return unknown;
+        }
+      case TRBinaryOpIC::INT32:
+        if (expr->op() == Token::DIV ||
+            result_type == TRBinaryOpIC::HEAP_NUMBER) {
+          return TypeInfo::Double();
+        }
+        return TypeInfo::Integer32();
+      case TRBinaryOpIC::HEAP_NUMBER:
+        return TypeInfo::Double();
+      case TRBinaryOpIC::STRING:
+      case TRBinaryOpIC::GENERIC:
+        return unknown;
+      default:
+        return unknown;
+    }
+  }
+  return unknown;
+}
+
+
+TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
+  Handle<Object> object = GetElement(map_, clause->position());
+  TypeInfo unknown = TypeInfo::Unknown();
+  if (!object->IsCode()) return unknown;
+  Handle<Code> code = Handle<Code>::cast(object);
+  if (!code->is_compare_ic_stub()) return unknown;
+
+  CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
+  switch (state) {
+    case CompareIC::UNINITIALIZED:
+    case CompareIC::SMIS:
+      return TypeInfo::Smi();
+    case CompareIC::HEAP_NUMBERS:
+      return TypeInfo::Number();
+    case CompareIC::OBJECTS:
+      // TODO(kasperl): We really need a type for JS objects here.
+      return TypeInfo::NonPrimitive();
+    case CompareIC::GENERIC:
+    default:
+      return unknown;
+  }
+}
+
+
+ZoneMapList* TypeFeedbackOracle::CollectReceiverTypes(int position,
+                                                      Handle<String> name,
+                                                      Code::Flags flags) {
+  Handle<Object> object = GetElement(map_, position);
+  if (object->IsUndefined()) return NULL;
+
+  if (*object == Builtins::builtin(Builtins::StoreIC_GlobalProxy)) {
+    // TODO(fschneider): We could collect the maps and signal that
+    // we need a generic store (or load) here.
+    ASSERT(Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC);
+    return NULL;
+  } else if (object->IsMap()) {
+    ZoneMapList* types = new ZoneMapList(1);
+    types->Add(Handle<Map>::cast(object));
+    return types;
+  } else if (Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
+    ZoneMapList* types = new ZoneMapList(4);
+    ASSERT(object->IsCode());
+    StubCache::CollectMatchingMaps(types, *name, flags);
+    return types->length() > 0 ? types : NULL;
+  } else {
+    return NULL;
+  }
+}
+
+
+void TypeFeedbackOracle::PopulateMap(Handle<Code> code) {
+  HandleScope scope;
+
+  const int kInitialCapacity = 16;
+  List<int> code_positions(kInitialCapacity);
+  List<int> source_positions(kInitialCapacity);
+  CollectPositions(*code, &code_positions, &source_positions);
+
+  int length = code_positions.length();
+  ASSERT(source_positions.length() == length);
+  for (int i = 0; i < length; i++) {
+    RelocInfo info(code->instruction_start() + code_positions[i],
+                   RelocInfo::CODE_TARGET, 0);
+    Handle<Code> target(Code::GetCodeFromTargetAddress(info.target_address()));
+    int position = source_positions[i];
+    InlineCacheState state = target->ic_state();
+    Code::Kind kind = target->kind();
+    if (kind == Code::BINARY_OP_IC ||
+        kind == Code::TYPE_RECORDING_BINARY_OP_IC ||
+        kind == Code::COMPARE_IC) {
+      // TODO(kasperl): Avoid having multiple ICs with the same
+      // position by making sure that we have position information
+      // recorded for all binary ICs.
+      if (GetElement(map_, position)->IsUndefined()) {
+        SetElement(map_, position, target);
+      }
+    } else if (state == MONOMORPHIC) {
+      Handle<Map> map = Handle<Map>(target->FindFirstMap());
+      if (*map == NULL) {
+        SetElement(map_, position, target);
+      } else {
+        SetElement(map_, position, map);
+      }
+    } else if (state == MEGAMORPHIC) {
+      SetElement(map_, position, target);
+    }
+  }
+}
+
+
+void TypeFeedbackOracle::CollectPositions(Code* code,
+                                          List<int>* code_positions,
+                                          List<int>* source_positions) {
+  AssertNoAllocation no_allocation;
+  int position = 0;
+  // Because the ICs we use for global variable access in the full
+  // code generator do not have any meaningful positions, we avoid
+  // collecting those by filtering out contextual code targets.
+  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+      RelocInfo::kPositionMask;
+  for (RelocIterator it(code, mask); !it.done(); it.next()) {
+    RelocInfo* info = it.rinfo();
+    RelocInfo::Mode mode = info->rmode();
+    if (RelocInfo::IsCodeTarget(mode)) {
+      Code* target = Code::GetCodeFromTargetAddress(info->target_address());
+      if (target->is_inline_cache_stub()) {
+        InlineCacheState state = target->ic_state();
+        Code::Kind kind = target->kind();
+        if (kind == Code::BINARY_OP_IC) {
+          if (target->binary_op_type() == BinaryOpIC::GENERIC) continue;
+        } else if (kind == Code::TYPE_RECORDING_BINARY_OP_IC) {
+          if (target->type_recording_binary_op_type() ==
+              TRBinaryOpIC::GENERIC) {
+            continue;
+          }
+        } else if (kind == Code::COMPARE_IC) {
+          if (target->compare_state() == CompareIC::GENERIC) continue;
+        } else {
+          if (kind == Code::CALL_IC && state == MONOMORPHIC &&
+              target->check_type() != RECEIVER_MAP_CHECK) continue;
+          if (state != MONOMORPHIC && state != MEGAMORPHIC) continue;
+        }
+        code_positions->Add(info->pc() - code->instruction_start());
+        source_positions->Add(position);
+      }
+    } else {
+      ASSERT(RelocInfo::IsPosition(mode));
+      position = info->data();
+    }
+  }
+}
+
 } }  // namespace v8::internal
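PopulateMap above builds, in effect, a dictionary from source position to the recorded feedback: a receiver Map when the call site was monomorphic, the IC code object otherwise; IsMonomorphic then reduces to asking whether the entry holds a map. A sketch of that shape, with a std::map and string payloads standing in for the JSObject the oracle actually uses as storage:

#include <cassert>
#include <map>
#include <string>

struct Feedback {
  bool is_map;          // True when the IC recorded a single receiver map.
  std::string payload;  // The map's name here, or a stub identifier.
};

typedef std::map<int, Feedback> FeedbackTable;

bool IsMonomorphic(const FeedbackTable& table, int position) {
  FeedbackTable::const_iterator it = table.find(position);
  return it != table.end() && it->second.is_map;
}

int main() {
  FeedbackTable table;
  Feedback map_entry = { true, "FastElementsMap" };  // MONOMORPHIC site.
  Feedback stub_entry = { false, "CompareICStub" };  // IC stub recorded.
  table[42] = map_entry;
  table[77] = stub_entry;
  assert(IsMonomorphic(table, 42));
  assert(!IsMonomorphic(table, 77));
  assert(!IsMonomorphic(table, 99));  // No feedback at this position.
  return 0;
}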
index f588e56..cb3e75d 100644 (file)
 #define V8_TYPE_INFO_H_
 
 #include "globals.h"
+#include "zone.h"
+#include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
 
-//        Unknown
-//           |
-//      PrimitiveType
-//           |   \--------|
-//         Number      String
-//         /    |         |
-//    Double  Integer32   |
-//        |      |       /
-//        |     Smi     /
-//        |     /      /
-//        Uninitialized.
+//         Unknown
+//           |   |
+//           |   \--------------|
+//      Primitive             Non-primitive
+//           |   \--------|     |
+//         Number      String   |
+//         /    |         |     |
+//    Double  Integer32   |    /
+//        |      |       /    /
+//        |     Smi     /    /
+//        |      |     /    /
+//        |      |    /    /
+//        Uninitialized.--/
 
 class TypeInfo {
  public:
-  TypeInfo() : type_(kUnknownType) { }
+  TypeInfo() : type_(kUninitialized) { }
 
-  static inline TypeInfo Unknown();
+  static TypeInfo Unknown() { return TypeInfo(kUnknown); }
   // We know it's a primitive type.
-  static inline TypeInfo Primitive();
+  static TypeInfo Primitive() { return TypeInfo(kPrimitive); }
   // We know it's a number of some sort.
-  static inline TypeInfo Number();
-  // We know it's signed 32 bit integer.
-  static inline TypeInfo Integer32();
+  static TypeInfo Number() { return TypeInfo(kNumber); }
+  // We know it's a signed 32-bit integer.
+  static TypeInfo Integer32() { return TypeInfo(kInteger32); }
   // We know it's a Smi.
-  static inline TypeInfo Smi();
+  static TypeInfo Smi() { return TypeInfo(kSmi); }
   // We know it's a heap number.
-  static inline TypeInfo Double();
+  static TypeInfo Double() { return TypeInfo(kDouble); }
   // We know it's a string.
-  static inline TypeInfo String();
+  static TypeInfo String() { return TypeInfo(kString); }
+  // We know it's a non-primitive (object) type.
+  static TypeInfo NonPrimitive() { return TypeInfo(kNonPrimitive); }
   // We haven't started collecting info yet.
-  static inline TypeInfo Uninitialized();
+  static TypeInfo Uninitialized() { return TypeInfo(kUninitialized); }
 
   // Return compact representation.  Very sensitive to enum values below!
-  // Compacting drops information about primtive types and strings types.
+  // Compacting drops information about primitive types and string types.
   // We use the compact representation when we only care about number types.
   int ThreeBitRepresentation() {
-    ASSERT(type_ != kUninitializedType);
+    ASSERT(type_ != kUninitialized);
     int answer = type_ & 0xf;
     answer = answer > 6 ? answer - 2 : answer;
     ASSERT(answer >= 0);
@@ -82,12 +88,12 @@ class TypeInfo {
     Type t = static_cast<Type>(three_bit_representation > 4 ?
                                three_bit_representation + 2 :
                                three_bit_representation);
-    t = (t == kUnknownType) ? t : static_cast<Type>(t | kPrimitiveType);
-    ASSERT(t == kUnknownType ||
-           t == kNumberType ||
-           t == kInteger32Type ||
-           t == kSmiType ||
-           t == kDoubleType);
+    t = (t == kUnknown) ? t : static_cast<Type>(t | kPrimitive);
+    ASSERT(t == kUnknown ||
+           t == kNumber ||
+           t == kInteger32 ||
+           t == kSmi ||
+           t == kDouble);
     return TypeInfo(t);
   }
 
@@ -97,13 +103,14 @@ class TypeInfo {
 
   static TypeInfo FromInt(int bit_representation) {
     Type t = static_cast<Type>(bit_representation);
-    ASSERT(t == kUnknownType ||
-           t == kPrimitiveType ||
-           t == kNumberType ||
-           t == kInteger32Type ||
-           t == kSmiType ||
-           t == kDoubleType ||
-           t == kStringType);
+    ASSERT(t == kUnknown ||
+           t == kPrimitive ||
+           t == kNumber ||
+           t == kInteger32 ||
+           t == kSmi ||
+           t == kDouble ||
+           t == kString ||
+           t == kNonPrimitive);
     return TypeInfo(t);
   }
 
@@ -113,82 +120,98 @@ class TypeInfo {
   }
 
 
-  // Integer32 is an integer that can be represented as a signed
-  // 32-bit integer. It has to be in the range [-2^31, 2^31 - 1].
-  // We also have to check for negative 0 as it is not an Integer32.
+  // Integer32 is an integer that can be represented as either a signed
+  // 32-bit integer or as an unsigned 32-bit integer. It has to be
+  // in the range [-2^31, 2^32 - 1]. We also have to check for negative 0
+  // as it is not an Integer32.
   static inline bool IsInt32Double(double value) {
     const DoubleRepresentation minus_zero(-0.0);
     DoubleRepresentation rep(value);
     if (rep.bits == minus_zero.bits) return false;
-    if (value >= kMinInt && value <= kMaxInt) {
-      if (value == static_cast<int32_t>(value)) return true;
+    if (value >= kMinInt && value <= kMaxInt &&
+        value == static_cast<int32_t>(value)) {
+      return true;
     }
     return false;
   }
 
   static TypeInfo TypeFromValue(Handle<Object> value);
 
+  bool Equals(const TypeInfo& other) {
+    return type_ == other.type_;
+  }
+
   inline bool IsUnknown() {
-    return type_ == kUnknownType;
+    ASSERT(type_ != kUninitialized);
+    return type_ == kUnknown;
+  }
+
+  inline bool IsPrimitive() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kPrimitive) == kPrimitive);
   }
 
   inline bool IsNumber() {
-    ASSERT(type_ != kUninitializedType);
-    return ((type_ & kNumberType) == kNumberType);
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kNumber) == kNumber);
   }
 
   inline bool IsSmi() {
-    ASSERT(type_ != kUninitializedType);
-    return ((type_ & kSmiType) == kSmiType);
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kSmi) == kSmi);
   }
 
   inline bool IsInteger32() {
-    ASSERT(type_ != kUninitializedType);
-    return ((type_ & kInteger32Type) == kInteger32Type);
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kInteger32) == kInteger32);
   }
 
   inline bool IsDouble() {
-    ASSERT(type_ != kUninitializedType);
-    return ((type_ & kDoubleType) == kDoubleType);
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kDouble) == kDouble);
   }
 
   inline bool IsString() {
-    ASSERT(type_ != kUninitializedType);
-    return ((type_ & kStringType) == kStringType);
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kString) == kString);
+  }
+
+  inline bool IsNonPrimitive() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kNonPrimitive) == kNonPrimitive);
   }
 
   inline bool IsUninitialized() {
-    return type_ == kUninitializedType;
+    return type_ == kUninitialized;
   }
 
   const char* ToString() {
     switch (type_) {
-      case kUnknownType: return "UnknownType";
-      case kPrimitiveType: return "PrimitiveType";
-      case kNumberType: return "NumberType";
-      case kInteger32Type: return "Integer32Type";
-      case kSmiType: return "SmiType";
-      case kDoubleType: return "DoubleType";
-      case kStringType: return "StringType";
-      case kUninitializedType:
-        UNREACHABLE();
-        return "UninitializedType";
+      case kUnknown: return "Unknown";
+      case kPrimitive: return "Primitive";
+      case kNumber: return "Number";
+      case kInteger32: return "Integer32";
+      case kSmi: return "Smi";
+      case kDouble: return "Double";
+      case kString: return "String";
+      case kNonPrimitive: return "Object";
+      case kUninitialized: return "Uninitialized";
     }
     UNREACHABLE();
     return "Unreachable code";
   }
 
  private:
-  // We use 6 bits to represent the types.
   enum Type {
-    kUnknownType = 0,          // 000000
-    kPrimitiveType = 0x10,     // 010000
-    kNumberType = 0x11,        // 010001
-    kInteger32Type = 0x13,     // 010011
-    kSmiType = 0x17,           // 010111
-    kDoubleType = 0x19,        // 011001
-    kStringType = 0x30,        // 110000
-    kUninitializedType = 0x3f  // 111111
+    kUnknown = 0,          // 0000000
+    kPrimitive = 0x10,     // 0010000
+    kNumber = 0x11,        // 0010001
+    kInteger32 = 0x13,     // 0010011
+    kSmi = 0x17,           // 0010111
+    kDouble = 0x19,        // 0011001
+    kString = 0x30,        // 0110000
+    kNonPrimitive = 0x40,  // 1000000
+    kUninitialized = 0x7f  // 1111111
   };
   explicit inline TypeInfo(Type t) : type_(t) { }
 
@@ -196,44 +219,63 @@ class TypeInfo {
 };
 
 
-TypeInfo TypeInfo::Unknown() {
-  return TypeInfo(kUnknownType);
-}
+// Forward declarations.
+class Assignment;
+class BinaryOperation;
+class Call;
+class CompareOperation;
+class CompilationInfo;
+class Property;
+class CaseClause;
 
+class TypeFeedbackOracle BASE_EMBEDDED {
+ public:
+  enum Side {
+    LEFT,
+    RIGHT,
+    RESULT
+  };
 
-TypeInfo TypeInfo::Primitive() {
-  return TypeInfo(kPrimitiveType);
-}
+  explicit TypeFeedbackOracle(Handle<Code> code);
 
+  bool LoadIsMonomorphic(Property* expr);
+  bool StoreIsMonomorphic(Assignment* expr);
+  bool CallIsMonomorphic(Call* expr);
 
-TypeInfo TypeInfo::Number() {
-  return TypeInfo(kNumberType);
-}
+  Handle<Map> LoadMonomorphicReceiverType(Property* expr);
+  Handle<Map> StoreMonomorphicReceiverType(Assignment* expr);
+  Handle<Map> CallMonomorphicReceiverType(Call* expr);
 
+  ZoneMapList* LoadReceiverTypes(Property* expr, Handle<String> name);
+  ZoneMapList* StoreReceiverTypes(Assignment* expr, Handle<String> name);
+  ZoneMapList* CallReceiverTypes(Call* expr, Handle<String> name);
 
-TypeInfo TypeInfo::Integer32() {
-  return TypeInfo(kInteger32Type);
-}
+  bool LoadIsBuiltin(Property* expr, Builtins::Name id);
 
+  // Get type information for arithmetic operations and compares.
+  TypeInfo BinaryType(BinaryOperation* expr, Side side);
+  TypeInfo CompareType(CompareOperation* expr, Side side);
+  TypeInfo SwitchType(CaseClause* clause);
 
-TypeInfo TypeInfo::Smi() {
-  return TypeInfo(kSmiType);
-}
+ private:
+  void Initialize(Handle<Code> code);
 
+  bool IsMonomorphic(int pos) { return GetElement(map_, pos)->IsMap(); }
 
-TypeInfo TypeInfo::Double() {
-  return TypeInfo(kDoubleType);
-}
+  ZoneMapList* CollectReceiverTypes(int position,
+                                    Handle<String> name,
+                                    Code::Flags flags);
 
+  void PopulateMap(Handle<Code> code);
 
-TypeInfo TypeInfo::String() {
-  return TypeInfo(kStringType);
-}
+  void CollectPositions(Code* code,
+                        List<int>* code_positions,
+                        List<int>* source_positions);
 
+  Handle<JSObject> map_;
 
-TypeInfo TypeInfo::Uninitialized() {
-  return TypeInfo(kUninitializedType);
-}
+  DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
+};
 
 } }  // namespace v8::internal
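The enum values above are chosen so that every subtype's bit pattern is a superset of its supertypes' bits, which is what lets IsNumber() and friends be a single mask test. A standalone check of a few of those subset relations (the Is helper is illustrative; the class performs the same test inline):

#include <cassert>

enum Type {
  kUnknown = 0,
  kPrimitive = 0x10,
  kNumber = 0x11,
  kInteger32 = 0x13,
  kSmi = 0x17,
  kDouble = 0x19,
  kString = 0x30,
  kNonPrimitive = 0x40
};

bool Is(int t, int query) { return (t & query) == query; }

int main() {
  assert(Is(kSmi, kInteger32));      // Smi bits include all Integer32 bits.
  assert(Is(kInteger32, kNumber));   // ...which include all Number bits...
  assert(Is(kNumber, kPrimitive));   // ...which include the Primitive bit.
  assert(!Is(kDouble, kInteger32));  // Double is a Number, not an Integer32.
  assert(!Is(kString, kNumber));     // String is Primitive, not a Number.
  return 0;
}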
 
index 7096ba3..2c234bb 100644 (file)
@@ -168,6 +168,23 @@ int WriteCharsToFile(const char* str, int size, FILE* f) {
 }
 
 
+int AppendChars(const char* filename,
+                const char* str,
+                int size,
+                bool verbose) {
+  FILE* f = OS::FOpen(filename, "ab");
+  if (f == NULL) {
+    if (verbose) {
+      OS::PrintError("Cannot open file %s for writing.\n", filename);
+    }
+    return 0;
+  }
+  int written = WriteCharsToFile(str, size, f);
+  fclose(f);
+  return written;
+}
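
A minimal usage sketch of the new helper (file name and sizes are illustrative; assumes the declarations in this file):

  int n1 = WriteChars("trace.log", "start\n", 6, true);   // creates/truncates
  int n2 = AppendChars("trace.log", "more\n", 5, true);   // appends at the end
  // On success the file holds "start\nmore\n" and n1 + n2 == 11.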
+
+
 int WriteChars(const char* filename,
                const char* str,
                int size,
@@ -214,11 +231,16 @@ void StringBuilder::AddSubstring(const char* s, int n) {
 
 
 void StringBuilder::AddFormatted(const char* format, ...) {
+  va_list arguments;
+  va_start(arguments, format);
+  AddFormattedList(format, arguments);
+  va_end(arguments);
+}
+
+
+void StringBuilder::AddFormattedList(const char* format, va_list list) {
   ASSERT(!is_finalized() && position_ < buffer_.length());
-  va_list args;
-  va_start(args, format);
-  int n = OS::VSNPrintF(buffer_ + position_, format, args);
-  va_end(args);
+  int n = OS::VSNPrintF(buffer_ + position_, format, list);
   if (n < 0 || n >= (buffer_.length() - position_)) {
     position_ = buffer_.length();
   } else {
index 69c062f..62b8726 100644 (file)
@@ -226,6 +226,11 @@ class BitField {
   static T decode(uint32_t value) {
     return static_cast<T>((value & mask()) >> shift);
   }
+
+  // Value for the field with all bits set.
+  static T max() {
+    return decode(mask());
+  }
 };
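
The new max() is simply decode(mask()), i.e. the largest value the field can hold. A self-contained sketch under that reading (AgeField is a hypothetical field, and the template mirrors rather than reproduces the v8 class):

  #include <assert.h>
  #include <stdint.h>

  template <class T, int shift, int size>
  struct BitField {
    static uint32_t mask() { return ((1U << size) - 1) << shift; }
    static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
    static T decode(uint32_t value) { return static_cast<T>((value & mask()) >> shift); }
    static T max() { return decode(mask()); }  // all field bits set
  };

  typedef BitField<int, 3, 4> AgeField;  // 4-bit field starting at bit 3

  int main() {
    assert(AgeField::max() == 15);                       // 2^4 - 1
    assert(AgeField::decode(AgeField::encode(9)) == 9);  // round-trips
    return 0;
  }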
 
 
@@ -326,7 +331,7 @@ class Vector {
     return start_[index];
   }
 
-  T& at(int i) const { return operator[](i); }
+  const T& at(int index) const { return operator[](index); }
 
   T& first() { return start_[0]; }
 
@@ -387,11 +392,40 @@ class Vector {
 };
 
 
+// A pointer that can only be set once and doesn't allow NULL values.
+template<typename T>
+class SetOncePointer {
+ public:
+  SetOncePointer() : pointer_(NULL) { }
+
+  bool is_set() const { return pointer_ != NULL; }
+
+  T* get() const {
+    ASSERT(pointer_ != NULL);
+    return pointer_;
+  }
+
+  void set(T* value) {
+    ASSERT(pointer_ == NULL && value != NULL);
+    pointer_ = value;
+  }
+
+ private:
+  T* pointer_;
+};
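
A standalone sketch of the intended write-once protocol (ASSERT replaced by assert so it compiles outside v8):

  #include <assert.h>
  #include <stddef.h>

  template <typename T>
  class SetOncePointer {
   public:
    SetOncePointer() : pointer_(NULL) { }
    bool is_set() const { return pointer_ != NULL; }
    T* get() const { assert(pointer_ != NULL); return pointer_; }
    void set(T* value) { assert(pointer_ == NULL && value != NULL); pointer_ = value; }
   private:
    T* pointer_;
  };

  int main() {
    static int code_stub = 42;
    SetOncePointer<int> p;
    assert(!p.is_set());
    p.set(&code_stub);       // first and only legal assignment
    assert(*p.get() == 42);
    // p.set(&code_stub);    // would trip the assert: already set
    return 0;
  }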
+
+
 template <typename T, int kSize>
 class EmbeddedVector : public Vector<T> {
  public:
   EmbeddedVector() : Vector<T>(buffer_, kSize) { }
 
+  explicit EmbeddedVector(T initial_value) : Vector<T>(buffer_, kSize) {
+    for (int i = 0; i < kSize; ++i) {
+      buffer_[i] = initial_value;
+    }
+  }
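
The fill constructor replaces manual initialization loops at call sites; a hedged example of the intended use:

  EmbeddedVector<char, 128> buffer('\0');  // all 128 slots zero-filled
  EmbeddedVector<int, 8> counters(0);      // all 8 counters start at zero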
+
   // When copying, make the underlying Vector reference our buffer.
   EmbeddedVector(const EmbeddedVector& rhs)
       : Vector<T>(rhs) {
index af5095d..428ebc6 100644 (file)
@@ -159,7 +159,20 @@ namespace internal {
   SC(named_load_global_stub, V8.NamedLoadGlobalStub)                  \
   SC(named_load_global_stub_miss, V8.NamedLoadGlobalStubMiss)         \
   SC(keyed_store_field, V8.KeyedStoreField)                           \
+  SC(named_store_inline_field, V8.NamedStoreInlineField)              \
   SC(keyed_store_inline, V8.KeyedStoreInline)                         \
+  SC(named_load_inline_generic, V8.NamedLoadInlineGeneric)            \
+  SC(named_load_inline_field, V8.NamedLoadInlineFast)                 \
+  SC(keyed_load_inline_generic, V8.KeyedLoadInlineGeneric)            \
+  SC(keyed_load_inline_fast, V8.KeyedLoadInlineFast)                  \
+  SC(named_load_full, V8.NamedLoadFull)                               \
+  SC(keyed_load_full, V8.KeyedLoadFull)                               \
+  SC(keyed_store_inline_generic, V8.KeyedStoreInlineGeneric)          \
+  SC(keyed_store_inline_fast, V8.KeyedStoreInlineFast)                \
+  SC(named_store_inline_generic, V8.NamedStoreInlineGeneric)          \
+  SC(named_store_inline_fast, V8.NamedStoreInlineFast)                \
+  SC(keyed_store_full, V8.KeyedStoreFull)                             \
+  SC(named_store_full, V8.NamedStoreFull)                             \
   SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss)                \
   SC(named_store_global_inline, V8.NamedStoreGlobalInline)            \
   SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss)   \
@@ -225,8 +238,16 @@ namespace internal {
   SC(math_tan, V8.MathTan)                                            \
   SC(transcendental_cache_hit, V8.TranscendentalCacheHit)             \
   SC(transcendental_cache_miss, V8.TranscendentalCacheMiss)           \
+  SC(stack_interrupts, V8.StackInterrupts)                            \
+  SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks)                 \
+  SC(other_ticks, V8.OtherTicks)                                      \
+  SC(js_opt_ticks, V8.JsOptTicks)                                     \
+  SC(js_non_opt_ticks, V8.JsNonoptTicks)                              \
+  SC(js_other_ticks, V8.JsOtherTicks)                                 \
+  SC(smi_checks_removed, V8.SmiChecksRemoved)                         \
+  SC(map_checks_removed, V8.MapChecksRemoved)                         \
   SC(quote_json_char_count, V8.QuoteJsonCharacterCount)               \
-  SC(quote_json_char_recount, V8.QuoteJsonCharacterReCount)           \
+  SC(quote_json_char_recount, V8.QuoteJsonCharacterReCount)
 
 
 // This file contains all the v8 counters that are in use.
index c8d719b..f5b6150 100644 (file)
--- a/src/v8.cc
+++ b/src/v8.cc
 
 #include "bootstrapper.h"
 #include "debug.h"
+#include "deoptimizer.h"
+#include "heap-profiler.h"
+#include "hydrogen.h"
+#include "lithium-allocator.h"
+#include "log.h"
+#include "oprofile-agent.h"
+#include "runtime-profiler.h"
 #include "serialize.h"
 #include "simulator.h"
 #include "stub-cache.h"
-#include "heap-profiler.h"
-#include "oprofile-agent.h"
-#include "log.h"
 
 namespace v8 {
 namespace internal {
@@ -43,6 +47,7 @@ bool V8::is_running_ = false;
 bool V8::has_been_setup_ = false;
 bool V8::has_been_disposed_ = false;
 bool V8::has_fatal_error_ = false;
+bool V8::use_crankshaft_ = true;
 
 
 bool V8::Initialize(Deserializer* des) {
@@ -50,6 +55,9 @@ bool V8::Initialize(Deserializer* des) {
   if (has_been_disposed_ || has_fatal_error_) return false;
   if (IsRunning()) return true;
 
+  use_crankshaft_ = FLAG_crankshaft;
+  // Peephole optimization might interfere with deoptimization.
+  FLAG_peephole_optimization = !use_crankshaft_;
   is_running_ = true;
   has_been_setup_ = true;
   has_fatal_error_ = false;
@@ -122,6 +130,9 @@ bool V8::Initialize(Deserializer* des) {
   CPU::Setup();
 
   OProfileAgent::Initialize();
+  Deoptimizer::Setup();
+  LAllocator::Setup();
+  RuntimeProfiler::Setup();
 
   // If we are deserializing, log non-function code objects and compiled
   // functions found in the snapshot.
@@ -144,6 +155,12 @@ void V8::SetFatalError() {
 void V8::TearDown() {
   if (!has_been_setup_ || has_been_disposed_) return;
 
+  if (FLAG_time_hydrogen) HStatistics::Instance()->Print();
+
+  // We must stop the logger before we tear down other components.
+  Logger::EnsureTickerStopped();
+
+  Deoptimizer::TearDown();
   OProfileAgent::TearDown();
 
   if (FLAG_preemption) {
@@ -157,12 +174,11 @@ void V8::TearDown() {
   Top::TearDown();
 
   HeapProfiler::TearDown();
-
   CpuProfiler::TearDown();
-
-  Heap::TearDown();
+  RuntimeProfiler::TearDown();
 
   Logger::TearDown();
+  Heap::TearDown();
 
   is_running_ = false;
   has_been_disposed_ = true;
index a2313b0..cc1673e 100644 (file)
--- a/src/v8.h
+++ b/src/v8.h
@@ -66,7 +66,6 @@
 #include "log-inl.h"
 #include "cpu-profiler-inl.h"
 #include "handles-inl.h"
-#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -84,6 +83,8 @@ class V8 : public AllStatic {
   static bool Initialize(Deserializer* des);
   static void TearDown();
   static bool IsRunning() { return is_running_; }
+  static bool UseCrankshaft() { return use_crankshaft_; }
+  static void DisableCrankshaft() { use_crankshaft_ = false; }
   // To be dead you have to have lived
   static bool IsDead() { return has_fatal_error_ || has_been_disposed_; }
   static void SetFatalError();
@@ -115,6 +116,8 @@ class V8 : public AllStatic {
   // True if engine has been shut down
   // (reset if engine is restarted)
   static bool has_been_disposed_;
+  // True if we are using the crankshaft optimizing compiler.
+  static bool use_crankshaft_;
 };
 
 } }  // namespace v8::internal
index 2815771..65bbf6a 100644 (file)
@@ -82,6 +82,7 @@ const uint64_t kDebugZapValue = 0xbadbaddbbadbaddb;
 const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
 const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
 const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
+const uint32_t kSlotsZapValue = 0xbeefdeed;
 const uint32_t kDebugZapValue = 0xbadbaddb;
 #endif
 
@@ -285,6 +286,14 @@ enum InlineCacheState {
 };
 
 
+enum CheckType {
+  RECEIVER_MAP_CHECK,
+  STRING_CHECK,
+  NUMBER_CHECK,
+  BOOLEAN_CHECK
+};
+
+
 enum InLoopFlag {
   NOT_IN_LOOP,
   IN_LOOP
index a907c9f..f6ed520 100644 (file)
@@ -67,6 +67,14 @@ char* ReadLine(const char* prompt);
 byte* ReadBytes(const char* filename, int* size, bool verbose = true);
 
 
+// Append size chars from str to the file given by filename.
+// The data is appended to the end of the file. Returns the number of
+// chars written.
+int AppendChars(const char* filename,
+                const char* str,
+                int size,
+                bool verbose = true);
+
+
 // Write size chars from str to the file given by filename.
 // The file is overwritten. Returns the number of chars written.
 int WriteChars(const char* filename,
@@ -217,6 +225,9 @@ class StringBuilder {
   // Add formatted contents to the builder just like printf().
   void AddFormatted(const char* format, ...);
 
+  // Add formatted contents like printf based on a va_list.
+  void AddFormattedList(const char* format, va_list list);
+
   // Add character padding to the builder. If count is non-positive,
   // nothing is added to the builder.
   void AddPadding(char c, int count);
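
AddFormatted now forwards to AddFormattedList, the standard split between a variadic entry point and a va_list core. A standalone sketch of the same pattern on plain vsnprintf:

  #include <stdarg.h>
  #include <stdio.h>

  // Variadic wrapper: packages the arguments and delegates.
  static void FormatInto(char* buf, size_t n, const char* format, ...) {
    va_list arguments;
    va_start(arguments, format);
    vsnprintf(buf, n, format, arguments);  // the va_list-based core
    va_end(arguments);
  }

  int main() {
    char buf[64];
    FormatInto(buf, sizeof(buf), "pc=%d state=%s", 42, "EXTERNAL");
    puts(buf);  // prints: pc=42 state=EXTERNAL
    return 0;
  }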
index 504e224..c1440b7 100644 (file)
@@ -86,6 +86,18 @@ bool Variable::IsStackAllocated() const {
 }
 
 
+bool Variable::IsParameter() const {
+  Slot* s = AsSlot();
+  return s != NULL && s->type() == Slot::PARAMETER;
+}
+
+
+bool Variable::IsStackLocal() const {
+  Slot* s = AsSlot();
+  return s != NULL && s->type() == Slot::LOCAL;
+}
+
+
 Variable::Variable(Scope* scope,
                    Handle<String> name,
                    Mode mode,
index ec76fee..9e460f7 100644 (file)
@@ -146,6 +146,8 @@ class Variable: public ZoneObject {
   }
 
   bool IsStackAllocated() const;
+  bool IsParameter() const;  // Includes 'this'.
+  bool IsStackLocal() const;
 
   bool is_dynamic() const {
     return (mode_ == DYNAMIC ||
index d3186ae..fc3bb81 100644 (file)
@@ -32,8 +32,8 @@
 // These macros define the version number for the current version.
 // NOTE these macros are used by the SCons build script so their names
 // cannot be changed without changing the SCons build script.
-#define MAJOR_VERSION     2
-#define MINOR_VERSION     6
+#define MAJOR_VERSION     3
+#define MINOR_VERSION     0
 #define BUILD_NUMBER      0
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION true
@@ -57,12 +57,19 @@ const char* Version::soname_ = SONAME;
 // Calculate the V8 version string.
 void Version::GetString(Vector<char> str) {
   const char* candidate = IsCandidate() ? " (candidate)" : "";
+#ifdef USE_SIMULATOR
+  const char* is_simulator = " SIMULATOR";
+#else
+  const char* is_simulator = "";
+#endif  // USE_SIMULATOR
   if (GetPatch() > 0) {
-    OS::SNPrintF(str, "%d.%d.%d.%d%s",
-                 GetMajor(), GetMinor(), GetBuild(), GetPatch(), candidate);
+    OS::SNPrintF(str, "%d.%d.%d.%d%s%s",
+                 GetMajor(), GetMinor(), GetBuild(), GetPatch(), candidate,
+                 is_simulator);
   } else {
-    OS::SNPrintF(str, "%d.%d.%d%s",
-                 GetMajor(), GetMinor(), GetBuild(), candidate);
+    OS::SNPrintF(str, "%d.%d.%d%s%s",
+                 GetMajor(), GetMinor(), GetBuild(), candidate,
+                 is_simulator);
   }
 }
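
Illustrative outputs under this header's settings (MAJOR 3, MINOR 0, candidate true; the patch-level-1 line is hypothetical):

  3.0.0 (candidate)               // BUILD_NUMBER 0, PATCH_LEVEL 0, native build
  3.0.0.1 (candidate) SIMULATOR   // hypothetical PATCH_LEVEL 1, USE_SIMULATOR build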
 
index 74f4a6a..da912b7 100644 (file)
@@ -29,6 +29,7 @@
 #define V8_VM_STATE_INL_H_
 
 #include "vm-state.h"
+#include "runtime-profiler.h"
 
 namespace v8 {
 namespace internal {
@@ -49,52 +50,31 @@ inline const char* StateToString(StateTag state) {
       return "COMPILER";
     case OTHER:
       return "OTHER";
+    case EXTERNAL:
+      return "EXTERNAL";
     default:
       UNREACHABLE();
       return NULL;
   }
 }
 
-VMState::VMState(StateTag state)
-    : disabled_(true),
-      state_(OTHER),
-      external_callback_(NULL) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Logger::is_logging() && !CpuProfiler::is_profiling()) {
-    return;
-  }
-#endif
-
-  disabled_ = false;
-#if !defined(ENABLE_HEAP_PROTECTION)
-  // When not protecting the heap, there is no difference between
-  // EXTERNAL and OTHER.  As an optimization in that case, we will not
-  // perform EXTERNAL->OTHER transitions through the API.  We thus
-  // compress the two states into one.
-  if (state == EXTERNAL) state = OTHER;
-#endif
-  state_ = state;
-  // Save the previous state.
-  previous_ = Top::current_vm_state();
-  // Install the new state.
-  Top::set_current_vm_state(this);
-
+VMState::VMState(StateTag tag) : previous_tag_(Top::current_vm_state()) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log_state_changes) {
-    LOG(UncheckedStringEvent("Entering", StateToString(state_)));
-    if (previous_ != NULL) {
-      LOG(UncheckedStringEvent("From", StateToString(previous_->state_)));
-    }
+    LOG(UncheckedStringEvent("Entering", StateToString(tag)));
+    LOG(UncheckedStringEvent("From", StateToString(previous_tag_)));
   }
 #endif
 
+  Top::SetCurrentVMState(tag);
+
 #ifdef ENABLE_HEAP_PROTECTION
   if (FLAG_protect_heap) {
-    if (state_ == EXTERNAL) {
+    if (tag == EXTERNAL) {
       // We are leaving V8.
-      ASSERT((previous_ != NULL) && (previous_->state_ != EXTERNAL));
+      ASSERT(previous_tag_ != EXTERNAL);
       Heap::Protect();
-    } else if ((previous_ == NULL) || (previous_->state_ == EXTERNAL)) {
+    } else if (previous_tag_ == EXTERNAL) {
       // We are entering V8.
       Heap::Unprotect();
     }
@@ -104,34 +84,51 @@ VMState::VMState(StateTag state)
 
 
 VMState::~VMState() {
-  if (disabled_) return;
-  // Return to the previous state.
-  Top::set_current_vm_state(previous_);
-
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log_state_changes) {
-    LOG(UncheckedStringEvent("Leaving", StateToString(state_)));
-    if (previous_ != NULL) {
-      LOG(UncheckedStringEvent("To", StateToString(previous_->state_)));
-    }
+    LOG(UncheckedStringEvent("Leaving",
+                             StateToString(Top::current_vm_state())));
+    LOG(UncheckedStringEvent("To", StateToString(previous_tag_)));
   }
 #endif  // ENABLE_LOGGING_AND_PROFILING
 
 #ifdef ENABLE_HEAP_PROTECTION
+  StateTag tag = Top::current_vm_state();
+#endif
+
+  Top::SetCurrentVMState(previous_tag_);
+
+#ifdef ENABLE_HEAP_PROTECTION
   if (FLAG_protect_heap) {
-    if (state_ == EXTERNAL) {
+    if (tag == EXTERNAL) {
       // We are reentering V8.
-      ASSERT((previous_ != NULL) && (previous_->state_ != EXTERNAL));
+      ASSERT(previous_tag_ != EXTERNAL);
       Heap::Unprotect();
-    } else if ((previous_ == NULL) || (previous_->state_ == EXTERNAL)) {
+    } else if (previous_tag_ == EXTERNAL) {
       // We are leaving V8.
       Heap::Protect();
     }
   }
 #endif  // ENABLE_HEAP_PROTECTION
 }
+
 #endif  // ENABLE_VMSTATE_TRACKING
 
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+ExternalCallbackScope::ExternalCallbackScope(Address callback)
+    : previous_callback_(Top::external_callback()) {
+  Top::set_external_callback(callback);
+}
+
+ExternalCallbackScope::~ExternalCallbackScope() {
+  Top::set_external_callback(previous_callback_);
+}
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
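
Both VMState and ExternalCallbackScope now follow the same save-in-constructor, restore-in-destructor shape; a standalone sketch of that pattern:

  #include <assert.h>

  static int current_state = 0;  // stands in for Top::current_vm_state()

  class StateScope {
   public:
    explicit StateScope(int tag) : previous_(current_state) { current_state = tag; }
    ~StateScope() { current_state = previous_; }
   private:
    int previous_;
  };

  int main() {
    {
      StateScope scope(1);          // enter: remember 0, install 1
      assert(current_state == 1);
    }                               // leave: destructor restores 0
    assert(current_state == 0);
    return 0;
  }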
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_VM_STATE_INL_H_
index cc91e83..df7fb30 100644 (file)
@@ -36,38 +36,29 @@ namespace internal {
 class VMState BASE_EMBEDDED {
 #ifdef ENABLE_VMSTATE_TRACKING
  public:
-  inline VMState(StateTag state);
+  inline explicit VMState(StateTag tag);
   inline ~VMState();
 
-  StateTag state() { return state_; }
-  void set_external_callback(Address external_callback) {
-    external_callback_ = external_callback;
-  }
-
-  // Used for debug asserts.
-  static bool is_outermost_external() {
-    return Top::current_vm_state() == 0;
-  }
+ private:
+  StateTag previous_tag_;
 
-  static StateTag current_state() {
-    VMState* state = Top::current_vm_state();
-    return state ? state->state() : EXTERNAL;
-  }
+#else
+ public:
+  explicit VMState(StateTag state) {}
+#endif
+};
 
-  static Address external_callback() {
-    VMState* state = Top::current_vm_state();
-    return state ? state->external_callback_ : NULL;
-  }
 
+class ExternalCallbackScope BASE_EMBEDDED {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ public:
+  inline explicit ExternalCallbackScope(Address callback);
+  inline ~ExternalCallbackScope();
  private:
-  bool disabled_;
-  StateTag state_;
-  VMState* previous_;
-  Address external_callback_;
-
+  Address previous_callback_;
 #else
  public:
-  explicit VMState(StateTag state) {}
+  explicit ExternalCallbackScope(Address callback) {}
 #endif
 };
 
diff --git a/src/win32-headers.h b/src/win32-headers.h
new file mode 100644 (file)
index 0000000..b51a38a
--- /dev/null
@@ -0,0 +1,95 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef WIN32_LEAN_AND_MEAN
+// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI.
+#define WIN32_LEAN_AND_MEAN
+#endif
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+#ifndef NOKERNEL
+#define NOKERNEL
+#endif
+#ifndef NOUSER
+#define NOUSER
+#endif
+#ifndef NOSERVICE
+#define NOSERVICE
+#endif
+#ifndef NOSOUND
+#define NOSOUND
+#endif
+#ifndef NOMCX
+#define NOMCX
+#endif
+// Require Windows XP or higher; the RtlCaptureContext function is not
+// available on earlier versions.
+#ifndef _WIN32_WINNT
+#define _WIN32_WINNT 0x501
+#endif
+
+#include <windows.h>
+
+#ifdef V8_WIN32_HEADERS_FULL
+#include <time.h>  // For LocalOffset() implementation.
+#include <mmsystem.h>  // For timeGetTime().
+#ifdef __MINGW32__
+// Require Windows XP or higher when compiling with MinGW. This is needed
+// for the MinGW header files to expose getaddrinfo.
+#undef _WIN32_WINNT
+#define _WIN32_WINNT 0x501
+#endif  // __MINGW32__
+#ifndef __MINGW32__
+#include <dbghelp.h>  // For SymLoadModule64 et al.
+#endif  // __MINGW32__
+#include <limits.h>  // For INT_MAX et al.
+#include <tlhelp32.h>  // For Module32First et al.
+
+// These additional WIN32 includes have to be right here as the #undef's below
+// make it impossible to have them elsewhere.
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <process.h>  // for _beginthreadex()
+#include <stdlib.h>
+#endif  // V8_WIN32_HEADERS_FULL
+
+#undef VOID
+#undef DELETE
+#undef IN
+#undef THIS
+#undef CONST
+#undef NAN
+#undef TRUE
+#undef FALSE
+#undef UNKNOWN
+#undef NONE
+#undef ANY
+#undef IGNORE
+#undef GetObject
+#undef CreateMutex
+#undef CreateSemaphore
index 44159e0..1fe9eed 100644 (file)
@@ -274,6 +274,30 @@ void RelocInfo::set_target_object(Object* target) {
 }
 
 
+Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = Memory::Address_at(pc_);
+  return Handle<JSGlobalPropertyCell>(
+      reinterpret_cast<JSGlobalPropertyCell**>(address));
+}
+
+
+JSGlobalPropertyCell* RelocInfo::target_cell() {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = Memory::Address_at(pc_);
+  Object* object = HeapObject::FromAddress(
+      address - JSGlobalPropertyCell::kValueOffset);
+  return reinterpret_cast<JSGlobalPropertyCell*>(object);
+}
+
+
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+  Memory::Address_at(pc_) = address;
+}
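
A sketch of the address arithmetic these accessors share (offsets symbolic):

  // set_target_cell stores:  Memory[pc_] = cell->address() + kValueOffset
  // target_cell inverts it:  HeapObject::FromAddress(Memory[pc_] - kValueOffset)
  // The code stream thus points directly at the cell's value slot, and the
  // JSGlobalPropertyCell itself is recovered by subtracting the same offset.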
+
+
 bool RelocInfo::IsPatchedReturnSequence() {
   // The recognized call sequence is:
   //  movq(kScratchRegister, immediate64); call(kScratchRegister);
index 05bf195..8f15f23 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -44,10 +44,10 @@ uint64_t CpuFeatures::supported_ = kDefaultCpuFeatures;
 uint64_t CpuFeatures::enabled_ = 0;
 uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
 
-void CpuFeatures::Probe()  {
+void CpuFeatures::Probe(bool portable) {
   ASSERT(Heap::HasBeenSetup());
-  ASSERT(supported_ == kDefaultCpuFeatures);
-  if (Serializer::enabled()) {
+  supported_ = kDefaultCpuFeatures;
+  if (portable && Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     return;  // No features if we might serialize.
   }
@@ -133,7 +133,7 @@ void CpuFeatures::Probe()  {
   found_by_runtime_probing_ &= ~kDefaultCpuFeatures;
   uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
   supported_ |= os_guarantees;
-  found_by_runtime_probing_ &= ~os_guarantees;
+  found_by_runtime_probing_ &= portable ? ~os_guarantees : 0;
   // SSE2 and CMOV must be available on an X64 CPU.
   ASSERT(IsSupported(CPUID));
   ASSERT(IsSupported(SSE2));
@@ -821,6 +821,7 @@ void Assembler::bts(const Operand& dst, Register src) {
 
 
 void Assembler::call(Label* L) {
+  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   // 1110 1000 #32-bit disp.
@@ -852,6 +853,7 @@ void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
 
 
 void Assembler::call(Register adr) {
+  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   // Opcode: FF /2 r64.
@@ -862,6 +864,7 @@ void Assembler::call(Register adr) {
 
 
 void Assembler::call(const Operand& op) {
+  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   // Opcode: FF /2 m64.
@@ -2933,6 +2936,12 @@ void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
 }
 
 
+void Assembler::dd(uint32_t data) {
+  EnsureSpace ensure_space(this);
+  emitl(data);
+}
+
+
 // Relocation information implementations.
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
@@ -2962,7 +2971,7 @@ void Assembler::RecordDebugBreakSlot() {
 
 
 void Assembler::RecordComment(const char* msg) {
-  if (FLAG_debug_code) {
+  if (FLAG_code_comments) {
     EnsureSpace ensure_space(this);
     RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
   }
index 549d94c..fde88df 100644 (file)
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 
 // A lightweight X64 Assembler.
 
@@ -88,11 +88,38 @@ static inline bool is_uint32(uint64_t x) {
 //
 
 struct Register {
+  // The non-allocatable registers are:
+  //  rsp - stack pointer
+  //  rbp - frame pointer
+  //  rsi - context register
+  //  r10 - fixed scratch register
+  //  r13 - root register
+  //  r15 - smi constant register
+  static const int kNumRegisters = 16;
+  static const int kNumAllocatableRegisters = 10;
+
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "rax",
+      "rcx",
+      "rdx",
+      "rbx",
+      "rdi",
+      "r8",
+      "r9",
+      "r11",
+      "r12",
+      "r14"
+    };
+    return names[index];
+  }
+
   static Register toRegister(int code) {
     Register r = { code };
     return r;
   }
-  bool is_valid() const { return 0 <= code_ && code_ < 16; }
+  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(Register reg) const { return code_ == reg.code_; }
   int code() const {
     ASSERT(is_valid());
@@ -138,7 +165,37 @@ const Register no_reg = { -1 };
 
 
 struct XMMRegister {
-  bool is_valid() const { return 0 <= code_ && code_ < 16; }
+  static const int kNumRegisters = 16;
+  static const int kNumAllocatableRegisters = 15;
+
+  static int ToAllocationIndex(XMMRegister reg) {
+    ASSERT(reg.code() != 0);
+    return reg.code() - 1;
+  }
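
The code() - 1 mapping encodes that xmm0 is excluded from allocation (presumably kept as a fixed scratch register):

  //   xmm1 -> 0, xmm2 -> 1, ..., xmm15 -> 14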
+
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "xmm1",
+      "xmm2",
+      "xmm3",
+      "xmm4",
+      "xmm5",
+      "xmm6",
+      "xmm7",
+      "xmm8",
+      "xmm9",
+      "xmm10",
+      "xmm11",
+      "xmm12",
+      "xmm13",
+      "xmm14",
+      "xmm15"
+    };
+    return names[index];
+  }
+
+  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   int code() const {
     ASSERT(is_valid());
     return code_;
@@ -175,6 +232,10 @@ const XMMRegister xmm13 = { 13 };
 const XMMRegister xmm14 = { 14 };
 const XMMRegister xmm15 = { 15 };
 
+
+typedef XMMRegister DoubleRegister;
+
+
 enum Condition {
   // any value < 0 is considered no_condition
   no_condition  = -1,
@@ -345,7 +406,7 @@ class CpuFeatures : public AllStatic {
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  static void Probe();
+  static void Probe(bool portable);
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(CpuFeature f) {
     if (f == SSE2 && !FLAG_enable_sse2) return false;
@@ -1173,9 +1234,14 @@ class Assembler : public Malloced {
   void RecordDebugBreakSlot();
 
   // Record a comment relocation entry that can be used by a disassembler.
-  // Use --debug_code to enable.
+  // Use --code-comments to enable.
   void RecordComment(const char* msg);
 
+  // Writes a single word of data into the code stream.
+  // Used for inline tables, e.g., jump-tables.
+  void db(uint8_t data) { UNIMPLEMENTED(); }
+  void dd(uint32_t data);
+
   int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
 
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
index 0dead6b..540593f 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -1347,6 +1347,47 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
   __ jmp(rcx);
 }
 
+
+void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Push a copy of the function onto the stack.
+  __ push(rdi);
+
+  __ push(rdi);  // Function is also the parameter to the runtime call.
+  __ CallRuntime(Runtime::kLazyRecompile, 1);
+
+  // Restore function and tear down temporary frame.
+  __ pop(rdi);
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
+  __ jmp(rcx);
+}
+
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  __ int3();
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  __ int3();
+}
+
+
+void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
+  __ int3();
+}
+
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  __ int3();
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64
index 9bd28bc..c3eb5bf 100644 (file)
@@ -57,12 +57,14 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // write barrier because the allocated object is in new space.
   __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
   __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
+  __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
   __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
   __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
   __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
   __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
   __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
   __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
+  __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdi);
 
   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
@@ -983,6 +985,14 @@ Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
 }
 
 
+Handle<Code> GetTypeRecordingBinaryOpStub(int key,
+    TRBinaryOpIC::TypeInfo type_info,
+    TRBinaryOpIC::TypeInfo result_type_info) {
+  UNIMPLEMENTED();
+  return Handle<Code>::null();
+}
+
+
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   // Input on stack:
   // rsp[8]: argument (should be number).
@@ -2003,6 +2013,90 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
 }
 
 
+void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
+  const int kMaxInlineLength = 100;
+  Label slowcase;
+  Label done;
+  __ movq(r8, Operand(rsp, kPointerSize * 3));
+  __ JumpIfNotSmi(r8, &slowcase);
+  __ SmiToInteger32(rbx, r8);
+  __ cmpl(rbx, Immediate(kMaxInlineLength));
+  __ j(above, &slowcase);
+  // Smi-tagging is equivalent to multiplying by 2.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  // Allocate RegExpResult followed by FixedArray with size in ebx.
+  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
+  // Elements:  [Map][Length][..elements..]
+  __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
+                        times_pointer_size,
+                        rbx,  // In: Number of elements.
+                        rax,  // Out: Start of allocation (tagged).
+                        rcx,  // Out: End of allocation.
+                        rdx,  // Scratch register
+                        &slowcase,
+                        TAG_OBJECT);
+  // rax: Start of allocated area, object-tagged.
+  // rbx: Number of array elements as int32.
+  // r8: Number of array elements as smi.
+
+  // Set JSArray map to global.regexp_result_map().
+  __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
+  __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
+  __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
+  __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
+
+  // Set empty properties FixedArray.
+  __ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
+          Factory::empty_fixed_array());
+
+  // Set elements to point to FixedArray allocated right after the JSArray.
+  __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
+  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
+
+  // Set input, index and length fields from arguments.
+  __ movq(r8, Operand(rsp, kPointerSize * 1));
+  __ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
+  __ movq(r8, Operand(rsp, kPointerSize * 2));
+  __ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
+  __ movq(r8, Operand(rsp, kPointerSize * 3));
+  __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
+
+  // Fill out the elements FixedArray.
+  // rax: JSArray.
+  // rcx: FixedArray.
+  // rbx: Number of elements in array as int32.
+
+  // Set map.
+  __ Move(FieldOperand(rcx, HeapObject::kMapOffset),
+          Factory::fixed_array_map());
+  // Set length.
+  __ Integer32ToSmi(rdx, rbx);
+  __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
+  // Fill contents of fixed-array with the-hole.
+  __ Move(rdx, Factory::the_hole_value());
+  __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
+  // Fill fixed array elements with hole.
+  // rax: JSArray.
+  // rbx: Number of elements in array that remains to be filled, as int32.
+  // rcx: Start of elements in FixedArray.
+  // rdx: the hole.
+  Label loop;
+  __ testl(rbx, rbx);
+  __ bind(&loop);
+  __ j(less_equal, &done);  // Jump if rbx is negative or zero.
+  __ subl(rbx, Immediate(1));
+  __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
+  __ jmp(&loop);
+
+  __ bind(&done);
+  __ ret(3 * kPointerSize);
+
+  __ bind(&slowcase);
+  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
+}
+
+
 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                          Register object,
                                                          Register result,
@@ -3990,6 +4084,25 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
   __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }
 
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+  UNIMPLEMENTED();
+}
+
+
+void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
+  UNIMPLEMENTED();
+}
+
+
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+  UNIMPLEMENTED();
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+  UNIMPLEMENTED();
+}
+
 #undef __
 
 } }  // namespace v8::internal
index 18213b9..eb7ad26 100644 (file)
@@ -149,7 +149,7 @@ class GenericBinaryOpStub: public CodeStub {
   class ArgsReversedBits: public BitField<bool, 10, 1> {};
   class FlagBits: public BitField<GenericBinaryFlags, 11, 1> {};
   class StaticTypeInfoBits: public BitField<int, 12, 3> {};
-  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 15, 2> {};
+  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 15, 3> {};
 
   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
index 765a570..23700e1 100644 (file)
@@ -104,12 +104,12 @@ void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
 }
 
 
-void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterInternalFrame();
 }
 
 
-void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
   masm->LeaveInternalFrame();
 }
 
@@ -6490,94 +6490,13 @@ void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
 
 
 void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
-  // No stub. This code only occurs a few times in regexp.js.
-  const int kMaxInlineLength = 100;
   ASSERT_EQ(3, args->length());
   Load(args->at(0));  // Size of array, smi.
   Load(args->at(1));  // "index" property value.
   Load(args->at(2));  // "input" property value.
-  {
-    VirtualFrame::SpilledScope spilled_scope;
-
-    Label slowcase;
-    Label done;
-    __ movq(r8, Operand(rsp, kPointerSize * 2));
-    __ JumpIfNotSmi(r8, &slowcase);
-    __ SmiToInteger32(rbx, r8);
-    __ cmpl(rbx, Immediate(kMaxInlineLength));
-    __ j(above, &slowcase);
-    // Smi-tagging is equivalent to multiplying by 2.
-    STATIC_ASSERT(kSmiTag == 0);
-    STATIC_ASSERT(kSmiTagSize == 1);
-    // Allocate RegExpResult followed by FixedArray with size in ebx.
-    // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
-    // Elements:  [Map][Length][..elements..]
-    __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
-                          times_pointer_size,
-                          rbx,  // In: Number of elements.
-                          rax,  // Out: Start of allocation (tagged).
-                          rcx,  // Out: End of allocation.
-                          rdx,  // Scratch register
-                          &slowcase,
-                          TAG_OBJECT);
-    // rax: Start of allocated area, object-tagged.
-    // rbx: Number of array elements as int32.
-    // r8: Number of array elements as smi.
-
-    // Set JSArray map to global.regexp_result_map().
-    __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
-    __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
-    __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
-    __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
-
-    // Set empty properties FixedArray.
-    __ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
-            Factory::empty_fixed_array());
-
-    // Set elements to point to FixedArray allocated right after the JSArray.
-    __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
-    __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
-
-    // Set input, index and length fields from arguments.
-    __ pop(FieldOperand(rax, JSRegExpResult::kInputOffset));
-    __ pop(FieldOperand(rax, JSRegExpResult::kIndexOffset));
-    __ lea(rsp, Operand(rsp, kPointerSize));
-    __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
-
-    // Fill out the elements FixedArray.
-    // rax: JSArray.
-    // rcx: FixedArray.
-    // rbx: Number of elements in array as int32.
-
-    // Set map.
-    __ Move(FieldOperand(rcx, HeapObject::kMapOffset),
-            Factory::fixed_array_map());
-    // Set length.
-    __ Integer32ToSmi(rdx, rbx);
-    __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
-    // Fill contents of fixed-array with the-hole.
-    __ Move(rdx, Factory::the_hole_value());
-    __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
-    // Fill fixed array elements with hole.
-    // rax: JSArray.
-    // rbx: Number of elements in array that remains to be filled, as int32.
-    // rcx: Start of elements in FixedArray.
-    // rdx: the hole.
-    Label loop;
-    __ testl(rbx, rbx);
-    __ bind(&loop);
-    __ j(less_equal, &done);  // Jump if ecx is negative or zero.
-    __ subl(rbx, Immediate(1));
-    __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
-    __ jmp(&loop);
-
-    __ bind(&slowcase);
-    __ CallRuntime(Runtime::kRegExpConstructResult, 3);
-
-    __ bind(&done);
-  }
-  frame_->Forget(3);
-  frame_->Push(rax);
+  RegExpConstructResultStub stub;
+  Result result = frame_->CallStub(&stub, 3);
+  frame_->Push(&result);
 }
 
 
index e38bf29..b308f64 100644 (file)
@@ -308,6 +308,9 @@ class CodeGenerator: public AstVisitor {
                                        Code::Flags flags,
                                        CompilationInfo* info);
 
+  // Print the code after compiling it.
+  static void PrintCode(Handle<Code> code, CompilationInfo* info);
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
 #endif
@@ -370,8 +373,9 @@ class CodeGenerator: public AstVisitor {
   // Node visitors.
   void VisitStatements(ZoneList<Statement*>* statements);
 
-#define DEF_VISIT(type) \
-  void Visit##type(type* node);
+  virtual void VisitSlot(Slot* node);
+#define DEF_VISIT(type)                         \
+  virtual void Visit##type(type* node);
   AST_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
 
index a43a02b..30134bf 100644 (file)
@@ -42,7 +42,7 @@ namespace v8 {
 namespace internal {
 
 void CPU::Setup() {
-  CpuFeatures::Probe();
+  CpuFeatures::Probe(true);
 }
 
 
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
new file mode 100644 (file)
index 0000000..4e890cd
--- /dev/null
@@ -0,0 +1,77 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+
+int Deoptimizer::table_entry_size_ = 10;
+
+void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
+                                      Code* replacement_code) {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::DoComputeOsrOutputFrame() {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
+                                 int frame_index) {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::EntryGenerator::Generate() {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+  UNIMPLEMENTED();
+}
+
+} }  // namespace v8::internal
index 9991981..fbbf176 100644 (file)
@@ -43,6 +43,12 @@ static const int kNumJSCallerSaved = 5;
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
+// Number of registers for which space is reserved in safepoints.
+// TODO(x64): This should not be 0.
+static const int kNumSafepointRegisters = 0;
+
+// ----------------------------------------------------
+
 class StackHandlerConstants : public AllStatic {
  public:
   static const int kNextOffset  = 0 * kPointerSize;
index 6007d7e..574688c 100644 (file)
@@ -170,7 +170,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
     }
   }
 
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+
   { Comment cmnt(masm_, "[ Stack check");
+    PrepareForBailout(info->function(), NO_REGISTERS);
     NearLabel ok;
     __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
     __ j(above_equal, &ok);
@@ -179,10 +184,6 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
     __ bind(&ok);
   }
 
-  if (FLAG_trace) {
-    __ CallRuntime(Runtime::kTraceEnter, 0);
-  }
-
   { Comment cmnt(masm_, "[ Body");
     ASSERT(loop_depth() == 0);
     VisitStatements(function()->body());
@@ -197,6 +198,20 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
 }
 
 
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+  Comment cmnt(masm_, "[ Stack check");
+  NearLabel ok;
+  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+  __ j(above_equal, &ok);
+  StackCheckStub stub;
+  __ CallStub(&stub);
+  __ bind(&ok);
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+  RecordStackCheck(stmt->OsrEntryId());
+}
+
+
 void FullCodeGenerator::EmitReturnSequence() {
   Comment cmnt(masm_, "[ Return sequence");
   if (return_label_.is_bound()) {
@@ -261,6 +276,7 @@ void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
 
 void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
   codegen()->Move(result_register(), slot);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
@@ -282,12 +298,16 @@ void FullCodeGenerator::StackValueContext::Plug(
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
   if (index == Heap::kUndefinedValueRootIndex ||
       index == Heap::kNullValueRootIndex ||
       index == Heap::kFalseValueRootIndex) {
-    __ jmp(false_label_);
+    if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (index == Heap::kTrueValueRootIndex) {
-    __ jmp(true_label_);
+    if (true_label_ != fall_through_) __ jmp(true_label_);
   } else {
     __ LoadRoot(result_register(), index);
     codegen()->DoTest(true_label_, false_label_, fall_through_);
@@ -311,22 +331,26 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
   ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
-    __ jmp(false_label_);
+    if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
-    __ jmp(true_label_);
+    if (true_label_ != fall_through_) __ jmp(true_label_);
   } else if (lit->IsString()) {
     if (String::cast(*lit)->length() == 0) {
-      __ jmp(false_label_);
+      if (false_label_ != fall_through_) __ jmp(false_label_);
     } else {
-      __ jmp(true_label_);
+      if (true_label_ != fall_through_) __ jmp(true_label_);
     }
   } else if (lit->IsSmi()) {
     if (Smi::cast(*lit)->value() == 0) {
-      __ jmp(false_label_);
+      if (false_label_ != fall_through_) __ jmp(false_label_);
     } else {
-      __ jmp(true_label_);
+      if (true_label_ != fall_through_) __ jmp(true_label_);
     }
   } else {
     // For simplicity we always test the accumulator register.
@@ -366,13 +390,14 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
-  ASSERT_EQ(materialize_true, materialize_false);
+  ASSERT(materialize_true == materialize_false);
   __ bind(materialize_true);
 }
 
@@ -405,8 +430,8 @@ void FullCodeGenerator::StackValueContext::Plug(
 
 void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
                                           Label* materialize_false) const {
-  ASSERT(materialize_false == false_label_);
   ASSERT(materialize_true == true_label_);
+  ASSERT(materialize_false == false_label_);
 }
 
 
@@ -429,6 +454,7 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   if (flag) {
     if (true_label_ != fall_through_) __ jmp(true_label_);
   } else {
@@ -520,6 +546,13 @@ void FullCodeGenerator::Move(Slot* dst,
 }
 
 
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+                                                     bool should_normalize,
+                                                     Label* if_true,
+                                                     Label* if_false) {
+}
+
+
 void FullCodeGenerator::EmitDeclaration(Variable* variable,
                                         Variable::Mode mode,
                                         FunctionLiteral* function) {
@@ -809,23 +842,15 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   EmitAssignment(stmt->each());
 
   // Generate code for the body of the loop.
-  Label stack_limit_hit, stack_check_done;
   Visit(stmt->body());
 
-  __ StackLimitCheck(&stack_limit_hit);
-  __ bind(&stack_check_done);
-
   // Generate code for going to the next element by incrementing the
   // index (smi) stored on top of the stack.
   __ bind(loop_statement.continue_target());
   __ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
-  __ jmp(&loop);
 
-  // Slow case for the stack limit check.
-  StackCheckStub stack_check_stub;
-  __ bind(&stack_limit_hit);
-  __ CallStub(&stack_check_stub);
-  __ jmp(&stack_check_done);
+  EmitStackCheck(stmt);
+  __ jmp(&loop);
 
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_target());
@@ -1706,13 +1731,14 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
   if (key->IsPropertyName()) {
     VisitForAccumulatorValue(expr->obj());
     EmitNamedPropertyLoad(expr);
+    context()->Plug(rax);
   } else {
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
     __ pop(rdx);
     EmitKeyedPropertyLoad(expr);
+    context()->Plug(rax);
   }
-  context()->Plug(rax);
 }
 
 
@@ -2662,11 +2688,12 @@ void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
 
 
 void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+  RegExpConstructResultStub stub;
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
-  __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+  __ CallStub(&stub);
   context()->Plug(rax);
 }
 
@@ -2928,7 +2955,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
     case Token::ADD: {
       Comment cmt(masm_, "[ UnaryOperation (ADD)");
       VisitForAccumulatorValue(expr->expression());
-      NearLabel no_conversion;
+      Label no_conversion;
       Condition is_smi = masm_->CheckSmi(result_register());
       __ j(is_smi, &no_conversion);
       __ push(result_register());
@@ -3081,6 +3108,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
       __ SmiAddConstant(rax, rax, Smi::FromInt(1));
     }
   }
+
+  // Record position before stub call.
+  SetSourcePosition(expr->position());
+
   // Call stub for +1/-1.
   GenericBinaryOpStub stub(expr->binary_op(),
                            NO_OVERWRITE,
@@ -3418,6 +3449,9 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
          mode == RelocInfo::CODE_TARGET_CONTEXT);
   __ call(ic, mode);
 
+  // Crankshaft doesn't need patching of inlined loads and stores.
+  if (V8::UseCrankshaft()) return;
+
   // If we're calling a (keyed) load or store stub, we have to mark
   // the call as containing no inlined code so we will not attempt to
   // patch it.
index 9ec7814..2002099 100644 (file)
@@ -383,6 +383,8 @@ static const byte kTestEaxByte = 0xA9;
 
 
 static bool PatchInlinedMapCheck(Address address, Object* map) {
+  if (V8::UseCrankshaft()) return false;
+
   // Arguments are address of start of call sequence that called
   // the IC,
   Address test_instruction_address =
@@ -748,7 +750,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
   char_at_generator.GenerateFast(masm);
   __ ret(0);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm, call_helper);
 
   __ bind(&miss);
@@ -1699,6 +1701,8 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
 
 
 bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+  if (V8::UseCrankshaft()) return false;
+
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1741,6 +1745,8 @@ const int StoreIC::kOffsetToStoreInstruction = 20;
 
 
 bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
+  if (V8::UseCrankshaft()) return false;
+
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1899,9 +1905,75 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
 }
 
 
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : name
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  __ pop(rbx);
+  __ push(rdx);
+  __ push(rcx);
+  __ push(rax);
+  __ push(rbx);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+}
+
+
 #undef __
 
 
+Condition CompareIC::ComputeCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return equal;
+    case Token::LT:
+      return less;
+    case Token::GT:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return less;
+    case Token::LTE:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return greater_equal;
+    case Token::GTE:
+      return greater_equal;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
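
Why GT maps to `less` and LTE to `greater_equal`: the operands are swapped so that the left operand is still converted first, as ECMA-262 requires. Worked out:

  //   a >  b   --swap-->  b <  a    => condition `less`
  //   a <= b   --swap-->  b >= a    => condition `greater_equal`
  // EQ/EQ_STRICT are symmetric, so no swap is needed; LT and GTE keep their
  // natural conditions (`less`, `greater_equal`) without swapping.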
+
+
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+  HandleScope scope;
+  Handle<Code> rewritten;
+#ifdef DEBUG
+  State previous_state = GetState();
+#endif
+  State state = TargetState(x, y);
+  if (state == GENERIC) {
+    CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
+    rewritten = stub.GetCode();
+  } else {
+    ICCompareStub stub(op_, state);
+    rewritten = stub.GetCode();
+  }
+  set_target(*rewritten);
+
+#ifdef DEBUG
+  if (FLAG_trace_ic) {
+    PrintF("[CompareIC (%s->%s)#%s]\n",
+           GetStateName(previous_state),
+           GetStateName(state),
+           Token::Name(op_));
+  }
+#endif
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
new file mode 100644 (file)
index 0000000..cd1f08d
--- /dev/null
@@ -0,0 +1,62 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_LITHIUM_CODEGEN_X64_H_
+#define V8_X64_LITHIUM_CODEGEN_X64_H_
+
+#include "x64/lithium-x64.h"
+
+#include "deoptimizer.h"
+#include "safepoint-table.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+
+class LCodeGen BASE_EMBEDDED {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
+
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode() {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_LITHIUM_CODEGEN_X64_H_
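
The x64 LCodeGen above is a placeholder: GenerateCode() always bails out, so the optimizing pipeline falls back to the full compiler on this architecture for now. A hedged sketch of how a caller is expected to drive the two-phase interface (a hypothetical wrapper, not code from this patch):

    // Hypothetical caller sketch: try optimized codegen, bail out on failure.
    bool TryGenerateOptimizedCode(LCodeGen* generator, Handle<Code>* result) {
      // GenerateCode() returns false when the chunk contains constructs the
      // backend cannot handle (on x64, currently always).
      if (!generator->GenerateCode()) return false;
      // On success the caller materializes a Code object from the assembler
      // buffer (elided) and has the generator attach stack height, safepoint,
      // and bailout information to it.
      generator->FinishCode(*result);
      return true;
    }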
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
new file mode 100644 (file)
index 0000000..0c1559b
--- /dev/null
@@ -0,0 +1,256 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_LITHIUM_X64_H_
+#define V8_X64_LITHIUM_X64_H_
+
+#include "hydrogen.h"
+#include "lithium-allocator.h"
+#include "safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+class LEnvironment;
+class Translation;
+
+class LInstruction: public ZoneObject {
+ public:
+  LInstruction() { }
+  virtual ~LInstruction() { }
+
+  // Predicates should be generated by a macro, as in lithium-ia32.h.
+  virtual bool IsLabel() const {
+    UNIMPLEMENTED();
+    return false;
+  }
+  virtual bool IsOsrEntry() const {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  LPointerMap* pointer_map() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  bool HasPointerMap() const {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
+};
+
+
+class LParallelMove : public ZoneObject {
+ public:
+  LParallelMove() { }
+
+  void AddMove(LOperand* from, LOperand* to) {
+    UNIMPLEMENTED();
+  }
+
+  const ZoneList<LMoveOperands>* move_operands() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+};
+
+
+class LGap: public LInstruction {
+ public:
+  explicit LGap(HBasicBlock* block) { }
+
+  HBasicBlock* block() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+};
+
+
+class LLabel: public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block) : LGap(block) { }
+};
+
+
+class LOsrEntry: public LInstruction {
+ public:
+  // Function could be generated by a macro as in lithium-ia32.h.
+  static LOsrEntry* cast(LInstruction* instr) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LOperand** SpilledRegisterArray() {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+  LOperand** SpilledDoubleRegisterArray() {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand) {
+    UNIMPLEMENTED();
+  }
+  void MarkSpilledDoubleRegister(int allocation_index,
+                                 LOperand* spill_operand) {
+    UNIMPLEMENTED();
+  }
+};
+
+
+class LPointerMap: public ZoneObject {
+ public:
+  explicit LPointerMap(int position) { }
+
+  int lithium_position() const {
+    UNIMPLEMENTED();
+    return 0;
+  }
+
+  void RecordPointer(LOperand* op) { UNIMPLEMENTED(); }
+};
+
+
+class LChunk: public ZoneObject {
+ public:
+  explicit LChunk(HGraph* graph) { }
+
+  HGraph* graph() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  const ZoneList<LPointerMap*>* pointer_maps() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LOperand* GetNextSpillSlot(bool double_slot) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LConstantOperand* DefineConstantOperand(HConstant* constant) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LLabel* GetLabel(int block_id) const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  const ZoneList<LInstruction*>* instructions() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  int GetParameterStackSlot(int index) const {
+    UNIMPLEMENTED();
+    return 0;
+  }
+
+  void AddGapMove(int index, LOperand* from, LOperand* to) { UNIMPLEMENTED(); }
+
+  LGap* GetGapAt(int index) const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  bool IsGapAt(int index) const {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  int NearestGapPos(int index) const {
+    UNIMPLEMENTED();
+    return 0;
+  }
+
+  int NearestNextGapPos(int index) const {
+    UNIMPLEMENTED();
+    return 0;
+  }
+
+  void MarkEmptyBlocks() { UNIMPLEMENTED(); }
+
+#ifdef DEBUG
+  void Verify() { UNIMPLEMENTED(); }
+#endif
+};
+
+
+class LChunkBuilder BASE_EMBEDDED {
+ public:
+  LChunkBuilder(HGraph* graph, LAllocator* allocator) { }
+
+  // Build the sequence for the graph.
+  LChunk* Build() {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node) { \
+    UNIMPLEMENTED(); \
+    return NULL; \
+  }
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_LITHIUM_X64_H_
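
The DECLARE_DO block above uses the X-macro idiom: HYDROGEN_CONCRETE_INSTRUCTION_LIST invokes the macro it is passed once per concrete instruction name, stamping out one Do-method per hydrogen instruction. A self-contained sketch of the same pattern with a made-up list:

    // Hypothetical three-entry list standing in for the hydrogen list.
    #define DEMO_INSTRUCTION_LIST(V) V(Add) V(Mul) V(Branch)

    // Each expansion pastes the name into a method, e.g. DoAdd.
    #define DECLARE_DO(type) void Do##type() { /* handle an H##type node */ }
    DEMO_INSTRUCTION_LIST(DECLARE_DO)  // declares DoAdd(), DoMul(), DoBranch()
    #undef DECLARE_DO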
index d919833..5bb5ffd 100644 (file)
@@ -74,12 +74,6 @@ void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
 }
 
 
-void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
-  CompareRoot(rsp, Heap::kStackLimitRootIndex);
-  j(below, on_stack_overflow);
-}
-
-
 void MacroAssembler::RecordWriteHelper(Register object,
                                        Register addr,
                                        Register scratch) {
index 0b7e601..348191e 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -137,12 +137,6 @@ class MacroAssembler: public Assembler {
 #endif
 
   // ---------------------------------------------------------------------------
-  // Stack limit support
-
-  // Do simple test for stack overflow. This doesn't handle an overflow.
-  void StackLimitCheck(Label* on_stack_limit_hit);
-
-  // ---------------------------------------------------------------------------
   // Activation frames
 
   void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
@@ -173,6 +167,14 @@ class MacroAssembler: public Assembler {
   // register rax (untouched).
   void LeaveApiExitFrame();
 
+  // Push and pop the registers that can hold pointers.
+  void PushSafepointRegisters() { UNIMPLEMENTED(); }
+  void PopSafepointRegisters() { UNIMPLEMENTED(); }
+  static int SafepointRegisterStackIndex(int reg_code) {
+    UNIMPLEMENTED();
+    return 0;
+  }
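+  // SafepointRegisterStackIndex is intended to map a register code to that
+  // register's slot within the block pushed by PushSafepointRegisters, so
+  // pointers held in registers can be located at a safepoint (intended
+  // semantics; the x64 versions are still stubs).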
+
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
index 7ba482c..530222e 100644 (file)
@@ -923,22 +923,20 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
 
 
 MaybeObject* CallStubCompiler::GenerateMissBranch() {
+  MaybeObject* maybe_obj =
+      StubCache::ComputeCallMiss(arguments().immediate(), kind_);
   Object* obj;
-  { MaybeObject* maybe_obj =
-        StubCache::ComputeCallMiss(arguments().immediate(), kind_);
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
+  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
   return obj;
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallConstant(
-    Object* object,
-    JSObject* holder,
-    JSFunction* function,
-    String* name,
-    StubCompiler::CheckType check) {
+MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
+                                                   JSObject* holder,
+                                                   JSFunction* function,
+                                                   String* name,
+                                                   CheckType check) {
   // ----------- S t a t e -------------
   // rcx                 : function name
   // rsp[0]              : return address
@@ -1467,7 +1465,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
   char_at_generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm(), call_helper);
 
   __ bind(&index_out_of_range);
@@ -1539,7 +1537,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
   char_code_at_generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_code_at_generator.GenerateSlow(masm(), call_helper);
 
   __ bind(&index_out_of_range);
@@ -1608,7 +1606,7 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
   char_from_code_generator.GenerateFast(masm());
   __ ret(2 * kPointerSize);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_from_code_generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
@@ -2249,6 +2247,52 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
 }
 
 
+MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(rdx, &miss);
+
+  // Check that the map matches.
+  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+         Handle<Map>(receiver->map()));
+  __ j(not_equal, &miss);
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rax, &miss);
+
+  // Get the elements array.
+  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ AssertFastElements(rcx);
+
+  // Check that the key is within bounds.
+  __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ j(above_equal, &miss);
+
+  // Load the result and make sure it's not the hole.
+  SmiIndex index = masm()->SmiToIndex(rbx, rax, kPointerSizeLog2);
+  __ movq(rbx, FieldOperand(rcx,
+                            index.reg,
+                            index.scale,
+                            FixedArray::kHeaderSize));
+  __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
+  __ j(equal, &miss);
+  __ movq(rax, rbx);
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
 MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
                                                      AccessorInfo* callback,
                                                      String* name) {
@@ -2477,6 +2521,63 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
 }
 
 
+MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
+    JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(rdx, &miss);
+
+  // Check that the map matches.
+  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+         Handle<Map>(receiver->map()));
+  __ j(not_equal, &miss);
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rcx, &miss);
+
+  // Get the elements array; it must be fast elements, not copy-on-write.
+  __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
+         Factory::fixed_array_map());
+  __ j(not_equal, &miss);
+
+  // Check that the key is within bounds.
+  if (receiver->IsJSArray()) {
+    __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+    __ j(above_equal, &miss);
+  } else {
+    __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
+    __ j(above_equal, &miss);
+  }
+
+  // Do the store and update the write barrier. Make sure to preserve
+  // the value in register rax.
+  __ movq(rdx, rax);
+  __ SmiToInteger32(rcx, rcx);
+  __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
+          rax);
+  __ RecordWrite(rdi, 0, rdx, rcx);
+
+  // Done.
+  __ ret(0);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
 void StubCompiler::GenerateLoadInterceptor(JSObject* object,
                                            JSObject* interceptor_holder,
                                            LookupResult* lookup,
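
Both specialized stubs above share the same fast-path shape: guard the receiver's type and map, guard the key, bounds-check against the elements array, and only then touch the element, jumping to the miss label when any guard fails. A hedged C++ paraphrase of the load path (every helper below is a hypothetical stand-in for a generated compare-and-branch, not a real V8 call):

    // Hypothetical paraphrase of the checks CompileLoadSpecialized emits.
    Object* SpecializedKeyedLoad(JSObject* receiver, Object* key,
                                 Map* expected_map) {
      if (IsSmi(receiver)) return Miss();                  // need a heap object
      if (receiver->map() != expected_map) return Miss();  // map check
      if (!IsSmi(key)) return Miss();                      // key must be a smi
      FixedArray* elements = receiver->elements();         // fast elements
      int index = SmiValue(key);
      // The stub's single unsigned compare covers both bounds.
      if (index < 0 || index >= elements->length()) return Miss();
      Object* value = elements->get(index);
      if (value == TheHole()) return Miss();               // holes miss to the IC
      return value;
    }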
index 3397356..dde722f 100644 (file)
@@ -169,9 +169,19 @@ class ZoneList: public List<T, ZoneListAllocationPolicy> {
   // always zero. The capacity must be non-negative.
   explicit ZoneList(int capacity)
       : List<T, ZoneListAllocationPolicy>(capacity) { }
+
+  // Construct a new ZoneList by copying the elements of the given ZoneList.
+  explicit ZoneList(const ZoneList<T>& other)
+      : List<T, ZoneListAllocationPolicy>(other.length()) {
+    AddAll(other);
+  }
 };
 
 
+// Introduce a convenience type for zone lists of map handles.
+typedef ZoneList<Handle<Map> > ZoneMapList;
+
+
 // ZoneScopes keep track of the current parsing and compilation
 // nesting and cleans up generated ASTs in the Zone when exiting the
 // outer-most scope.
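
A brief usage sketch of the new copy constructor together with the ZoneMapList typedef (map_one and map_two are hypothetical Handle<Map> values; assumes a zone is active):

    // The copy preallocates exactly other.length() slots and then AddAll()s
    // the elements, so the two lists share no backing storage afterwards.
    ZoneMapList* original = new ZoneMapList(2);
    original->Add(map_one);
    original->Add(map_two);
    ZoneMapList copy(*original);  // independent element-wise copy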
index da2ee3d..7038137 100644 (file)
@@ -50,6 +50,7 @@ SOURCES = {
     'test-dataflow.cc',
     'test-debug.cc',
     'test-decls.cc',
+    'test-deoptimization.cc',
     'test-diy-fp.cc',
     'test-double.cc',
     'test-dtoa.cc',
index 895e245..97a5c9f 100644 (file)
@@ -29,19 +29,43 @@ prefix cctest
 
 test-api/Bug*: FAIL
 
+
+##############################################################################
 # BUG(281): This test fails on some Linuxes.
 test-debug/DebuggerAgent: PASS, (PASS || FAIL) if $system == linux
 
 # BUG(382): Weird test. Can't guarantee that it never times out.
 test-api/ApplyInterruption: PASS || TIMEOUT
 
+# BUG(484): This test, which we thought was corrected in r5236, is
+# re-appearing. It only fails when snapshot is on, so it is marked
+# PASS || FAIL until the bug in the test is fixed.
+test-heap-profiler/HeapSnapshotsDiff: PASS || FAIL
+
+# BUG(3260336): Flaky test. May be timing related.
+test-profile-generator/RecordStackTraceAtStartProfiling: PASS || FAIL
+
 # These tests always fail.  They are here to test test.py.  If
 # they don't fail then test.py has failed.
 test-serialize/TestThatAlwaysFails: FAIL
 test-serialize/DependentTestThatAlwaysFails: FAIL
 
+
+##############################################################################
+[ $arch == x64 ]
+
+# Optimization is currently not working with Crankshaft on x64 and ARM.
+test-heap/TestInternalWeakLists: PASS || FAIL
+test-heap/TestInternalWeakListsTraverseWithGC: PASS || FAIL
+
+
+##############################################################################
 [ $arch == arm ]
 
+# Optimization is currently not working with Crankshaft on x64 and ARM.
+test-heap/TestInternalWeakLists: PASS || FAIL
+test-heap/TestInternalWeakListsTraverseWithGC: PASS || FAIL
+
 # We cannot assume that we can throw OutOfMemory exceptions in all situations.
 # Apparently our ARM box is in such a state. Skip the test as it also runs for
 # a long time.
@@ -51,6 +75,8 @@ test-api/OutOfMemoryNested: SKIP
 # BUG(355): Test crashes on ARM.
 test-log/ProfLazyMode: SKIP
 
+
+##############################################################################
 [ $arch == mips ]
 test-accessors: SKIP
 test-alloc: SKIP
index fcf2ce4..d2a28d7 100644 (file)
@@ -133,7 +133,7 @@ TEST(StressJS) {
   // Force the creation of an initial map and set the code to
   // something empty.
   Factory::NewJSObject(function);
-  function->set_code(Builtins::builtin(Builtins::EmptyFunction));
+  function->ReplaceCode(Builtins::builtin(Builtins::EmptyFunction));
   // Patch the map to have an accessor for "get".
   Handle<Map> map(function->initial_map());
   Handle<DescriptorArray> instance_descriptors(map->instance_descriptors());
index 8ce7a79..b6f5511 100644 (file)
@@ -5678,6 +5678,22 @@ THREADED_TEST(GlobalObjectInstanceProperties) {
   instance_template->Set(v8_str("f"),
                          v8::FunctionTemplate::New(InstanceFunctionCallback));
 
+  // The script checks how Crankshaft compiles invocations of missing global
+  // functions. The function g is not defined and should throw on call.
+  const char* script =
+      "function wrapper(call) {"
+      "  var x = 0, y = 1;"
+      "  for (var i = 0; i < 1000; i++) {"
+      "    x += i * 100;"
+      "    y += i * 100;"
+      "  }"
+      "  if (call) g();"
+      "}"
+      "for (var i = 0; i < 17; i++) wrapper(false);"
+      "var thrown = 0;"
+      "try { wrapper(true); } catch (e) { thrown = 1; };"
+      "thrown";
+
   {
     LocalContext env(NULL, instance_template);
     // Hold on to the global object so it can be used again in another
@@ -5688,6 +5704,8 @@ THREADED_TEST(GlobalObjectInstanceProperties) {
     CHECK_EQ(42, value->Int32Value());
     value = Script::Compile(v8_str("f()"))->Run();
     CHECK_EQ(12, value->Int32Value());
+    value = Script::Compile(v8_str(script))->Run();
+    CHECK_EQ(1, value->Int32Value());
   }
 
   {
@@ -5697,6 +5715,48 @@ THREADED_TEST(GlobalObjectInstanceProperties) {
     CHECK_EQ(42, value->Int32Value());
     value = Script::Compile(v8_str("f()"))->Run();
     CHECK_EQ(12, value->Int32Value());
+    value = Script::Compile(v8_str(script))->Run();
+    CHECK_EQ(1, value->Int32Value());
+  }
+}
+
+
+THREADED_TEST(CallKnownGlobalReceiver) {
+  v8::HandleScope handle_scope;
+
+  Local<Value> global_object;
+
+  Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+  Local<ObjectTemplate> instance_template = t->InstanceTemplate();
+
+  // The script checks that we leave the global object, not the global
+  // object proxy, on the stack when we deoptimize from inside arguments
+  // evaluation.
+  // To provoke the error we must both force deoptimization from
+  // arguments evaluation and force CallIC to take the CallIC_Miss code
+  // path, which can't cope with the global proxy.
+  const char* script =
+      "function bar(x, y) { try { } finally { } }"
+      "function baz(x) { try { } finally { } }"
+      "function bom(x) { try { } finally { } }"
+      "function foo(x) { bar([x], bom(2)); }"
+      "for (var i = 0; i < 10000; i++) foo(1);"
+      "foo";
+
+  Local<Value> foo;
+  {
+    LocalContext env(NULL, instance_template);
+    // Hold on to the global object so it can be used again in another
+    // environment initialization.
+    global_object = env->Global();
+    foo = Script::Compile(v8_str(script))->Run();
+  }
+
+  {
+    // Create new environment reusing the global object.
+    LocalContext env(NULL, instance_template, global_object);
+    env->Global()->Set(v8_str("foo"), foo);
+    Local<Value> value = Script::Compile(v8_str("foo()"))->Run();
   }
 }
 
@@ -8671,6 +8731,105 @@ THREADED_TEST(TurnOnAccessCheck) {
 }
 
 
+v8::Handle<v8::String> a;
+v8::Handle<v8::String> h;
+
+static bool NamedGetAccessBlockAandH(Local<v8::Object> obj,
+                                     Local<Value> name,
+                                     v8::AccessType type,
+                                     Local<Value> data) {
+  return !(name->Equals(a) || name->Equals(h));
+}
+
+
+THREADED_TEST(TurnOnAccessCheckAndRecompile) {
+  v8::HandleScope handle_scope;
+
+  // Create an environment with access checks on the global object disabled
+  // by default. When turned on, the registered access checker will block
+  // access to properties a and h.
+  a = v8_str("a");
+  h = v8_str("h");
+  v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+  global_template->SetAccessCheckCallbacks(NamedGetAccessBlockAandH,
+                                           IndexedGetAccessBlocker,
+                                           v8::Handle<v8::Value>(),
+                                           false);
+  v8::Persistent<Context> context = Context::New(NULL, global_template);
+  Context::Scope context_scope(context);
+
+  // Set up a property and a number of functions.
+  context->Global()->Set(v8_str("a"), v8_num(1));
+  static const char* source = "function f1() {return a;}"
+                              "function f2() {return a;}"
+                              "function g1() {return h();}"
+                              "function g2() {return h();}"
+                              "function h() {return 1;}";
+
+  CompileRun(source);
+  Local<Function> f1;
+  Local<Function> f2;
+  Local<Function> g1;
+  Local<Function> g2;
+  Local<Function> h;
+  f1 = Local<Function>::Cast(context->Global()->Get(v8_str("f1")));
+  f2 = Local<Function>::Cast(context->Global()->Get(v8_str("f2")));
+  g1 = Local<Function>::Cast(context->Global()->Get(v8_str("g1")));
+  g2 = Local<Function>::Cast(context->Global()->Get(v8_str("g2")));
+  h =  Local<Function>::Cast(context->Global()->Get(v8_str("h")));
+
+  // Get the global object.
+  v8::Handle<v8::Object> global = context->Global();
+
+  // Call f1 once and f2 a number of times. This ensures that f1 still uses
+  // the runtime system to retrieve property a, whereas f2 uses the global
+  // load inline cache.
+  CHECK(f1->Call(global, 0, NULL)->Equals(v8_num(1)));
+  for (int i = 0; i < 4; i++) {
+    CHECK(f2->Call(global, 0, NULL)->Equals(v8_num(1)));
+  }
+
+  // Same for g1 and g2.
+  CHECK(g1->Call(global, 0, NULL)->Equals(v8_num(1)));
+  for (int i = 0; i < 4; i++) {
+    CHECK(g2->Call(global, 0, NULL)->Equals(v8_num(1)));
+  }
+
+  // Detach the global and turn on access checks, now blocking access to
+  // property a and function h.
+  context->DetachGlobal();
+  context->Global()->TurnOnAccessCheck();
+
+  // Failing access check to property get results in undefined.
+  CHECK(f1->Call(global, 0, NULL)->IsUndefined());
+  CHECK(f2->Call(global, 0, NULL)->IsUndefined());
+
+  // Failing access check to function call results in exception.
+  CHECK(g1->Call(global, 0, NULL).IsEmpty());
+  CHECK(g2->Call(global, 0, NULL).IsEmpty());
+
+  // No failing access check when just returning a constant.
+  CHECK(h->Call(global, 0, NULL)->Equals(v8_num(1)));
+
+  // Now compile the source again and get the newly compiled functions,
+  // except for h, for which access is blocked.
+  CompileRun(source);
+  f1 = Local<Function>::Cast(context->Global()->Get(v8_str("f1")));
+  f2 = Local<Function>::Cast(context->Global()->Get(v8_str("f2")));
+  g1 = Local<Function>::Cast(context->Global()->Get(v8_str("g1")));
+  g2 = Local<Function>::Cast(context->Global()->Get(v8_str("g2")));
+  CHECK(context->Global()->Get(v8_str("h"))->IsUndefined());
+
+  // Failing access check to property get results in undefined.
+  CHECK(f1->Call(global, 0, NULL)->IsUndefined());
+  CHECK(f2->Call(global, 0, NULL)->IsUndefined());
+
+  // Failing access check to function call results in exception.
+  CHECK(g1->Call(global, 0, NULL).IsEmpty());
+  CHECK(g2->Call(global, 0, NULL).IsEmpty());
+}
+
+
 // This test verifies that pre-compilation (aka preparsing) can be called
 // without initializing the whole VM. Thus we cannot run this test in a
 // multi-threaded setup.
@@ -10522,7 +10681,9 @@ v8::Handle<Value> AnalyzeStackInNativeCode(const v8::Arguments& args) {
 
 
 // Tests the C++ StackTrace API.
-THREADED_TEST(CaptureStackTrace) {
+// TODO(3074796): Reenable this as a THREADED_TEST once it passes.
+// THREADED_TEST(CaptureStackTrace) {
+TEST(CaptureStackTrace) {
   v8::HandleScope scope;
   v8::Handle<v8::String> origin = v8::String::New("capture-stack-trace-test");
   Local<ObjectTemplate> templ = ObjectTemplate::New();
index 003ac66..5894de2 100644 (file)
@@ -52,6 +52,24 @@ TEST(BitVector) {
   }
 
   {
+    BitVector v(64);
+    v.Add(27);
+    v.Add(30);
+    v.Add(31);
+    v.Add(33);
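+    // Bits 27, 30, and 31 sit in the first data word and bit 33 in the
+    // second (words are 32 bits wide), so this exercises the iterator's
+    // advance across a word boundary of the 64-bit vector.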
+    BitVector::Iterator iter(&v);
+    CHECK_EQ(27, iter.Current());
+    iter.Advance();
+    CHECK_EQ(30, iter.Current());
+    iter.Advance();
+    CHECK_EQ(31, iter.Current());
+    iter.Advance();
+    CHECK_EQ(33, iter.Current());
+    iter.Advance();
+    CHECK(iter.Done());
+  }
+
+  {
     BitVector v(15);
     v.Add(0);
     BitVector w(15);
index 7791185..87f9cab 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 #include "v8.h"
 
 #include "api.h"
+#include "cctest.h"
 #include "compilation-cache.h"
 #include "debug.h"
+#include "deoptimizer.h"
 #include "platform.h"
 #include "stub-cache.h"
-#include "cctest.h"
+#include "utils.h"
 
 
 using ::v8::internal::EmbeddedVector;
@@ -515,16 +517,52 @@ void CheckDebugBreakFunction(DebugLocalContext* env,
 // ---
 
 
-// Source for The JavaScript function which picks out the function name of the
-// top frame.
+// Source for the JavaScript function which picks out the function
+// name of a frame.
 const char* frame_function_name_source =
-    "function frame_function_name(exec_state) {"
-    "  return exec_state.frame(0).func().name();"
+    "function frame_function_name(exec_state, frame_number) {"
+    "  return exec_state.frame(frame_number).func().name();"
     "}";
 v8::Local<v8::Function> frame_function_name;
 
 
-// Source for The JavaScript function which picks out the source line for the
+// Source for the JavaScript function which picks out the name of the
+// first argument of a frame.
+const char* frame_argument_name_source =
+    "function frame_argument_name(exec_state, frame_number) {"
+    "  return exec_state.frame(frame_number).argumentName(0);"
+    "}";
+v8::Local<v8::Function> frame_argument_name;
+
+
+// Source for the JavaScript function which picks out the value of the
+// first argument of a frame.
+const char* frame_argument_value_source =
+    "function frame_argument_value(exec_state, frame_number) {"
+    "  return exec_state.frame(frame_number).argumentValue(0).value_;"
+    "}";
+v8::Local<v8::Function> frame_argument_value;
+
+
+// Source for the JavaScript function which picks out the name of the
+// first local variable of a frame.
+const char* frame_local_name_source =
+    "function frame_local_name(exec_state, frame_number) {"
+    "  return exec_state.frame(frame_number).localName(0);"
+    "}";
+v8::Local<v8::Function> frame_local_name;
+
+
+// Source for the JavaScript function which picks out the value of the
+// first local variable of a frame.
+const char* frame_local_value_source =
+    "function frame_local_value(exec_state, frame_number) {"
+    "  return exec_state.frame(frame_number).localValue(0).value_;"
+    "}";
+v8::Local<v8::Function> frame_local_value;
+
+
+// Source for the JavaScript function which picks out the source line for the
 // top frame.
 const char* frame_source_line_source =
     "function frame_source_line(exec_state) {"
@@ -533,7 +571,7 @@ const char* frame_source_line_source =
 v8::Local<v8::Function> frame_source_line;
 
 
-// Source for The JavaScript function which picks out the source column for the
+// Source for the JavaScript function which picks out the source column for the
 // top frame.
 const char* frame_source_column_source =
     "function frame_source_column(exec_state) {"
@@ -542,7 +580,7 @@ const char* frame_source_column_source =
 v8::Local<v8::Function> frame_source_column;
 
 
-// Source for The JavaScript function which picks out the script name for the
+// Source for the JavaScript function which picks out the script name for the
 // top frame.
 const char* frame_script_name_source =
     "function frame_script_name(exec_state) {"
@@ -551,7 +589,7 @@ const char* frame_script_name_source =
 v8::Local<v8::Function> frame_script_name;
 
 
-// Source for The JavaScript function which picks out the script data for the
+// Source for the JavaScript function which picks out the script data for the
 // top frame.
 const char* frame_script_data_source =
     "function frame_script_data(exec_state) {"
@@ -560,7 +598,7 @@ const char* frame_script_data_source =
 v8::Local<v8::Function> frame_script_data;
 
 
-// Source for The JavaScript function which picks out the script data from
+// Source for the JavaScript function which picks out the script data from
 // the AfterCompile event.
 const char* compiled_script_data_source =
     "function compiled_script_data(event_data) {"
@@ -569,7 +607,7 @@ const char* compiled_script_data_source =
 v8::Local<v8::Function> compiled_script_data;
 
 
-// Source for The JavaScript function which returns the number of frames.
+// Source for the JavaScript function which returns the number of frames.
 static const char* frame_count_source =
     "function frame_count(exec_state) {"
     "  return exec_state.frameCount();"
@@ -603,8 +641,8 @@ static void DebugEventBreakPointHitCount(v8::DebugEvent event,
     break_point_hit_count++;
     if (!frame_function_name.IsEmpty()) {
       // Get the name of the function.
-      const int argc = 1;
-      v8::Handle<v8::Value> argv[argc] = { exec_state };
+      const int argc = 2;
+      v8::Handle<v8::Value> argv[argc] = { exec_state, v8::Integer::New(0) };
       v8::Handle<v8::Value> result = frame_function_name->Call(exec_state,
                                                                argc, argv);
       if (result->IsUndefined()) {
@@ -834,8 +872,8 @@ static void DebugEventStepSequence(v8::DebugEvent event,
     // Check that the current function is the expected.
     CHECK(break_point_hit_count <
           StrLength(expected_step_sequence));
-    const int argc = 1;
-    v8::Handle<v8::Value> argv[argc] = { exec_state };
+    const int argc = 2;
+    v8::Handle<v8::Value> argv[argc] = { exec_state, v8::Integer::New(0) };
     v8::Handle<v8::Value> result = frame_function_name->Call(exec_state,
                                                              argc, argv);
     CHECK(result->IsString());
@@ -2586,6 +2624,10 @@ TEST(DebugStepLinear) {
   v8::Local<v8::Function> foo = CompileFunction(&env,
                                                 "function foo(){a=1;b=1;c=1;}",
                                                 "foo");
+
+  // Run foo to allow it to get optimized.
+  CompileRun("a=0; b=0; c=0; foo();");
+
   SetBreakPoint(foo, 3);
 
   // Register a debug event listener which steps and counts.
@@ -2635,7 +2677,8 @@ TEST(DebugStepKeyedLoadLoop) {
       "    y = 1;\n"
       "    x = a[i];\n"
       "  }\n"
-      "}\n",
+      "}\n"
+      "y=0\n",
       "foo");
 
   // Create array [0,1,2,3,4,5,6,7,8,9]
@@ -2681,7 +2724,8 @@ TEST(DebugStepKeyedStoreLoop) {
       "    y = 1;\n"
       "    a[i] = 42;\n"
       "  }\n"
-      "}\n",
+      "}\n"
+      "y=0\n",
       "foo");
 
   // Create array [0,1,2,3,4,5,6,7,8,9]
@@ -2753,15 +2797,12 @@ TEST(DebugStepNamedLoadLoop) {
 }
 
 
-static void DoDebugStepNamedStoreLoop(int expected, bool full_compiler = true) {
+static void DoDebugStepNamedStoreLoop(int expected) {
   v8::HandleScope scope;
   DebugLocalContext env;
 
-  // Register a debug event listener which steps and counts before compiling the
-  // function to ensure the full compiler is used.
-  if (full_compiler) {
-    v8::Debug::SetDebugEventListener(DebugEventStep);
-  }
+  // Register a debug event listener which steps and counts.
+  v8::Debug::SetDebugEventListener(DebugEventStep);
 
   // Create a function for testing stepping of named store.
   v8::Local<v8::Function> foo = CompileFunction(
@@ -2777,12 +2818,6 @@ static void DoDebugStepNamedStoreLoop(int expected, bool full_compiler = true) {
   // Call function without any break points to ensure inlining is in place.
   foo->Call(env->Global(), 0, NULL);
 
-  // Register a debug event listener which steps and counts after compiling the
-  // function to ensure the optimizing compiler is used.
-  if (!full_compiler) {
-    v8::Debug::SetDebugEventListener(DebugEventStep);
-  }
-
   // Setup break point and step through the function.
   SetBreakPoint(foo, 3);
   step_action = StepNext;
@@ -2798,20 +2833,11 @@ static void DoDebugStepNamedStoreLoop(int expected, bool full_compiler = true) {
 
 
 // Test of the stepping mechanism for named load in a loop.
-TEST(DebugStepNamedStoreLoopFull) {
-  // With the full compiler it is possible to break on the for statement.
+TEST(DebugStepNamedStoreLoop) {
   DoDebugStepNamedStoreLoop(22);
 }
 
 
-// Test of the stepping mechanism for named load in a loop.
-TEST(DebugStepNamedStoreLoopOptimizing) {
-  // With the optimizing compiler it is not possible to break on the for
-  // statement as it uses a local variable thus no IC's.
-  DoDebugStepNamedStoreLoop(11, false);
-}
-
-
 // Test the stepping mechanism with different ICs.
 TEST(DebugStepLinearMixedICs) {
   v8::HandleScope scope;
@@ -2828,6 +2854,10 @@ TEST(DebugStepLinearMixedICs) {
       "  var index='name';"
       "  var y = {};"
       "  a=1;b=2;x=a;y[index]=3;x=y[index];bar();}", "foo");
+
+  // Run functions to allow them to get optimized.
+  CompileRun("a=0; b=0; bar(); foo();");
+
   SetBreakPoint(foo, 0);
 
   step_action = StepIn;
@@ -2862,15 +2892,18 @@ TEST(DebugStepDeclarations) {
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const char* src = "function foo() { "
                     "  var a;"
                     "  var b = 1;"
                     "  var c = foo;"
                     "  var d = Math.floor;"
                     "  var e = b + d(1.2);"
-                    "}";
+                    "}"
+                    "foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
+
   SetBreakPoint(foo, 0);
 
   // Stepping through the declarations.
@@ -2892,15 +2925,18 @@ TEST(DebugStepLocals) {
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const char* src = "function foo() { "
                     "  var a,b;"
                     "  a = 1;"
                     "  b = a + 2;"
                     "  b = 1 + 2 + 3;"
                     "  a = Math.floor(b);"
-                    "}";
+                    "}"
+                    "foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
+
   SetBreakPoint(foo, 0);
 
   // Stepping through the declarations.
@@ -2922,7 +2958,8 @@ TEST(DebugStepIf) {
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const int argc = 1;
   const char* src = "function foo(x) { "
                     "  a = 1;"
@@ -2932,7 +2969,8 @@ TEST(DebugStepIf) {
                     "    c = 1;"
                     "    d = 1;"
                     "  }"
-                    "}";
+                    "}"
+                    "a=0; b=0; c=0; d=0; foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
   SetBreakPoint(foo, 0);
 
@@ -2963,7 +3001,8 @@ TEST(DebugStepSwitch) {
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const int argc = 1;
   const char* src = "function foo(x) { "
                     "  a = 1;"
@@ -2979,7 +3018,8 @@ TEST(DebugStepSwitch) {
                     "      f = 1;"
                     "      break;"
                     "  }"
-                    "}";
+                    "}"
+                    "a=0; b=0; c=0; d=0; e=0; f=0; foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
   SetBreakPoint(foo, 0);
 
@@ -3017,14 +3057,16 @@ TEST(DebugStepWhile) {
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const int argc = 1;
   const char* src = "function foo(x) { "
                     "  var a = 0;"
                     "  while (a < x) {"
                     "    a++;"
                     "  }"
-                    "}";
+                    "}"
+                    "foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
   SetBreakPoint(foo, 8);  // "var a = 0;"
 
@@ -3033,14 +3075,14 @@ TEST(DebugStepWhile) {
   break_point_hit_count = 0;
   v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(10) };
   foo->Call(env->Global(), argc, argv_10);
-  CHECK_EQ(23, break_point_hit_count);
+  CHECK_EQ(22, break_point_hit_count);
 
   // Looping 100 times.
   step_action = StepIn;
   break_point_hit_count = 0;
   v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(100) };
   foo->Call(env->Global(), argc, argv_100);
-  CHECK_EQ(203, break_point_hit_count);
+  CHECK_EQ(202, break_point_hit_count);
 
   // Get rid of the debug event listener.
   v8::Debug::SetDebugEventListener(NULL);
@@ -3055,14 +3097,16 @@ TEST(DebugStepDoWhile) {
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const int argc = 1;
   const char* src = "function foo(x) { "
                     "  var a = 0;"
                     "  do {"
                     "    a++;"
                     "  } while (a < x)"
-                    "}";
+                    "}"
+                    "foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
   SetBreakPoint(foo, 8);  // "var a = 0;"
 
@@ -3093,15 +3137,18 @@ TEST(DebugStepFor) {
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const int argc = 1;
   const char* src = "function foo(x) { "
                     "  a = 1;"
                     "  for (i = 0; i < x; i++) {"
                     "    b = 1;"
                     "  }"
-                    "}";
+                    "}"
+                    "a=0; b=0; i=0; foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
+
   SetBreakPoint(foo, 8);  // "a = 1;"
 
   // Looping 10 times.
@@ -3131,7 +3178,8 @@ TEST(DebugStepForContinue) {
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const int argc = 1;
   const char* src = "function foo(x) { "
                     "  var a = 0;"
@@ -3144,7 +3192,8 @@ TEST(DebugStepForContinue) {
                     "    c++;"
                     "  }"
                     "  return b;"
-                    "}";
+                    "}"
+                    "foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
   v8::Handle<v8::Value> result;
   SetBreakPoint(foo, 8);  // "var a = 0;"
@@ -3180,7 +3229,8 @@ TEST(DebugStepForBreak) {
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const int argc = 1;
   const char* src = "function foo(x) { "
                     "  var a = 0;"
@@ -3193,7 +3243,8 @@ TEST(DebugStepForBreak) {
                     "    c++;"
                     "  }"
                     "  return b;"
-                    "}";
+                    "}"
+                    "foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
   v8::Handle<v8::Value> result;
   SetBreakPoint(foo, 8);  // "var a = 0;"
@@ -3230,13 +3281,16 @@ TEST(DebugStepForIn) {
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   v8::Local<v8::Function> foo;
   const char* src_1 = "function foo() { "
                       "  var a = [1, 2];"
                       "  for (x in a) {"
                       "    b = 0;"
                       "  }"
-                      "}";
+                      "}"
+                      "foo()";
   foo = CompileFunction(&env, src_1, "foo");
   SetBreakPoint(foo, 0);  // "var a = ..."
 
@@ -3245,12 +3299,15 @@ TEST(DebugStepForIn) {
   foo->Call(env->Global(), 0, NULL);
   CHECK_EQ(6, break_point_hit_count);
 
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const char* src_2 = "function foo() { "
                       "  var a = {a:[1, 2, 3]};"
                       "  for (x in a.a) {"
                       "    b = 0;"
                       "  }"
-                      "}";
+                      "}"
+                      "foo()";
   foo = CompileFunction(&env, src_2, "foo");
   SetBreakPoint(foo, 0);  // "var a = ..."
 
@@ -3272,12 +3329,14 @@ TEST(DebugStepWith) {
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const char* src = "function foo(x) { "
                     "  var a = {};"
                     "  with (a) {}"
                     "  with (b) {}"
-                    "}";
+                    "}"
+                    "foo()";
   env->Global()->Set(v8::String::New("b"), v8::Object::New());
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
   v8::Handle<v8::Value> result;
@@ -3301,12 +3360,14 @@ TEST(DebugConditional) {
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const char* src = "function foo(x) { "
                     "  var a;"
                     "  a = x ? 1 : 2;"
                     "  return a;"
-                    "}";
+                    "}"
+                    "foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
   SetBreakPoint(foo, 0);  // "var a;"
 
@@ -3340,10 +3401,12 @@ TEST(StepInOutSimple) {
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStepSequence);
 
-  // Create functions for testing stepping.
+  // Create functions for testing stepping. Run them to allow them to get
+  // optimized.
   const char* src = "function a() {b();c();}; "
                     "function b() {c();}; "
-                    "function c() {}; ";
+                    "function c() {}; "
+                    "a(); b(); c()";
   v8::Local<v8::Function> a = CompileFunction(&env, src, "a");
   SetBreakPoint(a, 0);
 
@@ -3389,11 +3452,13 @@ TEST(StepInOutTree) {
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStepSequence);
 
-  // Create functions for testing stepping.
+  // Create functions for testing stepping. Run them to allow them to get
+  // optimized.
   const char* src = "function a() {b(c(d()),d());c(d());d()}; "
                     "function b(x,y) {c();}; "
                     "function c(x) {}; "
-                    "function d() {}; ";
+                    "function d() {}; "
+                    "a(); b(); c(); d()";
   v8::Local<v8::Function> a = CompileFunction(&env, src, "a");
   SetBreakPoint(a, 0);
 
@@ -3439,10 +3504,12 @@ TEST(StepInOutBranch) {
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStepSequence);
 
-  // Create functions for testing stepping.
+  // Create functions for testing stepping. Run them to allow them to get
+  // optimized.
   const char* src = "function a() {b(false);c();}; "
                     "function b(x) {if(x){c();};}; "
-                    "function c() {}; ";
+                    "function c() {}; "
+                    "a(); b(); c()";
   v8::Local<v8::Function> a = CompileFunction(&env, src, "a");
   SetBreakPoint(a, 0);
 
@@ -6116,8 +6183,8 @@ static void DebugEventDebugBreak(
     // Get the name of the top frame function.
     if (!frame_function_name.IsEmpty()) {
       // Get the name of the function.
-      const int argc = 1;
-      v8::Handle<v8::Value> argv[argc] = { exec_state };
+      const int argc = 2;
+      v8::Handle<v8::Value> argv[argc] = { exec_state, v8::Integer::New(0) };
       v8::Handle<v8::Value> result = frame_function_name->Call(exec_state,
                                                                argc, argv);
       if (result->IsUndefined()) {
@@ -6846,6 +6913,7 @@ static void DebugEventBreakDataChecker(const v8::Debug::EventDetails& details) {
   }
 }
 
+
 // Check that event details contain the context where the debug event occurred.
 TEST(DebugEventBreakData) {
   v8::HandleScope scope;
@@ -6898,6 +6966,156 @@ TEST(DebugEventBreakData) {
   CheckDebuggerUnloaded();
 }
 
+static bool debug_event_break_deoptimize_done = false;
+
+static void DebugEventBreakDeoptimize(v8::DebugEvent event,
+                                      v8::Handle<v8::Object> exec_state,
+                                      v8::Handle<v8::Object> event_data,
+                                      v8::Handle<v8::Value> data) {
+  if (event == v8::Break) {
+    if (!frame_function_name.IsEmpty()) {
+      // Get the name of the function.
+      const int argc = 2;
+      v8::Handle<v8::Value> argv[argc] = { exec_state, v8::Integer::New(0) };
+      v8::Handle<v8::Value> result =
+          frame_function_name->Call(exec_state, argc, argv);
+      if (!result->IsUndefined()) {
+        char fn[80];
+        CHECK(result->IsString());
+        v8::Handle<v8::String> function_name(result->ToString());
+        function_name->WriteAscii(fn);
+        if (strcmp(fn, "bar") == 0) {
+          i::Deoptimizer::DeoptimizeAll();
+          debug_event_break_deoptimize_done = true;
+        }
+      }
+    }
+
+    v8::Debug::DebugBreak();
+  }
+}
+
+
+// Test deoptimization when execution is broken using the debug break stack
+// check interrupt.
+TEST(DeoptimizeDuringDebugBreak) {
+  v8::HandleScope scope;
+  DebugLocalContext env;
+  env.ExposeDebug();
+
+  // Create a function for checking the function name at a break point.
+  frame_function_name = CompileFunction(&env,
+                                        frame_function_name_source,
+                                        "frame_function_name");
+
+
+  // Set a debug event listener which will keep interrupting execution until
+  // debug break. When inside function bar it will deoptimize all functions.
+  // This tests lazy deoptimization bailout for the stack check, as the first
+  // time in function bar when using debug break and no break points will be at
+  // the initial stack check.
+  v8::Debug::SetDebugEventListener(DebugEventBreakDeoptimize,
+                                   v8::Undefined());
+
+  // Compile and run function bar; under some flag settings it is optimized.
+  v8::Script::Compile(v8::String::New("function bar(){}; bar()"))->Run();
+
+  // Set debug break and call bar again.
+  v8::Debug::DebugBreak();
+  v8::Script::Compile(v8::String::New("bar()"))->Run();
+
+  CHECK(debug_event_break_deoptimize_done);
+
+  v8::Debug::SetDebugEventListener(NULL);
+}
+
+
+static void DebugEventBreakWithOptimizedStack(v8::DebugEvent event,
+                                              v8::Handle<v8::Object> exec_state,
+                                              v8::Handle<v8::Object> event_data,
+                                              v8::Handle<v8::Value> data) {
+  if (event == v8::Break) {
+    if (!frame_function_name.IsEmpty()) {
+      for (int i = 0; i < 2; i++) {
+        const int argc = 2;
+        v8::Handle<v8::Value> argv[argc] = { exec_state, v8::Integer::New(i) };
+        // Get the name of the function in frame i.
+        v8::Handle<v8::Value> result =
+            frame_function_name->Call(exec_state, argc, argv);
+        CHECK(result->IsString());
+        v8::Handle<v8::String> function_name(result->ToString());
+        CHECK(function_name->Equals(v8::String::New("loop")));
+        // Get the name of the first argument in frame i.
+        result = frame_argument_name->Call(exec_state, argc, argv);
+        CHECK(result->IsString());
+        v8::Handle<v8::String> argument_name(result->ToString());
+        CHECK(argument_name->Equals(v8::String::New("count")));
+        // Get the value of the first argument in frame i. If the
+        // function is optimized the value will be undefined, otherwise
+        // the value will be '1 - i'.
+        //
+        // TODO(3141533): We should be able to get the real value for
+        // optimized frames.
+        result = frame_argument_value->Call(exec_state, argc, argv);
+        CHECK(result->IsUndefined() || (result->Int32Value() == 1 - i));
+        // Get the name of the first local variable.
+        result = frame_local_name->Call(exec_state, argc, argv);
+        CHECK(result->IsString());
+        v8::Handle<v8::String> local_name(result->ToString());
+        CHECK(local_name->Equals(v8::String::New("local")));
+        // Get the value of the first local variable. If the function
+        // is optimized the value will be undefined, otherwise it will
+        // be 42.
+        //
+        // TODO(3141533): We should be able to get the real value for
+        // optimized frames.
+        result = frame_local_value->Call(exec_state, argc, argv);
+        CHECK(result->IsUndefined() || (result->Int32Value() == 42));
+      }
+    }
+  }
+}
+
+
+static v8::Handle<v8::Value> ScheduleBreak(const v8::Arguments& args) {
+  v8::Debug::SetDebugEventListener(DebugEventBreakWithOptimizedStack,
+                                   v8::Undefined());
+  v8::Debug::DebugBreak();
+  return v8::Undefined();
+}
+
+
+TEST(DebugBreakStackInspection) {
+  v8::HandleScope scope;
+  DebugLocalContext env;
+
+  frame_function_name =
+      CompileFunction(&env, frame_function_name_source, "frame_function_name");
+  frame_argument_name =
+      CompileFunction(&env, frame_argument_name_source, "frame_argument_name");
+  frame_argument_value = CompileFunction(&env,
+                                         frame_argument_value_source,
+                                         "frame_argument_value");
+  frame_local_name =
+      CompileFunction(&env, frame_local_name_source, "frame_local_name");
+  frame_local_value =
+      CompileFunction(&env, frame_local_value_source, "frame_local_value");
+
+  v8::Handle<v8::FunctionTemplate> schedule_break_template =
+      v8::FunctionTemplate::New(ScheduleBreak);
+  v8::Handle<v8::Function> schedule_break =
+      schedule_break_template->GetFunction();
+  env->Global()->Set(v8_str("scheduleBreak"), schedule_break);
+
+  const char* src =
+      "function loop(count) {"
+      "  var local = 42;"
+      "  if (count < 1) { scheduleBreak(); loop(count + 1); }"
+      "}"
+      "loop(0);";
+  v8::Script::Compile(v8::String::New(src))->Run();
+}
+
 
 // Test setting the terminate execution flag during debug break processing.
 static void TestDebugBreakInLoop(const char* loop_head,
diff --git a/test/cctest/test-deoptimization.cc b/test/cctest/test-deoptimization.cc
new file mode 100644 (file)
index 0000000..1745355
--- /dev/null
@@ -0,0 +1,714 @@
+// Copyright 2007-2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "api.h"
+#include "compilation-cache.h"
+#include "debug.h"
+#include "deoptimizer.h"
+#include "platform.h"
+#include "stub-cache.h"
+#include "cctest.h"
+
+
+using ::v8::internal::Handle;
+using ::v8::internal::Object;
+using ::v8::internal::JSFunction;
+using ::v8::internal::Deoptimizer;
+using ::v8::internal::EmbeddedVector;
+using ::v8::internal::OS;
+
+// Size of temp buffer for formatting small strings.
+#define SMALL_STRING_BUFFER_SIZE 80
+
+// Utility class that sets --always-opt, --allow-natives-syntax and
+// --nouse-inlining when constructed and restores the previous flag
+// values when destroyed.
+class AlwaysOptimizeAllowNativesSyntaxNoInlining {
+ public:
+  AlwaysOptimizeAllowNativesSyntaxNoInlining()
+      : always_opt_(i::FLAG_always_opt),
+        allow_natives_syntax_(i::FLAG_allow_natives_syntax),
+        use_inlining_(i::FLAG_use_inlining) {
+    i::FLAG_always_opt = true;
+    i::FLAG_allow_natives_syntax = true;
+    i::FLAG_use_inlining = false;
+  }
+
+  ~AlwaysOptimizeAllowNativesSyntaxNoInlining() {
+    i::FLAG_allow_natives_syntax = allow_natives_syntax_;
+    i::FLAG_always_opt = always_opt_;
+    i::FLAG_use_inlining = use_inlining_;
+  }
+
+ private:
+  bool always_opt_;
+  bool allow_natives_syntax_;
+  bool use_inlining_;
+};
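
A minimal usage sketch of this RAII idiom (hypothetical scope; assumes the flags start at their build defaults):

{
  AlwaysOptimizeAllowNativesSyntaxNoInlining options;
  // In scope: i::FLAG_always_opt and i::FLAG_allow_natives_syntax are
  // forced to true, i::FLAG_use_inlining to false.
  CompileRun("function f() { return 1; } f();");  // compiled with always-opt
}
// The constructor-saved flag values are restored here.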
+
+
+// Utility class that sets --allow-natives-syntax and --nouse-inlining
+// when constructed and restores the previous flag values when destroyed.
+class AllowNativesSyntaxNoInlining {
+ public:
+  AllowNativesSyntaxNoInlining()
+      : allow_natives_syntax_(i::FLAG_allow_natives_syntax),
+        use_inlining_(i::FLAG_use_inlining) {
+    i::FLAG_allow_natives_syntax = true;
+    i::FLAG_use_inlining = false;
+  }
+
+  ~AllowNativesSyntaxNoInlining() {
+    i::FLAG_allow_natives_syntax = allow_natives_syntax_;
+    i::FLAG_use_inlining = use_inlining_;
+  }
+
+ private:
+  bool allow_natives_syntax_;
+  bool use_inlining_;
+};
+
+
+Handle<JSFunction> GetJSFunction(v8::Handle<v8::Object> obj,
+                                 const char* property_name) {
+  v8::Local<v8::Function> fun =
+      v8::Local<v8::Function>::Cast(obj->Get(v8_str(property_name)));
+  return v8::Utils::OpenHandle(*fun);
+}
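
This helper crosses from the public API into the internal heap representation, which is what lets the tests below ask questions the public API does not expose. A sketch of the typical use:

// Sketch: query the optimization state of the global function "f".
Handle<JSFunction> f = GetJSFunction(env->Global(), "f");
bool is_optimized = f->IsOptimized();  // internal-only query used below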
+
+
+TEST(DeoptimizeSimple) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  // Test lazy deoptimization of a simple function.
+  {
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "function h() { %DeoptimizeFunction(f); }"
+        "function g() { count++; h(); }"
+        "function f() { g(); };"
+        "f();"
+        "gc(); gc()");
+  }
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+
+  // Test lazy deoptimization of a simple function. Call the function after the
+  // deoptimization while it is still activated further down the stack.
+  {
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "function g() { count++; %DeoptimizeFunction(f); f(false); }"
+        "function f(x) { if (x) { g(); } else { return } };"
+        "f(true);"
+        "gc(); gc()");
+  }
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
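
The pattern above recurs throughout this file: %DeoptimizeFunction marks f's optimized code for lazy deoptimization while f is still on the stack, control returns into the deoptimized frame, and the two gc() calls let the orphaned optimized code object be collected, which is why GetDeoptimizedCodeCount() is expected back at 0. A condensed sketch, assuming the same cctest environment as above:

AlwaysOptimizeAllowNativesSyntaxNoInlining options;
CompileRun(
    "function h() { %DeoptimizeFunction(f); }"  // deopt f from a callee
    "function f() { h(); }"
    "f();"          // f is deoptimized while still activated
    "gc(); gc()");  // let the discarded optimized code be collected
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());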
+
+
+TEST(DeoptimizeSimpleWithArguments) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  // Test lazy deoptimization of a simple function with some arguments.
+  {
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "function h(x) { %DeoptimizeFunction(f); }"
+        "function g(x, y) { count++; h(x); }"
+        "function f(x, y, z) { g(1,x); y+z; };"
+        "f(1, \"2\", false);"
+        "gc(); gc()");
+  }
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+
+  // Test lazy deoptimization of a simple function with some arguments. Call the
+  // function after the deoptimization while it is still activated further down
+  // the stack.
+  {
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "function g(x, y) { count++; %DeoptimizeFunction(f); f(false, 1, y); }"
+        "function f(x, y, z) { if (x) { g(x, y); } else { return y + z; } };"
+        "f(true, 1, \"2\");"
+        "gc(); gc()");
+  }
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeSimpleNested) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  // Test lazy deoptimization of a simple function. Have a nested function call
+  // do the deoptimization.
+  {
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "var result = 0;"
+        "function h(x, y, z) { return x + y + z; }"
+        "function g(z) { count++; %DeoptimizeFunction(f); return z;}"
+        "function f(x,y,z) { return h(x, y, g(z)); };"
+        "result = f(1, 2, 3);"
+        "gc(); gc()");
+
+    CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+    CHECK_EQ(6, env->Global()->Get(v8_str("result"))->Int32Value());
+    CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+    CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+  }
+}
+
+
+TEST(DeoptimizeRecursive) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  {
+    // Test lazy deoptimization of a simple function called recursively. Call
+    // the function recursively a number of times before deoptimizing it.
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "var calls = 0;"
+        "function g() { count++; %DeoptimizeFunction(f); }"
+        "function f(x) { calls++; if (x > 0) { f(x - 1); } else { g(); } };"
+        "f(10); gc(); gc()");
+  }
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(11, env->Global()->Get(v8_str("calls"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+
+  v8::Local<v8::Function> fun =
+      v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+  Handle<v8::internal::JSFunction> f = v8::Utils::OpenHandle(*fun);
+}
+
+
+TEST(DeoptimizeMultiple) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  {
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "var result = 0;"
+        "function g() { count++;"
+        "               %DeoptimizeFunction(f1);"
+        "               %DeoptimizeFunction(f2);"
+        "               %DeoptimizeFunction(f3);"
+        "               %DeoptimizeFunction(f4);}"
+        "function f4(x) { g(); };"
+        "function f3(x, y, z) { f4(); return x + y + z; };"
+        "function f2(x, y) { return x + f3(y + 1, y + 1, y + 1) + y; };"
+        "function f1(x) { return f2(x + 1, x + 1) + x; };"
+        "result = f1(1);"
+        "gc(); gc()");
+  }
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
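
The expected result of 14 follows from expanding the call chain by hand:

// f3(3, 3, 3) == 3 + 3 + 3           == 9   (g deoptimizes all four first)
// f2(2, 2)    == 2 + f3(3, 3, 3) + 2 == 13
// f1(1)       == f2(2, 2) + 1        == 14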
+
+
+TEST(DeoptimizeConstructor) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  {
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "function g() { count++;"
+        "               %DeoptimizeFunction(f); }"
+        "function f() {  g(); };"
+        "result = new f() instanceof f;"
+        "gc(); gc()");
+  }
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK(env->Global()->Get(v8_str("result"))->IsTrue());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+
+  {
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "var result = 0;"
+        "function g() { count++;"
+        "               %DeoptimizeFunction(f); }"
+        "function f(x, y) { this.x = x; g(); this.y = y; };"
+        "result = new f(1, 2);"
+        "result = result.x + result.y;"
+        "gc(); gc()");
+  }
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(3, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeConstructorMultiple) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  {
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "var result = 0;"
+        "function g() { count++;"
+        "               %DeoptimizeFunction(f1);"
+        "               %DeoptimizeFunction(f2);"
+        "               %DeoptimizeFunction(f3);"
+        "               %DeoptimizeFunction(f4);}"
+        "function f4(x) { this.result = x; g(); };"
+        "function f3(x, y, z) { this.result = new f4(x + y + z).result; };"
+        "function f2(x, y) {"
+        "    this.result = x + new f3(y + 1, y + 1, y + 1).result + y; };"
+        "function f1(x) { this.result = new f2(x + 1, x + 1).result + x; };"
+        "result = new f1(1).result;"
+        "gc(); gc()");
+  }
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeBinaryOperationADDString) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  const char* f_source = "function f(x, y) { return x + y; };";
+
+  {
+    AllowNativesSyntaxNoInlining options;
+    // Compile function f and collect type feedback so a binary op stub
+    // call is inserted in the optimized code.
+    i::FLAG_prepare_always_opt = true;
+    CompileRun("var count = 0;"
+               "var result = 0;"
+               "var deopt = false;"
+               "function X() { };"
+               "X.prototype.toString = function () {"
+               "  if (deopt) { count++; %DeoptimizeFunction(f); } return 'an X'"
+               "};");
+    CompileRun(f_source);
+    CompileRun("for (var i = 0; i < 5; i++) {"
+               "  f('a+', new X());"
+               "};");
+
+    // Compile an optimized version of f.
+    i::FLAG_always_opt = true;
+    CompileRun(f_source);
+    CompileRun("f('a+', new X());");
+    CHECK(!i::V8::UseCrankshaft() ||
+          GetJSFunction(env->Global(), "f")->IsOptimized());
+
+    // Call f and force deoptimization while processing the binary operation.
+    CompileRun("deopt = true;"
+               "var result = f('a+', new X());"
+               "gc(); gc();");
+  }
+
+  CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  v8::Handle<v8::Value> result = env->Global()->Get(v8_str("result"));
+  CHECK(result->IsString());
+  v8::String::AsciiValue ascii(result);
+  CHECK_EQ("a+an X", *ascii);
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+static void CompileConstructorWithDeoptimizingValueOf() {
+  CompileRun("var count = 0;"
+             "var result = 0;"
+             "var deopt = false;"
+             "function X() { };"
+             "X.prototype.valueOf = function () {"
+             "  if (deopt) { count++; %DeoptimizeFunction(f); } return 8"
+             "};");
+}
+
+
+static void TestDeoptimizeBinaryOpHelper(LocalContext* env,
+                                         const char* binary_op) {
+  EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> f_source_buffer;
+  OS::SNPrintF(f_source_buffer,
+               "function f(x, y) { return x %s y; };",
+               binary_op);
+  char* f_source = f_source_buffer.start();
+
+  AllowNativesSyntaxNoInlining options;
+  // Compile function f and collect type feedback so a binary op stub
+  // call is inserted in the optimized code.
+  i::FLAG_prepare_always_opt = true;
+  CompileConstructorWithDeoptimizingValueOf();
+  CompileRun(f_source);
+  CompileRun("for (var i = 0; i < 5; i++) {"
+             "  f(8, new X());"
+             "};");
+
+  // Compile an optimized version of f.
+  i::FLAG_always_opt = true;
+  CompileRun(f_source);
+  CompileRun("f(7, new X());");
+  CHECK(!i::V8::UseCrankshaft() ||
+        GetJSFunction((*env)->Global(), "f")->IsOptimized());
+
+  // Call f and force deoptimization while processing the binary operation.
+  CompileRun("deopt = true;"
+             "var result = f(7, new X());"
+             "gc(); gc();");
+
+  CHECK(!GetJSFunction((*env)->Global(), "f")->IsOptimized());
+}
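
The magic numbers in the five tests below all come from evaluating 7 <op> 8, since the deoptimizing valueOf returns 8:

// Expected results for f(7, new X()) with X.prototype.valueOf() == 8:
//   7 + 8 == 15       7 - 8 == -1       7 * 8 == 56
//   7 / 8 == 0.875    (Int32Value() truncates this to 0)
//   7 % 8 == 7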
+
+
+TEST(DeoptimizeBinaryOperationADD) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  TestDeoptimizeBinaryOpHelper(&env, "+");
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(15, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeBinaryOperationSUB) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  TestDeoptimizeBinaryOpHelper(&env, "-");
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(-1, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeBinaryOperationMUL) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  TestDeoptimizeBinaryOpHelper(&env, "*");
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(56, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeBinaryOperationDIV) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  TestDeoptimizeBinaryOpHelper(&env, "/");
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(0, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeBinaryOperationMOD) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  TestDeoptimizeBinaryOpHelper(&env, "%");
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(7, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeCompare) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  const char* f_source = "function f(x, y) { return x < y; };";
+
+  {
+    AllowNativesSyntaxNoInlining options;
+    // Compile function f and collect type feedback so a compare IC
+    // call is inserted in the optimized code.
+    i::FLAG_prepare_always_opt = true;
+    CompileRun("var count = 0;"
+               "var result = 0;"
+               "var deopt = false;"
+               "function X() { };"
+               "X.prototype.toString = function () {"
+               "  if (deopt) { count++; %DeoptimizeFunction(f); } return 'b'"
+               "};");
+    CompileRun(f_source);
+    CompileRun("for (var i = 0; i < 5; i++) {"
+               "  f('a', new X());"
+               "};");
+
+    // Compile an optimized version of f.
+    i::FLAG_always_opt = true;
+    CompileRun(f_source);
+    CompileRun("f('a', new X());");
+    CHECK(!i::V8::UseCrankshaft() ||
+          GetJSFunction(env->Global(), "f")->IsOptimized());
+
+    // Call f and force deoptimization while processing the comparison.
+    CompileRun("deopt = true;"
+               "var result = f('a', new X());"
+               "gc(); gc();");
+  }
+
+  CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(true, env->Global()->Get(v8_str("result"))->BooleanValue());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeLoadICStoreIC) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  // Functions to generate load/store/keyed load/keyed store IC calls.
+  const char* f1_source = "function f1(x) { return x.y; };";
+  const char* g1_source = "function g1(x) { x.y = 1; };";
+  const char* f2_source = "function f2(x, y) { return x[y]; };";
+  const char* g2_source = "function g2(x, y) { x[y] = 1; };";
+
+  {
+    AllowNativesSyntaxNoInlining options;
+    // Compile the functions and collect type feedback so IC calls are
+    // inserted in the optimized code.
+    i::FLAG_prepare_always_opt = true;
+    CompileRun("var count = 0;"
+               "var result = 0;"
+               "var deopt = false;"
+               "function X() { };"
+               "X.prototype.__defineGetter__('y', function () {"
+               "  if (deopt) { count++; %DeoptimizeFunction(f1); };"
+               "  return 13;"
+               "});"
+               "X.prototype.__defineSetter__('y', function () {"
+               "  if (deopt) { count++; %DeoptimizeFunction(g1); };"
+               "});"
+               "X.prototype.__defineGetter__('z', function () {"
+               "  if (deopt) { count++; %DeoptimizeFunction(f2); };"
+               "  return 13;"
+               "});"
+               "X.prototype.__defineSetter__('z', function () {"
+               "  if (deopt) { count++; %DeoptimizeFunction(g2); };"
+               "});");
+    CompileRun(f1_source);
+    CompileRun(g1_source);
+    CompileRun(f2_source);
+    CompileRun(g2_source);
+    CompileRun("for (var i = 0; i < 5; i++) {"
+               "  f1(new X());"
+               "  g1(new X());"
+               "  f2(new X(), 'z');"
+               "  g2(new X(), 'z');"
+               "};");
+
+    // Compile an optimized version of the functions.
+    i::FLAG_always_opt = true;
+    CompileRun(f1_source);
+    CompileRun(g1_source);
+    CompileRun(f2_source);
+    CompileRun(g2_source);
+    CompileRun("f1(new X());");
+    CompileRun("g1(new X());");
+    CompileRun("f2(new X(), 'z');");
+    CompileRun("g2(new X(), 'z');");
+    if (i::V8::UseCrankshaft()) {
+      CHECK(GetJSFunction(env->Global(), "f1")->IsOptimized());
+      CHECK(GetJSFunction(env->Global(), "g1")->IsOptimized());
+      CHECK(GetJSFunction(env->Global(), "f2")->IsOptimized());
+      CHECK(GetJSFunction(env->Global(), "g2")->IsOptimized());
+    }
+
+    // Call the functions and force deoptimization while processing the ICs.
+    CompileRun("deopt = true;"
+               "var result = f1(new X());"
+               "g1(new X());"
+               "f2(new X(), 'z');"
+               "g2(new X(), 'z');"
+               "gc(); gc();");
+  }
+
+  CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized());
+  CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized());
+  CHECK(!GetJSFunction(env->Global(), "f2")->IsOptimized());
+  CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
+  CHECK_EQ(4, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
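
Each of the four functions is deoptimized from inside the accessor its IC ends up calling. The load case in isolation, as a condensed sketch (the full test above first warms up and force-optimizes f1):

AllowNativesSyntaxNoInlining options;
CompileRun(
    "function f1(x) { return x.y; }"
    "function X() { }"
    "X.prototype.__defineGetter__('y', function () {"
    "  %DeoptimizeFunction(f1);"  // f1 deopts while its load site is active
    "  return 13;"
    "});"
    "f1(new X());");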
+
+
+TEST(DeoptimizeLoadICStoreICNested) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  // Functions to generate load/store/keyed load/keyed store IC calls.
+  const char* f1_source = "function f1(x) { return x.y; };";
+  const char* g1_source = "function g1(x) { x.y = 1; };";
+  const char* f2_source = "function f2(x, y) { return x[y]; };";
+  const char* g2_source = "function g2(x, y) { x[y] = 1; };";
+
+  {
+    AllowNativesSyntaxNoInlining options;
+    // Compile the functions and collect type feedback so IC calls are
+    // inserted in the optimized code.
+    i::FLAG_prepare_always_opt = true;
+    CompileRun("var count = 0;"
+               "var result = 0;"
+               "var deopt = false;"
+               "function X() { };"
+               "X.prototype.__defineGetter__('y', function () {"
+               "  g1(this);"
+               "  return 13;"
+               "});"
+               "X.prototype.__defineSetter__('y', function () {"
+               "  f2(this, 'z');"
+               "});"
+               "X.prototype.__defineGetter__('z', function () {"
+               "  g2(this, 'z');"
+               "});"
+               "X.prototype.__defineSetter__('z', function () {"
+               "  if (deopt) {"
+               "    count++;"
+               "    %DeoptimizeFunction(f1);"
+               "    %DeoptimizeFunction(g1);"
+               "    %DeoptimizeFunction(f2);"
+               "    %DeoptimizeFunction(g2); };"
+               "});");
+    CompileRun(f1_source);
+    CompileRun(g1_source);
+    CompileRun(f2_source);
+    CompileRun(g2_source);
+    CompileRun("for (var i = 0; i < 5; i++) {"
+               "  f1(new X());"
+               "  g1(new X());"
+               "  f2(new X(), 'z');"
+               "  g2(new X(), 'z');"
+               "};");
+
+    // Compile an optimized version of the functions.
+    i::FLAG_always_opt = true;
+    CompileRun(f1_source);
+    CompileRun(g1_source);
+    CompileRun(f2_source);
+    CompileRun(g2_source);
+    CompileRun("f1(new X());");
+    CompileRun("g1(new X());");
+    CompileRun("f2(new X(), 'z');");
+    CompileRun("g2(new X(), 'z');");
+    if (i::V8::UseCrankshaft()) {
+      CHECK(GetJSFunction(env->Global(), "f1")->IsOptimized());
+      CHECK(GetJSFunction(env->Global(), "g1")->IsOptimized());
+      CHECK(GetJSFunction(env->Global(), "f2")->IsOptimized());
+      CHECK(GetJSFunction(env->Global(), "g2")->IsOptimized());
+    }
+
+    // Call the functions and force deoptimization while processing the ICs.
+    CompileRun("deopt = true;"
+               "var result = f1(new X());"
+               "gc(); gc();");
+  }
+
+  CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized());
+  CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized());
+  CHECK(!GetJSFunction(env->Global(), "f2")->IsOptimized());
+  CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
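
The single deopt (count == 1) and the final value come from one chain of nested accessor calls, all triggered by reading x.y in f1:

// f1(x) reads x.y      -> getter y calls g1(this)
// g1(x) stores x.y     -> setter y calls f2(this, 'z')
// f2(x) reads x['z']   -> getter z calls g2(this, 'z')
// g2(x) stores x['z']  -> setter z deoptimizes all four functions (count++)
// unwinding, the y getter returns 13, which becomes `result`.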
index fbe66ec..aa5fe59 100644 (file)
@@ -511,7 +511,7 @@ static void CheckSymbols(const char** strings) {
     if (!maybe_a->ToObject(&a)) continue;
     CHECK(a->IsSymbol());
     Object* b;
-    MaybeObject* maybe_b = Heap::LookupAsciiSymbol(string);
+    MaybeObject *maybe_b = Heap::LookupAsciiSymbol(string);
     if (!maybe_b->ToObject(&b)) continue;
     CHECK_EQ(b, a);
     CHECK(String::cast(b)->IsEqualTo(CStrVector(string)));
@@ -978,7 +978,9 @@ TEST(TestCodeFlushing) {
   Handle<String> foo_name = Factory::LookupAsciiSymbol("foo");
 
   // This compile will add the code to the compilation cache.
-  CompileRun(source);
+  { v8::HandleScope scope;
+    CompileRun(source);
+  }
 
   // Check function is compiled.
   Object* func_value =
@@ -1000,8 +1002,8 @@ TEST(TestCodeFlushing) {
   Heap::CollectAllGarbage(true);
 
   // foo should no longer be in the compilation cache.
-  CHECK(!function->shared()->is_compiled());
-  CHECK(!function->is_compiled());
+  CHECK(!function->shared()->is_compiled() || function->IsOptimized());
+  CHECK(!function->is_compiled() || function->IsOptimized());
   // Call foo to get it recompiled.
   CompileRun("foo()");
   CHECK(function->shared()->is_compiled());
@@ -1021,6 +1023,20 @@ static int CountGlobalContexts() {
 }
 
 
+// Count the number of user functions in the weak list of optimized
+// functions attached to a global context.
+static int CountOptimizedUserFunctions(v8::Handle<v8::Context> context) {
+  int count = 0;
+  Handle<Context> icontext = v8::Utils::OpenHandle(*context);
+  Object* object = icontext->get(Context::OPTIMIZED_FUNCTIONS_LIST);
+  while (object->IsJSFunction() && !JSFunction::cast(object)->IsBuiltin()) {
+    count++;
+    object = JSFunction::cast(object)->next_function_link();
+  }
+  return count;
+}
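
The helper walks a singly-linked list threaded through the function objects themselves; its shape, as assumed by this test:

// context->get(Context::OPTIMIZED_FUNCTIONS_LIST)
//     -> JSFunction -> JSFunction -> ... -> terminator (non-user function)
// Each link is JSFunction::next_function_link(). Scavenges treat the
// links as strong; mark-compact clears links to unreachable functions.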
+
+
 TEST(TestInternalWeakLists) {
   static const int kNumTestContexts = 10;
 
@@ -1032,9 +1048,63 @@ TEST(TestInternalWeakLists) {
   // Create a number of global contexts which get linked together.
   for (int i = 0; i < kNumTestContexts; i++) {
     ctx[i] = v8::Context::New();
+
+    bool opt = (FLAG_always_opt && i::V8::UseCrankshaft());
+
     CHECK_EQ(i + 1, CountGlobalContexts());
 
     ctx[i]->Enter();
+
+    // Create a handle scope so that no function objects get stuck in
+    // the outer handle scope.
+    v8::HandleScope scope;
+    const char* source = "function f1() { };"
+                         "function f2() { };"
+                         "function f3() { };"
+                         "function f4() { };"
+                         "function f5() { };";
+    CompileRun(source);
+    CHECK_EQ(0, CountOptimizedUserFunctions(ctx[i]));
+    CompileRun("f1()");
+    CHECK_EQ(opt ? 1 : 0, CountOptimizedUserFunctions(ctx[i]));
+    CompileRun("f2()");
+    CHECK_EQ(opt ? 2 : 0, CountOptimizedUserFunctions(ctx[i]));
+    CompileRun("f3()");
+    CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
+    CompileRun("f4()");
+    CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
+    CompileRun("f5()");
+    CHECK_EQ(opt ? 5 : 0, CountOptimizedUserFunctions(ctx[i]));
+
+    // Remove function f1 by clearing the global reference.
+    CompileRun("f1=null");
+
+    // Scavenge treats these references as strong.
+    for (int j = 0; j < 10; j++) {
+      Heap::PerformScavenge();
+      CHECK_EQ(opt ? 5 : 0, CountOptimizedUserFunctions(ctx[i]));
+    }
+
+    // Mark compact handles the weak references.
+    Heap::CollectAllGarbage(true);
+    CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
+
+    // Get rid of f3 and f5 in the same way.
+    CompileRun("f3=null");
+    for (int j = 0; j < 10; j++) {
+      Heap::PerformScavenge();
+      CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
+    }
+    Heap::CollectAllGarbage(true);
+    CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
+    CompileRun("f5=null");
+    for (int j = 0; j < 10; j++) {
+      Heap::PerformScavenge();
+      CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
+    }
+    Heap::CollectAllGarbage(true);
+    CHECK_EQ(opt ? 2 : 0, CountOptimizedUserFunctions(ctx[i]));
+
     ctx[i]->Exit();
   }
 
@@ -1076,6 +1146,25 @@ static int CountGlobalContextsWithGC(int n) {
 }
 
 
+// Count the number of user functions in the weak list of optimized
+// functions attached to a global context, triggering a GC after
+// visiting the specified number of elements.
+static int CountOptimizedUserFunctionsWithGC(v8::Handle<v8::Context> context,
+                                             int n) {
+  int count = 0;
+  Handle<Context> icontext = v8::Utils::OpenHandle(*context);
+  Handle<Object> object(icontext->get(Context::OPTIMIZED_FUNCTIONS_LIST));
+  while (object->IsJSFunction() &&
+         !Handle<JSFunction>::cast(object)->IsBuiltin()) {
+    count++;
+    if (count == n) Heap::CollectAllGarbage(true);
+    object = Handle<Object>(
+        Object::cast(JSFunction::cast(*object)->next_function_link()));
+  }
+  return count;
+}
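
Unlike CountOptimizedUserFunctions above, this variant re-wraps every list element in a Handle<Object>:

// Rationale (sketch): CollectAllGarbage can move heap objects, so a raw
// Object* held across the collection would be left dangling, while a
// handle is updated by the GC. Hence each next_function_link() is put
// into a fresh handle before the traversal possibly triggers a GC.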
+
+
 TEST(TestInternalWeakListsTraverseWithGC) {
   static const int kNumTestContexts = 10;
 
@@ -1090,10 +1179,37 @@ TEST(TestInternalWeakListsTraverseWithGC) {
     ctx[i] = v8::Context::New();
     CHECK_EQ(i + 1, CountGlobalContexts());
     CHECK_EQ(i + 1, CountGlobalContextsWithGC(i / 2 + 1));
-
-    ctx[i]->Enter();
-    ctx[i]->Exit();
   }
+
+  bool opt = (FLAG_always_opt && i::V8::UseCrankshaft());
+
+  // Compile a number of functions and check the length of the weak list
+  // of optimized functions, both with and without GCs while iterating
+  // the list.
+  ctx[0]->Enter();
+  const char* source = "function f1() { };"
+                       "function f2() { };"
+                       "function f3() { };"
+                       "function f4() { };"
+                       "function f5() { };";
+  CompileRun(source);
+  CHECK_EQ(0, CountOptimizedUserFunctions(ctx[0]));
+  CompileRun("f1()");
+  CHECK_EQ(opt ? 1 : 0, CountOptimizedUserFunctions(ctx[0]));
+  CHECK_EQ(opt ? 1 : 0, CountOptimizedUserFunctionsWithGC(ctx[0], 1));
+  CompileRun("f2()");
+  CHECK_EQ(opt ? 2 : 0, CountOptimizedUserFunctions(ctx[0]));
+  CHECK_EQ(opt ? 2 : 0, CountOptimizedUserFunctionsWithGC(ctx[0], 1));
+  CompileRun("f3()");
+  CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[0]));
+  CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctionsWithGC(ctx[0], 1));
+  CompileRun("f4()");
+  CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[0]));
+  CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctionsWithGC(ctx[0], 2));
+  CompileRun("f5()");
+  CHECK_EQ(opt ? 5 : 0, CountOptimizedUserFunctions(ctx[0]));
+  CHECK_EQ(opt ? 5 : 0, CountOptimizedUserFunctionsWithGC(ctx[0], 4));
+
+  ctx[0]->Exit();
 }
 
 
index 65be6bd..c85f6c0 100644 (file)
@@ -39,6 +39,7 @@
 #include "cctest.h"
 #include "disassembler.h"
 #include "register-allocator-inl.h"
+#include "vm-state-inl.h"
 
 using v8::Function;
 using v8::Local;
@@ -200,6 +201,7 @@ static void InitializeVM() {
 
 
 static void CheckJSFunctionAtAddress(const char* func_name, Address addr) {
+  CHECK(i::Heap::Contains(addr));
   i::Object* obj = i::HeapObject::FromAddress(addr);
   CHECK(obj->IsJSFunction());
   CHECK(JSFunction::cast(obj)->shared()->name()->IsString());
@@ -298,10 +300,17 @@ TEST(CFromJSStackTrace) {
   //       trace(EBP) [native (extension)]
   //         DoTrace(EBP) [native]
   //           StackTracer::Trace
-  CHECK_GT(sample.frames_count, 1);
+
+  // The VM state tracking keeps track of external callbacks and puts
+  // them at the top of the sample stack.
+  int base = 0;
+  CHECK(sample.stack[0] == FUNCTION_ADDR(TraceExtension::Trace));
+  base++;
+
   // Stack tracing will start from the first JS function, i.e. "JSFuncDoTrace"
-  CheckJSFunctionAtAddress("JSFuncDoTrace", sample.stack[0]);
-  CheckJSFunctionAtAddress("JSTrace", sample.stack[1]);
+  CHECK_GT(sample.frames_count, base + 1);
+  CheckJSFunctionAtAddress("JSFuncDoTrace", sample.stack[base + 0]);
+  CheckJSFunctionAtAddress("JSTrace", sample.stack[base + 1]);
 }
 
 
@@ -311,6 +320,10 @@ TEST(CFromJSStackTrace) {
 // Top::c_entry_fp value. In this case, StackTracer uses passed frame
 // pointer value as a starting point for stack walking.
 TEST(PureJSStackTrace) {
+  // This test does not pass with inlining enabled since inlined functions
+  // don't appear in the stack trace.
+  i::FLAG_use_inlining = false;
+
   TickSample sample;
   InitTraceEnv(&sample);
 
@@ -341,10 +354,17 @@ TEST(PureJSStackTrace) {
   // The last JS function called. It is only visible through
   // sample.function, as its return address is above captured EBP value.
   CheckJSFunctionAtAddress("JSFuncDoTrace", sample.function);
-  CHECK_GT(sample.frames_count, 1);
+
+  // The VM state tracking keeps track of external callbacks and puts
+  // them at the top of the sample stack.
+  int base = 0;
+  CHECK(sample.stack[0] == FUNCTION_ADDR(TraceExtension::JSTrace));
+  base++;
+
   // Stack sampling will start from the caller of JSFuncDoTrace, i.e. "JSTrace"
-  CheckJSFunctionAtAddress("JSTrace", sample.stack[0]);
-  CheckJSFunctionAtAddress("OuterJSTrace", sample.stack[1]);
+  CHECK_GT(sample.frames_count, base + 1);
+  CheckJSFunctionAtAddress("JSTrace", sample.stack[base + 0]);
+  CheckJSFunctionAtAddress("OuterJSTrace", sample.stack[base + 1]);
 }
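
The base offset introduced in both stack-trace tests reflects the new sample layout, sketched here:

// sample.stack[0]         address of the external callback (VM state)
// sample.stack[base + 0]  first JS function found by the stack walk
// sample.stack[base + 1]  its caller, and so on (base == 1 whenever an
//                         external callback was active at sample time)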
 
 
index 710c10e..503e0cf 100644 (file)
@@ -16,6 +16,7 @@
 #include "cpu-profiler.h"
 #include "v8threads.h"
 #include "cctest.h"
+#include "vm-state-inl.h"
 
 using v8::internal::Address;
 using v8::internal::EmbeddedVector;
@@ -246,7 +247,8 @@ class LogBufferMatcher {
 
 
 static void CheckThatProfilerWorks(LogBufferMatcher* matcher) {
-  CHECK(!LoggerTestHelper::IsSamplerActive());
+  CHECK(i::RuntimeProfiler::IsEnabled() ||
+        !LoggerTestHelper::IsSamplerActive());
   LoggerTestHelper::ResetSamplesTaken();
 
   Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 0);
@@ -272,7 +274,8 @@ static void CheckThatProfilerWorks(LogBufferMatcher* matcher) {
   }
 
   Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 0);
-  CHECK(!LoggerTestHelper::IsSamplerActive());
+  CHECK(i::RuntimeProfiler::IsEnabled() ||
+        !LoggerTestHelper::IsSamplerActive());
 
   // Wait 50 msecs to allow Profiler thread to process the last
   // tick sample it has got.
@@ -291,8 +294,12 @@ static void CheckThatProfilerWorks(LogBufferMatcher* matcher) {
 TEST(ProfLazyMode) {
   ScopedLoggerInitializer initialize_logger(true);
 
-  // No sampling should happen prior to resuming profiler.
-  CHECK(!LoggerTestHelper::IsSamplerActive());
+  if (!i::V8::UseCrankshaft()) return;
+
+  // No sampling should happen prior to resuming the profiler unless we
+  // are runtime profiling.
+  CHECK(i::RuntimeProfiler::IsEnabled() ||
+        !LoggerTestHelper::IsSamplerActive());
 
   LogBufferMatcher matcher;
   // Nothing must be logged until profiling is resumed.
@@ -403,7 +410,7 @@ class LoopingNonJsThread : public LoopingThread {
 class TestSampler : public v8::internal::Sampler {
  public:
   TestSampler()
-      : Sampler(0, true),
+      : Sampler(0, true, true),
         semaphore_(v8::internal::OS::CreateSemaphore(0)),
         was_sample_stack_called_(false) {
   }
@@ -431,30 +438,38 @@ class TestSampler : public v8::internal::Sampler {
 }  // namespace
 
 TEST(ProfMultipleThreads) {
+  TestSampler* sampler = NULL;
+  {
+    v8::Locker locker;
+    sampler = new TestSampler();
+    sampler->Start();
+    CHECK(sampler->IsActive());
+  }
+
   LoopingJsThread jsThread;
   jsThread.Start();
   LoopingNonJsThread nonJsThread;
   nonJsThread.Start();
 
-  TestSampler sampler;
-  sampler.Start();
-  CHECK(!sampler.WasSampleStackCalled());
+  CHECK(!sampler->WasSampleStackCalled());
   jsThread.WaitForRunning();
   jsThread.SendSigProf();
-  CHECK(sampler.WaitForTick());
-  CHECK(sampler.WasSampleStackCalled());
-  sampler.Reset();
-  CHECK(!sampler.WasSampleStackCalled());
+  CHECK(sampler->WaitForTick());
+  CHECK(sampler->WasSampleStackCalled());
+  sampler->Reset();
+  CHECK(!sampler->WasSampleStackCalled());
   nonJsThread.WaitForRunning();
   nonJsThread.SendSigProf();
-  CHECK(!sampler.WaitForTick());
-  CHECK(!sampler.WasSampleStackCalled());
-  sampler.Stop();
+  CHECK(!sampler->WaitForTick());
+  CHECK(!sampler->WasSampleStackCalled());
+  sampler->Stop();
 
   jsThread.Stop();
   nonJsThread.Stop();
   jsThread.Join();
   nonJsThread.Join();
+
+  delete sampler;
 }
 
 #endif  // __linux__
index 9942567..86f105f 100644 (file)
@@ -71,6 +71,10 @@ TEST(MarkingStack) {
 
 
 TEST(Promotion) {
+  // This test requires compaction. If compaction is turned off, we
+  // skip the entire test.
+  if (FLAG_never_compact) return;
+
   // Ensure that we get a compacting collection so that objects are promoted
   // from new space.
   FLAG_gc_global = true;
index f46191a..a0733ef 100644 (file)
@@ -757,6 +757,10 @@ static const ProfileNode* PickChild(const ProfileNode* parent,
 
 
 TEST(RecordStackTraceAtStartProfiling) {
+  // This test does not pass with inlining enabled since inlined functions
+  // don't appear in the stack trace.
+  i::FLAG_use_inlining = false;
+
   if (env.IsEmpty()) {
     v8::HandleScope scope;
     const char* extensions[] = { "v8/profiler" };
index b399a4e..706c6bf 100644 (file)
@@ -95,13 +95,13 @@ TEST(MemoryAllocator) {
 
   OldSpace faked_space(Heap::MaxReserved(), OLD_POINTER_SPACE, NOT_EXECUTABLE);
   int total_pages = 0;
-  int requested = 2;
+  int requested = MemoryAllocator::kPagesPerChunk;
   int allocated;
-  // If we request two pages, we should get one or two.
+  // If we request n pages, we should get n or n - 1.
   Page* first_page =
       MemoryAllocator::AllocatePages(requested, &allocated, &faked_space);
   CHECK(first_page->is_valid());
-  CHECK(allocated > 0 && allocated <= 2);
+  CHECK(allocated == requested || allocated == requested - 1);
   total_pages += allocated;
 
   Page* last_page = first_page;
@@ -110,11 +110,11 @@ TEST(MemoryAllocator) {
     last_page = p;
   }
 
-  // Again, we should get one or two pages.
+  // Again, we should get n or n - 1 pages.
   Page* others =
       MemoryAllocator::AllocatePages(requested, &allocated, &faked_space);
   CHECK(others->is_valid());
-  CHECK(allocated > 0 && allocated <= 2);
+  CHECK(allocated == requested || allocated == requested - 1);
   total_pages += allocated;
 
   MemoryAllocator::SetNextPage(last_page, others);
@@ -129,11 +129,10 @@ TEST(MemoryAllocator) {
   CHECK(second_page->is_valid());
 
   // Freeing pages at the first chunk starting at or after the second page
-  // should free the entire second chunk.  It will return the last page in the
-  // first chunk (if the second page was in the first chunk) or else an
-  // invalid page (if the second page was the start of the second chunk).
+  // should free the entire second chunk.  It will return the page it was passed
+  // (since the second page was in the first chunk).
   Page* free_return = MemoryAllocator::FreePages(second_page);
-  CHECK(free_return == last_page || !free_return->is_valid());
+  CHECK(free_return == second_page);
   MemoryAllocator::SetNextPage(first_page, free_return);
 
   // Freeing pages in the first chunk starting at the first page should free
index 88ef0a2..b48dcb8 100644 (file)
@@ -103,6 +103,7 @@ void TestMemCopy(Vector<byte> src,
 
 
 TEST(MemCopy) {
+  V8::Initialize(NULL);
   const int N = kMinComplexMemCopy + 128;
   Vector<byte> buffer1 = Vector<byte>::New(N);
   Vector<byte> buffer2 = Vector<byte>::New(N);
index 6d26855..6bec4b7 100644 (file)
@@ -74,6 +74,20 @@ static void CheckVersion(int major, int minor, int build,
 
 
 TEST(VersionString) {
+#ifdef USE_SIMULATOR
+  CheckVersion(0, 0, 0, 0, false, "0.0.0 SIMULATOR", "libv8-0.0.0.so");
+  CheckVersion(0, 0, 0, 0, true,
+               "0.0.0 (candidate) SIMULATOR", "libv8-0.0.0-candidate.so");
+  CheckVersion(1, 0, 0, 0, false, "1.0.0 SIMULATOR", "libv8-1.0.0.so");
+  CheckVersion(1, 0, 0, 0, true,
+               "1.0.0 (candidate) SIMULATOR", "libv8-1.0.0-candidate.so");
+  CheckVersion(1, 0, 0, 1, false, "1.0.0.1 SIMULATOR", "libv8-1.0.0.1.so");
+  CheckVersion(1, 0, 0, 1, true,
+               "1.0.0.1 (candidate) SIMULATOR", "libv8-1.0.0.1-candidate.so");
+  CheckVersion(2, 5, 10, 7, false, "2.5.10.7 SIMULATOR", "libv8-2.5.10.7.so");
+  CheckVersion(2, 5, 10, 7, true,
+               "2.5.10.7 (candidate) SIMULATOR", "libv8-2.5.10.7-candidate.so");
+#else
   CheckVersion(0, 0, 0, 0, false, "0.0.0", "libv8-0.0.0.so");
   CheckVersion(0, 0, 0, 0, true,
                "0.0.0 (candidate)", "libv8-0.0.0-candidate.so");
@@ -86,4 +100,5 @@ TEST(VersionString) {
   CheckVersion(2, 5, 10, 7, false, "2.5.10.7", "libv8-2.5.10.7.so");
   CheckVersion(2, 5, 10, 7, true,
                "2.5.10.7 (candidate)", "libv8-2.5.10.7-candidate.so");
+#endif
 }
index 5add082..cc641df 100644 (file)
@@ -29,6 +29,8 @@ prefix es5conform
 def UNIMPLEMENTED = PASS || FAIL
 def FAIL_OK = FAIL, OKAY
 
+
+##############################################################################
 # Non UTF8 characters in test files.
 chapter10/10.4/10.4.2/10.4.2-3-c-2-s: FAIL_OK
 chapter10/10.4/10.4.2/10.4.2-3-c-1-s: FAIL_OK
index c4a3842..70354ce 100644 (file)
@@ -30,6 +30,8 @@ prefix message
 # All tests in the bug directory are expected to fail.
 bugs: FAIL
 
+
+##############################################################################
 [ $arch == mips ]
 
 # Skip all tests on MIPS.
index d23fe35..58a62a8 100644 (file)
@@ -36,4 +36,5 @@ function f() {
   }
 }
 
-print(f());
+var result = f();
+if (result != 42) print("Wrong result: " + result);
index 1c42ee0..f59f5c6 100644 (file)
@@ -24,5 +24,3 @@
 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-42
index 6ec8970..0e46193 100644 (file)
@@ -34,4 +34,5 @@ function f() {
   }
 }
 
-print(f());
+var result = f();
+if (result != 42) print("Wrong result: " + result);
index 1c42ee0..f59f5c6 100644 (file)
@@ -24,5 +24,3 @@
 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-42
diff --git a/test/mjsunit/accessors-on-global-object.js b/test/mjsunit/accessors-on-global-object.js
new file mode 100644 (file)
index 0000000..8d95692
--- /dev/null
@@ -0,0 +1,72 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that installing a getter on the global object instead of a
+// normal property works.
+
+var x = 0;
+
+function getX() { return x; }
+
+for (var i = 0; i < 10; i++) {
+  assertEquals(i < 5 ? 0 : 42, getX());
+  if (i == 4) __defineGetter__("x", function() { return 42; });
+}
+
+
+// Test that installing a setter on the global object instead of a
+// normal property works.
+
+var y = 0;
+var setter_y;
+
+function setY(value) { y = value; }
+
+for (var i = 0; i < 10; i++) {
+  setY(i);
+  assertEquals(i < 5 ? i : 2 * i, y);
+  if (i == 4) {
+    __defineSetter__("y", function(value) { setter_y = 2 * value; });
+    __defineGetter__("y", function() { return setter_y; });
+  }
+}
+
+
+// Test that replacing a getter with a normal property works as
+// expected.
+
+__defineGetter__("z", function() { return 42; });
+
+function getZ() { return z; }
+
+for (var i = 0; i < 10; i++) {
+  assertEquals(i < 5 ? 42 : 0, getZ());
+  if (i == 4) {
+    delete z;
+    var z = 0;
+  }
+}
diff --git a/test/mjsunit/apply-arguments-gc-safepoint.js b/test/mjsunit/apply-arguments-gc-safepoint.js
new file mode 100644 (file)
index 0000000..57ed8cc
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc
+
+// Test that safepoint tables are correctly generated for apply with
+// arguments in the case where arguments adaption is needed.
+
+function f(x, y) {
+  if (x == 149999) gc();
+  return x + y;
+}
+
+function g() {
+  f.apply(this, arguments);
+}
+
+for (var i = 0; i < 150000; i++) {
+  g(i);
+}
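
The stack at the moment gc() runs looks roughly like this, which is what the safepoint table for f has to describe:

// f(x, y)            <- optimized frame; live values x and y must be in
//                       the safepoint table so the GC can relocate them
// arguments adaptor  <- needed because g forwards a 1-element arguments
//                       object to the 2-parameter function f via apply
// g(i)               <- caller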
index ea0dc61..b68ee73 100644 (file)
@@ -55,7 +55,7 @@ function assertHasOwnProperties(object, limit) {
 // shift.
 // ----------------------------------------------------------------------
 
-function runTest() {
+function runTest1() {
   var nonArray = new constructor();
   var array = ['zero', , 'two'];
   // Shift away the zero.
@@ -80,13 +80,13 @@ function runTest() {
   assertEquals('two', nonArray[2]);
 }
 
-runTest();
+runTest1();
 
 // ----------------------------------------------------------------------
 // unshift.
 // ----------------------------------------------------------------------
 
-runTest = function() {
+runTest2 = function() {
   var nonArray = new constructor();
   var array = ['zero', , 'two'];
   // Unshift a new 'zero'.
@@ -110,14 +110,14 @@ runTest = function() {
   assertEquals('two', nonArray[3]);
 }
 
-runTest();
+runTest2();
 
 
 // ----------------------------------------------------------------------
 // splice
 // ----------------------------------------------------------------------
 
-runTest = function() {
+runTest3 = function() {
   var nonArray = new constructor();
   var array = ['zero', , 'two'];
   // Delete the first element by splicing in nothing.
@@ -140,14 +140,14 @@ runTest = function() {
   assertEquals('two', nonArray[2]);
 };
 
-runTest();
+runTest3();
 
 
 // ----------------------------------------------------------------------
 // slice
 // ----------------------------------------------------------------------
 
-runTest = function() {
+runTest4 = function() {
   var nonArray = new constructor();
   var array = ['zero', , 'two'];
   // Again Spidermonkey is inconsistent.  (array.slice(0, 3))[1] is
@@ -156,4 +156,4 @@ runTest = function() {
   assertArrayEquals(['zero', 'one', 'two'], Array.prototype.slice.call(nonArray, 0, 3));
 };
 
-runTest();
+runTest4();
index 8e7f189..cd53863 100644 (file)
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --nofull-compiler --nofast-compiler
-
 // Test paths in the code generator where values in specific registers
 // get moved around.
 function identity(x) {
diff --git a/test/mjsunit/compiler/alloc-number.js b/test/mjsunit/compiler/alloc-number.js
new file mode 100644 (file)
index 0000000..85c39de
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Try to get a GC because of a heap number allocation while we
+// have live values (o) in a register.
+function f(o) {
+  var x = 1.5;
+  var y = 2.5;
+  for (var i = 1; i < 100000; i+=2) o.val = x + y + i;
+  return o;
+}
+
+var o = { val: 0 };
+for (var i = 0; i < 100; i++) f(o);
diff --git a/test/mjsunit/compiler/array-access.js b/test/mjsunit/compiler/array-access.js
new file mode 100644 (file)
index 0000000..65b3c99
--- /dev/null
@@ -0,0 +1,132 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function Get0(a) {
+  return a[0];
+}
+
+function GetN(a,n) {
+  return a[n];
+}
+
+function GetA0(a) {
+  return a[a[0]];
+}
+
+function GetAN(a,n) {
+  return a[a[n]];
+}
+
+function GetAAN(a,n) {
+  return a[a[a[n]]];
+}
+
+function RunGetTests() {
+  var a = [2,0,1];
+  assertEquals(2, Get0(a));
+
+  assertEquals(2, GetN(a, 0));
+  assertEquals(0, GetN(a, 1));
+  assertEquals(1, GetN(a, 2));
+
+  assertEquals(1, GetA0(a));
+
+  assertEquals(1, GetAN(a,0));
+  assertEquals(2, GetAN(a,1));
+  assertEquals(0, GetAN(a,2));
+
+  assertEquals(0, GetAAN(a,0));
+  assertEquals(1, GetAAN(a,1));
+  assertEquals(2, GetAAN(a,2));
+}
+
+
+function Set07(a) {
+  a[0] = 7;
+}
+
+function Set0V(a, v) {
+  a[0] = v;
+}
+
+function SetN7(a, n) {
+  a[n] = 7;
+}
+
+function SetNX(a, n, x) {
+  a[n] = x;
+}
+
+function RunSetTests(a) {
+  Set07(a);
+  assertEquals(7, a[0]);
+  assertEquals(0, a[1]);
+  assertEquals(0, a[2]);
+
+  Set0V(a, 1);
+  assertEquals(1, a[0]);
+  assertEquals(0, a[1]);
+  assertEquals(0, a[2]);
+
+  SetN7(a, 2);
+  assertEquals(1, a[0]);
+  assertEquals(0, a[1]);
+  assertEquals(7, a[2]);
+
+  SetNX(a, 1, 5);
+  assertEquals(1, a[0]);
+  assertEquals(5, a[1]);
+  assertEquals(7, a[2]);
+
+  for (var i = 0; i < 3; i++) SetNX(a, i, 0);
+  assertEquals(0, a[0]);
+  assertEquals(0, a[1]);
+  assertEquals(0, a[2]);
+}
+
+function RunArrayBoundsCheckTest() {
+  var g = [1,2,3];
+
+  function f(a, i) { a[i] = 42; }
+
+  for (var i = 0; i < 100000; i++) { f(g, 0); }
+
+  f(g, 4);
+
+  assertEquals(42, g[0]);
+  assertEquals(42, g[4]);
+}
+
+var a = [0,0,0];
+var o = {0: 0, 1: 0, 2: 0};
+for (var i = 0; i < 1000; i++) {
+  RunGetTests();
+  RunSetTests(a);
+  RunSetTests(o);
+}
+
+RunArrayBoundsCheckTest();
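
What the bounds-check test above relies on, in brief:

// The hot loop specializes f for in-bounds stores; f(g, 4) then writes
// one element past g.length, so the optimized code must bail out of its
// bounds check and take the generic store path (growing the array),
// leaving g[0] == 42 from the hot path and g[4] == 42 from the escape.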
diff --git a/test/mjsunit/compiler/array-length.js b/test/mjsunit/compiler/array-length.js
new file mode 100644 (file)
index 0000000..7adb9ab
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function ArrayLength(a) { return a.length; }
+
+function Test(a0, a2, a5) {
+  assertEquals(0, ArrayLength(a0));
+  assertEquals(2, ArrayLength(a2));
+  assertEquals(5, ArrayLength(a5));
+}
+
+var a0 = [];
+var a2 = [1,2];
+var a5 = [1,2,3,4,5];
+for (var i = 0; i < 10000000; i++) Test(a0, a2, a5);
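+// A non-array receiver fails the map check for the optimized length
+// load, forcing a deopt; (0).length is undefined.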
+assertEquals("undefined", typeof(ArrayLength(0)));
+for (var i = 0; i < 10000000; i++) Test(a0, a2, a5);
+assertEquals(4, ArrayLength("hest"));
diff --git a/test/mjsunit/compiler/assignment-deopt.js b/test/mjsunit/compiler/assignment-deopt.js
new file mode 100644 (file)
index 0000000..74f185b
--- /dev/null
@@ -0,0 +1,146 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test deopt with compound assignment to a parameter.
+var max_smi = 1073741823;
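+// max_smi is 2^30 - 1, the largest smi on 32-bit V8; adding one
+// overflows the smi range and triggers a deopt.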
+var o = {x:0};
+
+function assign1(x) { x += 1; o.x = x; }
+assign1(max_smi);
+assertEquals(max_smi + 1, o.x);
+
+assign1(1.1);
+assertEquals(2.1, o.x);
+
+
+// Test deopt with compound assignment to a named property.
+function assign2(p) { p.x += 1 }
+
+o.x = "42";
+assign2(o);
+assertEquals("421", o.x);
+
+var s = max_smi - 10000;
+o.x = s;
+for (var i = 0; i < 20000; i++) {
+  assign2(o);
+}
+assertEquals(max_smi + 10000, o.x);
+
+
+// Test deopt with compound assignment to a keyed property.
+function assign3(a, b) { a[b] += 1; }
+
+o = ["42"];
+assign3(o, 0);
+assertEquals("421", o[0]);
+
+var s = max_smi - 10000;
+o[0] = s;
+for (var i = 0; i < 20000; i++) {
+  assign3(o, 0);
+}
+assertEquals(max_smi + 10000, o[0]);
+
+assign3(o,"0");
+
+assertEquals(max_smi + 10001, o[0]);
+
+// Test bailout when accessing a non-existing array element.
+o[0] = 0;
+for (var i = 0; i < 10000; i++) {
+  assign3(o, 0);
+}
+assign3(o, 1);
+
+// Test bailout with compound assignment in a value context.
+function assign5(x,y) { return (x += 1) + y; }
+for (var i = 0; i < 10000; ++i) assertEquals(4, assign5(2, 1));
+assertEquals(4.1, assign5(2, 1.1));
+assertEquals(4.1, assign5(2.1, 1));
+
+function assign7(o,y) { return (o.x += 1) + y; }
+o = {x:0};
+for (var i = 0; i < 10000; ++i) {
+  o.x = 42;
+  assertEquals(44, assign7(o, 1));
+}
+o.x = 42;
+assertEquals(44.1, assign7(o, 1.1));
+o.x = 42.1;
+assertEquals(44.1, assign7(o, 1));
+
+function assign9(o,y) { return (o[0] += 1) + y; }
+q = [0];
+for (var i = 0; i < 10000; ++i) {
+  q[0] = 42;
+  assertEquals(44, assign9(q, 1));
+}
+q[0] = 42;
+assertEquals(44.1, assign9(q, 1.1));
+q[0] = 42.1;
+assertEquals(44.1, assign9(q, 1));
+
+// Test deopt because of a failed map check on the load.
+function assign10(p) { return p.x += 1 }
+var g1 = {x:0};
+var g2 = {y:0, x:42};
+for (var i = 0; i < 10000; ++i) {
+  g1.x = 42;
+  assertEquals(43, assign10(g1));
+  assertEquals(43, g1.x);
+}
+assertEquals(43, assign10(g2));
+assertEquals(43, g2.x);
+
+// Test deopt because of a failed map check on the store.
+// The binary operation changes the map as a side effect.
+o = {x:0};
+var g3 = { valueOf: function() { o.y = "bar"; return 42; }};
+function assign11(p) { return p.x += 1; }
+
+for (var i = 0; i < 10000; i++) {
+  o.x = "a";
+  assign11(o);
+}
+assertEquals("a11", assign11(o));
+o.x = g3;
+assertEquals(43, assign11(o));
+assertEquals("bar", o.y);
+
+o = [0];
+var g4 = { valueOf: function() { o.y = "bar"; return 42; }};
+function assign12(p) { return p[0] += 1; }
+
+for (var i = 0; i < 1000000; i++) {
+  o[0] = "a";
+  assign12(o);
+}
+assertEquals("a11", assign12(o));
+o[0] = g4;
+assertEquals(43, assign12(o));
+assertEquals("bar", o.y);
index 6aded4e..1f3f282 100644 (file)
@@ -264,6 +264,13 @@ function bar_loop() {
 bar_loop();
 
 
+// Test assignment in test context.
+function test_assign(x, y) { if (x = y) return x; }
+
+assertEquals(42, test_assign(0, 42));
+
+assertEquals("undefined", typeof test_assign(42, 0));
+
 // Test for assignment using a keyed store ic:
 function store_i_in_element_i_of_object_i() {
   var i = new Object();
diff --git a/test/mjsunit/compiler/binary-ops.js b/test/mjsunit/compiler/binary-ops.js
new file mode 100644 (file)
index 0000000..27745c1
--- /dev/null
@@ -0,0 +1,55 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
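+// "Span" below presumably refers to a value's live range in the
+// register allocator.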
+// Values in distinct spans.
+function or_test0(x, y) { return x | y; }
+function and_test0(x, y) { return x & y; }
+function add_test0(x, y) { return x + y; }
+
+assertEquals(3, or_test0(1, 2));   // 1 | 2
+assertEquals(2, and_test0(3, 6));  // 3 & 6
+assertEquals(5, add_test0(2, 3));  // 2 + 3
+
+
+// Values in the same span.
+function or_test1(x, y) { return x | x; }
+function and_test1(x, y) { return x & x; }
+function add_test1(x, y) { return x + x; }
+
+assertEquals(1, or_test1(1, 2));   // 1 | 1
+assertEquals(3, and_test1(3, 6));  // 3 & 3
+assertEquals(4, add_test1(2, 3));  // 2 + 2
+
+
+// Values in distinct spans that alias.
+function or_test2(x, y) { x = y; return x | y; }
+function and_test2(x, y) { x = y; return x & y; }
+function add_test2(x, y) { x = y; return x + y; }
+
+assertEquals(2, or_test2(1, 2));   // 2 | 2
+assertEquals(6, and_test2(3, 6));  // 6 & 6
+assertEquals(6, add_test2(2, 3));  // 3 + 3
diff --git a/test/mjsunit/compiler/call-keyed.js b/test/mjsunit/compiler/call-keyed.js
new file mode 100644 (file)
index 0000000..d442212
--- /dev/null
@@ -0,0 +1,38 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+A = {};
+A.i = [];
+A.i.push(function () { });
+A.i.push(function () { });
+
+function f(event) {
+  for (var i = 0, j = A.i.length; i < j; ++i)
+    A.i[i]();
+}
+
+f(null);
diff --git a/test/mjsunit/compiler/compare.js b/test/mjsunit/compiler/compare.js
new file mode 100644 (file)
index 0000000..3f96087
--- /dev/null
@@ -0,0 +1,108 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function MaxLT(x, y) {
+  if (x < y) return y;
+  return x;
+}
+
+function MaxLE(x, y) {
+  if (x <= y) return y;
+  return x;
+}
+
+function MaxGE(x, y) {
+  if (x >= y) return x;
+  return y;
+}
+
+function MaxGT(x, y) {
+  if (x > y) return x;
+  return y;
+}
+
+
+// First test primitive values.
+function TestPrimitive(max, x, y) {
+  assertEquals(max, MaxLT(x, y), "MaxLT - primitive");
+  assertEquals(max, MaxLE(x, y), "MaxLE - primitive");
+  assertEquals(max, MaxGE(x, y), "MaxGE - primitive");
+  assertEquals(max, MaxGT(x, y), "MaxGT - primitive");
+}
+
+TestPrimitive(1, 0, 1);
+TestPrimitive(1, 1, 0);
+TestPrimitive(4, 3, 4);
+TestPrimitive(4, 4, 3);
+TestPrimitive(0, -1, 0);
+TestPrimitive(0, 0, -1);
+TestPrimitive(-2, -2, -3);
+TestPrimitive(-2, -3, -2);
+
+TestPrimitive(1, 0.1, 1);
+TestPrimitive(1, 1, 0.1);
+TestPrimitive(4, 3.1, 4);
+TestPrimitive(4, 4, 3.1);
+TestPrimitive(0, -1.1, 0);
+TestPrimitive(0, 0, -1.1);
+TestPrimitive(-2, -2, -3.1);
+TestPrimitive(-2, -3.1, -2);
+
+
+// Test non-primitive values and watch for valueOf call order.
+function TestNonPrimitive(order, f) {
+  var result = "";
+  var x = { valueOf: function() { result += "x"; } };
+  var y = { valueOf: function() { result += "y"; } };
+  f(x, y);
+  assertEquals(order, result);
+}
+
+TestNonPrimitive("xy", MaxLT);
+TestNonPrimitive("yx", MaxLE);
+TestNonPrimitive("xy", MaxGE);
+TestNonPrimitive("yx", MaxGT);
+
+// Test compare in case of aliased registers.
+function CmpX(x) { if (x == x) return 42; }
+assertEquals(42, CmpX(0));
+
+function CmpXY(x) { var y = x; if (x == y) return 42; }
+assertEquals(42, CmpXY(0));
+
+
+// Test compare against null.
+function CmpNullValue(x) { return x == null; }
+assertEquals(false, CmpNullValue(42));
+
+function CmpNullTest(x) { if (x == null) return 42; return 0; }
+assertEquals(42, CmpNullTest(null));
+
+var g1 = 0;
+function CmpNullEffect() { (g1 = 42) == null; }
+CmpNullEffect();
+assertEquals(42, g1);
diff --git a/test/mjsunit/compiler/complex-for-in.js b/test/mjsunit/compiler/complex-for-in.js
new file mode 100644 (file)
index 0000000..883f20a
--- /dev/null
@@ -0,0 +1,50 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function TestNamed(m) {
+  var o = {};
+  var result = [];
+  for (o.p in m) result.push(o.p);
+  return result;
+}
+
+assertArrayEquals(['x','y'], TestNamed({x:0, y:1}));
+assertArrayEquals(['0','1'], TestNamed([1,2]));
+
+
+function TestKeyed(m) {
+  var a = [];
+  var result = [];
+  var i = 0;
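+  // The assignment target a[i++] is re-evaluated on each iteration, so
+  // the n'th key ends up at a[n].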
+  for (a[i++] in m) result.push(a[i - 1]);
+  assertEquals(i, a.length);
+  return result;
+}
+
+
+assertArrayEquals(['x','y'], TestKeyed({x:0, y:1}));
+assertArrayEquals(['0','1'], TestKeyed([1,2]));
diff --git a/test/mjsunit/compiler/control-flow-0.js b/test/mjsunit/compiler/control-flow-0.js
new file mode 100644 (file)
index 0000000..bcf4f2d
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function f() {
+  return (42 + (0 == 1 ? 1 : 2));
+}
+
+
+function g(x) {
+  return (x + (0 == 1 ? 1 : 2));
+}
+
+
+function h(x) {
+  return ((x + 1) + (0 == 1 ? 1 : 2));
+}
+
+assertEquals(44, f());
+assertEquals(45, g(43));
+assertEquals(47, h(44));
diff --git a/test/mjsunit/compiler/control-flow-1.js b/test/mjsunit/compiler/control-flow-1.js
new file mode 100644 (file)
index 0000000..973d9b6
--- /dev/null
@@ -0,0 +1,55 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var global = this;
+
+function f0(x) {
+  assertTrue(this === global);
+  return x;
+}
+
+function g0(x, y) {
+  return f0(x == y);
+}
+
+assertTrue(g0(0, 0));
+assertFalse(g0(0, 1));
+
+
+var o = {};
+o.f1 = f1;
+function f1(x) {
+  assertTrue(this === o);
+  return x;
+}
+
+function g1(x, y) {
+  return o.f1(x == y);
+}
+
+assertTrue(g1(0, 0));
+assertFalse(g1(0, 1));
\ No newline at end of file
diff --git a/test/mjsunit/compiler/control-flow-2.js b/test/mjsunit/compiler/control-flow-2.js
new file mode 100644 (file)
index 0000000..26ed564
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
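+// The boolean comparison results coerce to 0 or 1, so f returns the
+// sign of a - b: -1, 0, or 1.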
+function f(a,b) {
+  return (b < a) - (a < b);
+}
+
+assertEquals(0, f(0,0));
+assertEquals(1, f(1,0));
+assertEquals(-1, f(0,1));
diff --git a/test/mjsunit/compiler/count-deopt.js b/test/mjsunit/compiler/count-deopt.js
new file mode 100644 (file)
index 0000000..dcd82f8
--- /dev/null
@@ -0,0 +1,150 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test deopt with count operation on parameter.
+var max_smi = 1073741823;
+var o = {x:0};
+
+function inc1(x) { x++; o.x = x; }
+inc1(max_smi);
+assertEquals(max_smi + 1, o.x);
+
+inc1(1.1);
+assertEquals(2.1, o.x);
+
+
+// Test deopt with count operation on named property.
+function inc2(p) { p.x++ }
+
+o.x = "42";
+inc2(o);
+assertEquals(43, o.x);
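+// The count operation applies ToNumber to the old value, so "42"++
+// stores the number 43 instead of concatenating.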
+
+var s = max_smi - 10000;
+o.x = s;
+for (var i = 0; i < 20000; i++) {
+  inc2(o);
+}
+assertEquals(max_smi + 10000, o.x);
+
+
+// Test deopt with count operation on keyed property.
+function inc3(a, b) { a[b]++; }
+
+o = ["42"];
+inc3(o, 0);
+assertEquals(43, o[0]);
+
+var s = max_smi - 10000;
+o[0] = s;
+for (var i = 0; i < 20000; i++) {
+  inc3(o, 0);
+}
+assertEquals(max_smi + 10000, o[0]);
+
+inc3(o,"0");
+
+assertEquals(max_smi + 10001, o[0]);
+
+// Test bailout when accessing a non-existing array element.
+o[0] = 0;
+for (var i = 0; i < 10000; i++) {
+  inc3(o, 0);
+}
+inc3(o, 1);
+
+// Test bailout with count operation in a value context.
+function inc4(x,y) { return (x++) + y; }
+for (var i = 0; i < 100000; ++i) assertEquals(3, inc4(2, 1));
+assertEquals(3.1, inc4(2, 1.1));
+
+function inc5(x,y) { return (++x) + y; }
+for (var i = 0; i < 100000; ++i) assertEquals(4, inc5(2, 1));
+assertEquals(4.1, inc5(2, 1.1));
+assertEquals(4.1, inc5(2.1, 1));
+
+function inc6(o,y) { return (o.x++) + y; }
+o = {x:0};
+for (var i = 0; i < 10000; ++i) {
+  o.x = 42;
+  assertEquals(43, inc6(o, 1));
+}
+o.x = 42;
+assertEquals(43.1, inc6(o, 1.1));
+o.x = 42.1;
+assertEquals(43.1, inc6(o, 1));
+
+function inc7(o,y) { return (++o.x) + y; }
+o = {x:0};
+for (var i = 0; i < 10000; ++i) {
+  o.x = 42;
+  assertEquals(44, inc7(o, 1));
+}
+o.x = 42;
+assertEquals(44.1, inc7(o, 1.1));
+o.x = 42.1;
+assertEquals(44.1, inc7(o, 1));
+
+function inc8(o,y) { return (o[0]++) + y; }
+var q = [0];
+for (var i = 0; i < 100000; ++i) {
+  q[0] = 42;
+  assertEquals(43, inc8(q, 1));
+}
+q[0] = 42;
+assertEquals(43.1, inc8(q, 1.1));
+q[0] = 42.1;
+assertEquals(43.1, inc8(q, 1));
+
+function inc9(o,y) { return (++o[0]) + y; }
+q = [0];
+for (var i = 0; i < 100000; ++i) {
+  q[0] = 42;
+  assertEquals(44, inc9(q, 1));
+}
+q[0] = 42;
+assertEquals(44.1, inc9(q, 1.1));
+q[0] = 42.1;
+assertEquals(44.1, inc9(q, 1));
+
+// Test deopt because of a failed map check.
+function inc10(p) { return p.x++ }
+var g1 = {x:0};
+var g2 = {y:0, x:42};
+for (var i = 0; i < 10000; ++i) {
+  g1.x = 42;
+  assertEquals(42, inc10(g1));
+  assertEquals(43, g1.x);
+}
+assertEquals(42, inc10(g2));
+assertEquals(43, g2.x);
+
+// Test deoptimization with postfix operation in a value context.
+function inc11(a) { return a[this.x++]; }
+var g3 = {x:null, f:inc11};
+var g4 = [42];
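+// this.x is null here, so the postfix result is ToNumber(null) == 0 and
+// element 0 of the argument is returned.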
+assertEquals(42, g3.f(g4));
index 5660cee..dca4c11 100644 (file)
@@ -109,3 +109,23 @@ assertEquals(1, ++b[c] && 1);
 assertEquals(45, b[c]);
 assertEquals(1, b[c]++ && 1);
 assertEquals(46, b[c]);
+
+// Test count operations with parameters.
+function f(x) { x++; return x; }
+assertEquals(43, f(42));
+
+function g(x) { ++x; return x; }
+assertEquals(43, g(42));
+
+function h(x) { var y = x++; return y; }
+assertEquals(42, h(42));
+
+function k(x) { var y = ++x; return y; }
+assertEquals(43, k(42));
+
+// Test count operation in a test context.
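+// Postfix i-- consumes the old value, so the body runs 10 times; prefix
+// --i consumes the new value and stops one iteration earlier.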
+function countTestPost(i) { var k = 0; while (i--) { k++; } return k; }
+assertEquals(10, countTestPost(10));
+
+function countTestPre(i) { var k = 0; while (--i) { k++; } return k; }
+assertEquals(9, countTestPre(10));
diff --git a/test/mjsunit/compiler/delete.js b/test/mjsunit/compiler/delete.js
new file mode 100644 (file)
index 0000000..373a1cb
--- /dev/null
@@ -0,0 +1,71 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests of unary delete in cases where it is always true or always false.
+
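+// delete applied to a non-reference (such as a literal) always evaluates
+// to true; delete applied to a local variable always evaluates to false,
+// because function-scope bindings are not deletable.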
+// In an effect context, expression is always true.
+assertEquals(undefined, void (delete 0));
+// In an effect context, expression is always false.
+assertEquals(undefined, (function (x) { delete x; })(0));
+
+// In a pure test context, expression is always true.
+assertEquals(1, (delete 0) ? 1 : 2);
+// In a pure test context, expression is always false.
+assertEquals(2, (function (x) { return (delete x) ? 1 : 2; })(0));
+// In a negated test context, expression is always false.
+assertEquals(1, (function (x) { return !(delete x) ? 1 : 2; })(0));
+
+// In a hybrid test/value context, expression is always true, value
+// expected in accumulator.
+assertEquals(3, 1 + ((delete 0) && 2));
+// In a hybrid test/value context, expression is always false, value
+// expected in accumulator.
+assertEquals(false, (function (x) { return (delete x) && 2; })(0));
+// In a hybrid test/value context, expression is always true, value
+// expected on stack.
+assertEquals(3, ((delete 0) && 2) + 1);
+// In a hybrid test/value context, expression is always false, value
+// expected on stack.
+assertEquals(1, (function (x) { return ((delete x) && 2) + 1; })(0));
+
+// In a hybrid value/test context, expression is always true, value
+// expected in accumulator.
+assertEquals(2, 1 + ((delete 0) || 2));
+// In a hybrid value/test context, expression is always false, value
+// expected in accumulator.
+assertEquals(2, (function (x) { return (delete x) || 2; })(0));
+// In a hybrid value/test context, expression is always true, value
+// expected on stack.
+assertEquals(2, ((delete 0) || 2) + 1);
+// In a hybrid value/test context, expression is always false, value
+// expected on stack.
+assertEquals(3, (function (x) { return ((delete x) || 2) + 1; })(0));
+
+
+// 'this' at toplevel is different from all other global variables: it is
+// not a variable reference at all, so deleting it always evaluates to true.
+assertEquals(true, delete this);
diff --git a/test/mjsunit/compiler/deopt-args.js b/test/mjsunit/compiler/deopt-args.js
new file mode 100644 (file)
index 0000000..780e2a2
--- /dev/null
@@ -0,0 +1,43 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function g(x) {
+  return x.f(0,1,2);
+}
+
+function f(a,b,c) {
+  return 42;
+}
+
+var object = { };
+object.f = f;
+for (var i = 0; i < 10000000; i++) {
+  assertEquals(42, g(object));
+}
+
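+// Installing a different f invalidates the optimized call site in g and
+// forces a deoptimization before the new target is called.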
+object.f = function(a,b,c) { return 87; };
+assertEquals(87, g(object));
diff --git a/test/mjsunit/compiler/deopt-inlined-smi.js b/test/mjsunit/compiler/deopt-inlined-smi.js
new file mode 100644 (file)
index 0000000..dda083e
--- /dev/null
@@ -0,0 +1,64 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --always-opt --always-inline-smi-code
+
+// Test deoptimization into inlined smi code.
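+// --always-opt optimizes these functions right away, while
+// --always-inline-smi-code makes the unoptimized code inline the smi
+// cases, so a deopt has to resume correctly in that inlined smi code.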
+
+function f(x) {
+  return ~x;
+}
+
+f(42);
+assertEquals(~12, f(12.45));
+assertEquals(~42, f(42.87));
+
+
+var a = 1, b = 2, c = 4, d = 8;
+function g() {
+  return a | (b | (c | d));
+}
+
+g();
+c = "16";
+assertEquals(1 | 2 | 16 | 8, g());
+
+
+function h() {
+  return 1 | a;
+}
+a = "2";
+h();
+assertEquals(3, h());
+
+
+function k() {
+  return a | 1;
+}
+a = "4";
+k();
+assertEquals(5, k());
diff --git a/test/mjsunit/compiler/expression-trees.js b/test/mjsunit/compiler/expression-trees.js
new file mode 100644 (file)
index 0000000..fac6b4c
--- /dev/null
@@ -0,0 +1,107 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --always-opt --nocompilation-cache
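+// --nocompilation-cache makes sure each "new Function" below is compiled
+// fresh instead of being served from the compilation cache, and
+// --always-opt optimizes it immediately.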
+
+// Given a binary operation string and an ordered array of leaf
+// strings, return an array of all binary tree strings with the leaves
+// (in order) as the fringe.
+function makeTrees(op, leaves) {
+  var len = leaves.length;
+  if (len == 1) {
+    // One leaf is a leaf.
+    return leaves;
+  } else {
+    // More than one leaf requires an interior node.
+    var result = [];
+    // Split the leaves into left and right subtrees in all possible
+    // ways.  For each split recursively compute all possible subtrees.
+    for (var i = 1; i < len; ++i) {
+      var leftTrees = makeTrees(op, leaves.slice(0, i));
+      var rightTrees = makeTrees(op, leaves.slice(i, len));
+      // Adjoin every possible left and right subtree.
+      for (var j = 0; j < leftTrees.length; ++j) {
+        for (var k = 0; k < rightTrees.length; ++k) {
+          var string = "(" + leftTrees[j] + op + rightTrees[k] + ")";
+          result.push(string);
+        }
+      }
+    }
+    return result;
+  }
+}
+
+// All 429 possible binary trees with eight leaves (the Catalan number
+// C(7)), built for both bitwise OR and bitwise AND below.
+var identifiers = ['a','b','c','d','e','f','g','h'];
+var or_trees = makeTrees("|", identifiers);
+var and_trees = makeTrees("&", identifiers);
+
+// Set up leaf masks to set 8 least-significant bits.
+var a = 1 << 0;
+var b = 1 << 1;
+var c = 1 << 2;
+var d = 1 << 3;
+var e = 1 << 4;
+var f = 1 << 5;
+var g = 1 << 6;
+var h = 1 << 7;
+
+for (var i = 0; i < or_trees.length; ++i) {
+  for (var j = 0; j < 8; ++j) {
+    var or_fun = new Function("return " + or_trees[i]);
+    if (j == 0) assertEquals(255, or_fun());
+
+    // Set the j'th variable to a string to force a bailout.
+    eval(identifiers[j] + "+= ''");
+    assertEquals(255, or_fun());
+    // Set it back to a number for the next iteration.
+    eval(identifiers[j] + "= +" + identifiers[j]);
+  }
+}
+
+// Set up leaf masks to clear 8 least-significant bits.
+a ^= 255;
+b ^= 255;
+c ^= 255;
+d ^= 255;
+e ^= 255;
+f ^= 255;
+g ^= 255;
+h ^= 255;
+
+for (i = 0; i < and_trees.length; ++i) {
+  for (var j = 0; j < 8; ++j) {
+    var and_fun = new Function("return " + and_trees[i]);
+    if (j == 0) assertEquals(0, and_fun());
+
+    // Set the j'th variable to a string to force a bailout.
+    eval(identifiers[j] + "+= ''");
+    assertEquals(0, and_fun());
+    // Set it back to a number for the next iteration.
+    eval(identifiers[j] + "= +" + identifiers[j]);
+  }
+}
diff --git a/test/mjsunit/compiler/for-stmt.js b/test/mjsunit/compiler/for-stmt.js
new file mode 100644 (file)
index 0000000..c8af01c
--- /dev/null
@@ -0,0 +1,59 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Test variants of for loops.
+function f(i, p) {
+  for (; i < 10; ) {
+    p.x = p.x + 1;
+    i = i+1;
+  }
+}
+var o = {x:42};
+f(1, o);
+assertEquals(51, o.x);
+
+
+function g(i, p) {
+  for (;;) {
+    if (i == 10) return;
+    p.x = p.x + 1;
+    i = i+1;
+  }
+}
+o = {x:42};
+g(1, o);
+assertEquals(51, o.x);
+
+
+function h(p) {
+  for (; p.x < 10; p.x++) {}
+}
+
+var o = {x:0};
+h(o);
+assertEquals(10, o.x);
index 0abd5dd..3b778da 100644 (file)
@@ -63,3 +63,14 @@ assertEquals(4, g);
 code = "g--; 1";
 assertEquals(1, eval(code));
 assertEquals(3, g);
+
+// Test simple assignment to non-deletable and deletable globals.
+var glo1 = 0;
+function f1(x) { glo1 = x; }
+f1(42);
+assertEquals(42, glo1);
+
+glo2 = 0;
+function f2(x) { glo2 = x; }
+f2(42);
+assertEquals(42, glo2);
diff --git a/test/mjsunit/compiler/inline-compare.js b/test/mjsunit/compiler/inline-compare.js
new file mode 100644 (file)
index 0000000..6efe154
--- /dev/null
@@ -0,0 +1,46 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that we can inline a function that returns the result of
+// a compare operation.
+function TestInlineCompare(o) {
+  // Effect context.
+  o.f();
+  // Value context.
+  var x = o.f();
+  assertFalse(x);
+  assertFalse(o.f());
+  // Test context.
+  if (o.f()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o = {};
+o.f = function() { return 0 === 1; };
+for (var i = 0; i < 10000000; i++) TestInlineCompare(o);
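+// A receiver with a different map fails the map check guarding the
+// inlined call and forces a deoptimization.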
+TestInlineCompare({f: o.f});
diff --git a/test/mjsunit/compiler/inline-conditional.js b/test/mjsunit/compiler/inline-conditional.js
new file mode 100644 (file)
index 0000000..941f74a
--- /dev/null
@@ -0,0 +1,46 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that we can inline a function that returns the result of
+// a conditional operation.
+function TestInlineConditional(o) {
+  // Effect context.
+  o.f();
+  // Value context.
+  var x = o.f();
+  assertEquals(87, x);
+  assertEquals(87, o.f());
+  // Test context.
+  if (!o.f()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o = {x:false,y:42,z:87};
+o.f = function() { return this.x ? this.y : this.z; };
+for (var i = 0; i < 10000; i++) TestInlineConditional(o);
+TestInlineConditional({x:true,y:87,z:42,f: o.f});
diff --git a/test/mjsunit/compiler/inline-global-access.js b/test/mjsunit/compiler/inline-global-access.js
new file mode 100644 (file)
index 0000000..3795173
--- /dev/null
@@ -0,0 +1,49 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that we can inline a function that returns the result of a
+// global variable load.
+var GLOBAL;
+function TestInlineGlobalLoad(o) {
+  // Effect context.
+  GLOBAL = 42;
+  o.f();
+  // Value context.
+  var x = o.f();
+  assertEquals(42, x);
+  GLOBAL = 87;
+  assertEquals(87, o.f());
+  // Test context.
+  if (!o.f()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o = {};
+o.f = function() { return GLOBAL; };
+for (var i = 0; i < 10000000; i++) TestInlineGlobalLoad(o);
+TestInlineGlobalLoad({f: o.f});
diff --git a/test/mjsunit/compiler/inline-param.js b/test/mjsunit/compiler/inline-param.js
new file mode 100644 (file)
index 0000000..8e0933a
--- /dev/null
@@ -0,0 +1,80 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that we can inline a call with a parameter.
+function TestInlineOneParam(o, p) {
+  // Effect context.
+  o.f(p);
+  // Value context.
+  var x = o.f(p);
+  assertEquals(42, x);
+  assertEquals(42, o.f(p));
+  // Test context.
+  if (!o.f(p)) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var obj = {x:42};
+var o1 = {};
+o1.f = function(o) { return o.x; };
+for (var i = 0; i < 10000; i++) TestInlineOneParam(o1, obj);
+TestInlineOneParam({f: o1.f}, {x:42});
+
+
+function TestInlineTwoParams(o, p) {
+  var y = 43;
+  // Effect context.
+  o.h(y, y);
+  // Value context.
+  var x = o.h(p, y);
+  assertEquals(true, x);
+  assertEquals(false, o.h(y, p));
+  // Test context.
+  if (!o.h(p, y)) {
+    assertTrue(false);  // Should not happen.
+  }
+
+  // Perform the same tests again, but this time with non-trivial
+  // expressions as the parameters.
+
+  // Effect context.
+  o.h(y + 1, y + 1);
+  // Value context.
+  var x = o.h(p + 1, y + 1);
+  assertEquals(true, x);
+  assertEquals(false, o.h(y + 1, p + 1));
+  // Test context.
+  if (!o.h(p + 1, y + 1)) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o2 = {};
+o2.h = function(i, j) { return i < j; };
+for (var i = 0; i < 10000; i++) TestInlineTwoParams(o2, 42);
+TestInlineTwoParams({h: o2.h}, 42);
diff --git a/test/mjsunit/compiler/inline-two.js b/test/mjsunit/compiler/inline-two.js
new file mode 100644 (file)
index 0000000..30f579d
--- /dev/null
@@ -0,0 +1,93 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that we can inline a function that calls another function.
+function TestInlineX(o) {
+  // Effect context.
+  o.g();
+  // Value context.
+  var x = o.g();
+  assertEquals(42, x);
+  assertEquals(42, o.g());
+  // Test context.
+  if (!o.g()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o2 = {};
+o2.size = function() { return 42; };
+o2.g = function() { return this.size(); };
+for (var i = 0; i < 10000; i++) TestInlineX(o2);
+TestInlineX({g: o2.g, size:o2.size});
+
+
+// Test that we can inline a call on a non-variable receiver.
+function TestInlineX2(o) {
+  // Effect context.
+  o.h();
+  // Value context.
+  var x = o.h();
+  assertEquals(42, x);
+  assertEquals(42, o.h());
+  // Test context.
+  if (!o.h()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var obj = {};
+obj.foo = function() { return 42; };
+var o3 = {};
+o3.v = obj;
+o3.h = function() { return this.v.foo(); };
+for (var i = 0; i < 10000; i++) TestInlineX2(o3);
+TestInlineX2({h: o3.h, v:obj});
+
+
+// Test that we can inline a call whose receiver is the result of
+// another inlined call.
+function TestInlineFG(o) {
+  // Effect context.
+  o.h();
+  // Value context.
+  var x = o.h();
+  assertEquals(42, x);
+  assertEquals(42, o.h());
+  // Test context.
+  if (!o.h()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var obj = {};
+obj.g = function() { return 42; };
+var o3 = {};
+o3.v = obj;
+o3.f = function() { return this.v; };
+o3.h = function() { return this.f().g(); };
+for (var i = 0; i < 10000; i++) TestInlineFG(o3);
+TestInlineFG({h: o3.h, f: o3.f, v:obj});
diff --git a/test/mjsunit/compiler/logical-and.js b/test/mjsunit/compiler/logical-and.js
new file mode 100644 (file)
index 0000000..1d31a0a
--- /dev/null
@@ -0,0 +1,70 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
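+// Cover the && operator in a value context. The function names encode the
+// operand kinds: B for a boolean comparison, N for a raw value.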
+function AndBB(x,y) {
+  return (x == 0) && (y == 0);
+}
+
+function AndBN(x,y) {
+  return (x == 0) && y;
+}
+
+function AndNB(x,y) {
+  return x && (y == 0);
+}
+
+function AndNN(x,y) {
+  return x && y;
+}
+
+assertTrue(AndBB(0, 0));
+assertFalse(AndBB(1, 0));
+assertFalse(AndBB(0, 1));
+assertFalse(AndBB(1, 1));
+
+assertFalse(AndBN(0, 0));
+assertTrue(AndBN(0, 1));
+assertFalse(AndBN(1, 0));
+assertEquals(1, AndBN(0, 1));
+assertEquals(2, AndBN(0, 2));
+assertFalse(AndBN(1, 1));
+assertFalse(AndBN(1, 2));
+
+assertEquals(0, AndNB(0, 0));
+assertTrue(AndNB(1, 0));
+assertEquals(0, AndNB(0, 1));
+assertEquals("", AndNB("", 1));
+assertFalse(AndNB(1, 1));
+assertTrue(AndNB(2, 0));
+
+assertEquals(0, AndNN(0, 0));
+assertEquals(0, AndNN(1, 0));
+assertEquals(0, AndNN(2, 0));
+assertEquals(0, AndNN(0, 1));
+assertEquals(0, AndNN(0, 2));
+assertEquals(1, AndNN(1, 1));
+assertEquals(2, AndNN(3, 2));
diff --git a/test/mjsunit/compiler/logical-or.js b/test/mjsunit/compiler/logical-or.js
new file mode 100644
index 0000000..87c630d
--- /dev/null
@@ -0,0 +1,68 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
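+// Cover the || operator in a value context. The function names encode the
+// operand kinds: B for a boolean comparison, N for a raw value.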
+function OrBB(x,y) {
+  return (x == 0) || (y == 0);
+}
+
+function OrBN(x,y) {
+  return (x == 0) || y;
+}
+
+function OrNB(x,y) {
+  return x || (y == 0);
+}
+
+function OrNN(x,y) {
+  return x || y;
+}
+
+assertTrue(OrBB(0, 0));
+assertTrue(OrBB(1, 0));
+assertTrue(OrBB(0, 1));
+assertFalse(OrBB(1, 1));
+
+assertTrue(OrBN(0, 0));
+assertEquals(0, OrBN(1, 0));
+assertTrue(OrBN(0, 1));
+assertEquals(1, OrBN(1, 1));
+assertEquals(2, OrBN(1, 2));
+
+assertTrue(OrNB(0, 0));
+assertEquals(1, OrNB(1, 0));
+assertFalse(OrNB(0, 1));
+assertEquals(1, OrNB(1, 1));
+assertEquals(2, OrNB(2, 1));
+
+assertEquals(0, OrNN(0, 0));
+assertEquals(1, OrNN(1, 0));
+assertEquals(2, OrNN(2, 0));
+assertEquals(1, OrNN(0, 1));
+assertEquals(2, OrNN(0, 2));
+assertEquals(1, OrNN(1, 2));
diff --git a/test/mjsunit/compiler/loops.js b/test/mjsunit/compiler/loops.js
index 4de45e7..2195c6c 100644
@@ -33,3 +33,29 @@ for (var i = 1; (6 - i); i++) {
   n = n * i;
 }
 assertEquals(120, n);
+
+// Test assignments in the loop condition.
+function f(i, n) {
+  while((n = n - 1) >= 0) {
+    i = n + 1;
+  }
+  return i;
+}
+assertEquals(1, f(0, 42));
+
+
+// Test do-while loop and continue.
+function g(a) {
+  var x = 0, c = 0;
+  do {
+    x = x + 1;
+    if (x < 5) continue;
+    c = c + 1;
+  } while(x < a);
+  return c;
+}
+
+assertEquals(6, g(10));
+
+// Test deoptimization in the loop condition.
+assertEquals(0, g("foo"));
diff --git a/test/mjsunit/compiler/null-compare.js b/test/mjsunit/compiler/null-compare.js
new file mode 100644
index 0000000..e01b555
--- /dev/null
@@ -0,0 +1,55 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
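+// Test comparisons against null in branch conditions.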
+function IsNull(x) {
+  if (x == null) return true; else return false;
+}
+
+assertTrue(IsNull(null), "null == null");
+assertTrue(IsNull(void 0), "void 0 == null");
+assertFalse(IsNull(42), "42 != null");
+
+
+function IsNullStrict(x) {
+  if (x === null) return true; else return false;
+}
+
+assertTrue(IsNullStrict(null), "null === null");
+assertFalse(IsNullStrict(void 0), "void 0 != null");
+assertFalse(IsNullStrict(87), "87 !== null");
+
+
+function GimmeFalse(x) {
+  if ((x & 1) == null) return true;
+  if ((x | 3) === null) return true;
+  return false;
+}
+
+assertFalse(GimmeFalse(1), "GimmeFalse(1)");
+assertFalse(GimmeFalse(null), "GimmeFalse(null)");
+assertFalse(GimmeFalse({}), "GimmeFalse({})");
diff --git a/test/mjsunit/compiler/optimized-function-calls.js b/test/mjsunit/compiler/optimized-function-calls.js
new file mode 100644
index 0000000..1b5f3b0
--- /dev/null
@@ -0,0 +1,80 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc
+
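+// Test optimized calls to a global function that forces a GC.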
+function f() {
+  gc();
+  return 87;
+}
+
+
+var x = 42, y = 99;
+function g() {
+  return x | f() | (y | (x | (f() | x)));
+}
+f();  // Give us a chance to optimize f.
+assertEquals(42 | 87 | 99, g());
+
+
+// Regression test for an issue where we would try to do an illegal
+// compile-time lookup on a null prototype.
+var object = { f: function() { return 42; }, x: 42 };
+delete object.x;
+function call_f(o) {
+  return o.f();
+}
+for (var i = 0; i < 10000000; i++) call_f(object);
+
+
+// Check that nested global function calls work.
+function f0() {
+  return 42;
+}
+
+function f1(a) {
+  return a;
+}
+
+function f2(a, b) {
+  return a * b;
+}
+
+function f3(a, b, c) {
+  return a + b - c;
+}
+
+function f4(a, b, c, d) {
+  return a * b + c - d;
+}
+
+function nested() {
+  return f4(f3(f2(f1(f0()),f0()),f1(f0()),f0()),f2(f1(f0()),f0()),f1(f0()),f0())
+    + f4(f0(),f1(f0()),f2(f1(f0()),f0()),f3(f2(f1(f0()),f0()),f1(f0()),f0()));
+}
+assertEquals(3113460, nested());
diff --git a/test/mjsunit/compiler/pic.js b/test/mjsunit/compiler/pic.js
new file mode 100644
index 0000000..a0b5d8f
--- /dev/null
@@ -0,0 +1,67 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
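+// Exercise polymorphic loads, stores, and calls from optimized code.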
+function GetX(o) { return o.x; }
+function CallF(o) { return o.f(); }
+function SetX(o) { o.x = 42; }
+function SetXY(o,y) { return o.x = y; }
+
+
+function Test(o) {
+  SetX(o);
+  assertEquals(42, GetX(o));
+  assertEquals(87, SetXY(o, 87));
+  assertEquals(87, GetX(o));
+  assertTrue(SetXY(o, o) === o);
+  assertTrue(o === GetX(o), "o === GetX(o)");
+  assertEquals("hest", SetXY(o, "hest"));
+  assertEquals("hest", GetX(o));
+  assertTrue(SetXY(o, Test) === Test);
+  assertTrue(Test === GetX(o), "Test === GetX(o)");
+  assertEquals(99, CallF(o));
+}
+
+// Create a bunch of objects with different layouts.
+var o1 = { x: 0, y: 1 };
+var o2 = { y: 1, x: 0 };
+var o3 = { y: 1, z: 2, x: 0 };
+o1.f = o2.f = o3.f = function() { return 99; };
+
+// Run the test until we're fairly sure we've optimized the
+// polymorphic property access.
+for (var i = 0; i < 1000000; i++) {
+  Test(o1);
+  Test(o2);
+  Test(o3);
+}
+
+// Make sure that the following doesn't crash.
+GetX(0);
+SetX(0);
+SetXY(0, 0);
+assertThrows("CallF(0)", TypeError);
diff --git a/test/mjsunit/compiler/property-calls.js b/test/mjsunit/compiler/property-calls.js
new file mode 100644
index 0000000..3366971
--- /dev/null
@@ -0,0 +1,38 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
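+// Test optimized property calls, including a receiver whose map changes.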
+function f(o) { return o.g(); }
+function g() { return 42; }
+
+var object = { };
+object.g = g;
+for (var i = 0; i < 10000000; i++) f(object);
+assertEquals(42, f(object));
+
+object = { g: function() { return 87; } };
+assertEquals(87, f(object));
diff --git a/test/mjsunit/compiler/property-refs.js b/test/mjsunit/compiler/property-refs.js
new file mode 100644
index 0000000..3f6f793
--- /dev/null
@@ -0,0 +1,52 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
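+// Test loads and stores through nested property references.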
+function Load(o) {
+  return o.outer.x | o.outer.inner.y;
+}
+
+function StoreXY(o, x, y) {
+  o.outer.x = x;
+  o.outer.inner.y = y;
+}
+
+function LoadXY(x, y) {
+  var object = {
+    outer: {
+      x: 0,
+      inner: { y: 0 }
+    }
+  };
+  StoreXY(object, x, y);
+  return Load(object);
+}
+
+for (var i = 0; i < 10000; i++) LoadXY(i, i);
+assertEquals(42 | 87, LoadXY(42, 87));
+assertEquals(42 | 87, LoadXY(42, 87));
+assertEquals(42 | 99, LoadXY(42, "99"));
diff --git a/test/mjsunit/compiler/property-stores.js b/test/mjsunit/compiler/property-stores.js
new file mode 100644
index 0000000..0dec82a
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
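+// Test optimized stores to the same property with values of different types.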
+var a = 42;
+
+var obj = {x: 0,
+           f: function() { this.x = 7; },
+           g: function() { this.x = a | 1; },
+           h: function() { this.x = a; }};
+
+var i;
+for (i = 0; i < 10000; i++) { obj.f(); }
+assertEquals(7, obj.x);
+
+for (i = 0; i < 10000; i++) { obj.g(); }
+assertEquals(43, obj.x);
+
+for (i = 0; i < 10000; i++) { obj.h(); }
+assertEquals(42, obj.x);
diff --git a/test/mjsunit/compiler/recursive-deopt.js b/test/mjsunit/compiler/recursive-deopt.js
new file mode 100644
index 0000000..366f59a
--- /dev/null
@@ -0,0 +1,48 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+function f(n) {
+  // Force a deopt both in the leaf case and when returning. To make
+  // debugging easier, the operation that bails out (<<) is so simple
+  // that it doesn't cause GCs.
+  if (n == 0) return 1 << one;
+  return f(n - 1) << one;
+}
+
+function RunTests() {
+  assertEquals(1 << 1, f(0));
+  assertEquals(1 << 2, f(1));
+  assertEquals(1 << 5, f(4));
+}
+
+
+var one = 1;
+for (var i = 0; i < 1000000; i++) RunTests();
+
+var one = { valueOf: function() { return 1; } };
+for (var j = 0; j < 100000; j++) RunTests();
diff --git a/test/mjsunit/compiler/regress-0.js b/test/mjsunit/compiler/regress-0.js
new file mode 100644
index 0000000..df6dfee
--- /dev/null
@@ -0,0 +1,38 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
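+// Regression test: nested loops.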
+function TestNestedLoops() {
+  var sum = 0;
+  for (var i = 0; i < 200; i = i + 1) {
+    for (var j = 0; j < 200; j = j + 1) {
+      sum = sum + 1;
+    }
+  }
+  return sum;
+}
+assertEquals(200 * 200, TestNestedLoops());
diff --git a/test/mjsunit/compiler/regress-1.js b/test/mjsunit/compiler/regress-1.js
new file mode 100644
index 0000000..cbae1a8
--- /dev/null
@@ -0,0 +1,36 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
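+// Regression test: a chain of conditional early returns.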
+function DaysInYear(y) {
+  if (y % 4 != 0) return 365;
+  if (y % 4 == 0 && y % 100 != 0) return 366;
+  if (y % 100 == 0 && y % 400 != 0) return 365;
+  if (y % 400 == 0) return 366;
+}
+assertEquals(365, DaysInYear(1999));
+assertEquals(366, DaysInYear(2000));
diff --git a/test/mjsunit/compiler/regress-2.js b/test/mjsunit/compiler/regress-2.js
new file mode 100644
index 0000000..a26ef32
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The compilation of this function currently fails when resolving
+// control flow in the register allocator.
+function TestCreateString(n)
+{
+  var l = n * 1;
+  var r = 'r';
+  while (r.length < n)
+  {
+    r = r + r;
+  }
+  return r;
+}
+
+assertEquals("r", TestCreateString(1));
+assertEquals("rr", TestCreateString(2));
+assertEquals("rrrr", TestCreateString(3));
+assertEquals("rrrrrrrr", TestCreateString(6));
diff --git a/test/mjsunit/compiler/regress-3.js b/test/mjsunit/compiler/regress-3.js
new file mode 100644
index 0000000..6aa7078
--- /dev/null
@@ -0,0 +1,38 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
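+// Regression test: loop-based Fibonacci; the result does not fit in a Smi.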
+function fib(n) {
+  var f0 = 0, f1 = 1;
+  for (; n > 0; n = n -1) {
+    var f2 = f0 + f1;
+    f0 = f1; f1 = f2;
+  }
+  return f0;
+}
+
+assertEquals(2111485077978050, fib(75));
diff --git a/test/mjsunit/compiler/regress-3136962.js b/test/mjsunit/compiler/regress-3136962.js
new file mode 100644
index 0000000..147d833
--- /dev/null
@@ -0,0 +1,51 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Reduced regression test for a global value numbering bug: the original
+// value of the global variable 'height' was reused even after reassignment.
+
+var height = 267;
+
+var count = 0;
+function inner() { height = 0; ++count; }
+function outer() {}
+
+function test() {
+  for (var i = 0; i < height; ++i) {
+    for (var j = -6; j < 7; ++j) {
+      if (i + j < 0 || i + j >= height) continue;
+      for (var k = -6; k < 7; ++k) {
+        inner();
+      }
+    }
+    outer();
+  }
+}
+
+test();
+
+assertEquals(13, count);
diff --git a/test/mjsunit/compiler/regress-3185901.js b/test/mjsunit/compiler/regress-3185901.js
new file mode 100644
index 0000000..1e1bbe7
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Inlined function call in a test context.  Should never crash even
+// with --always-opt.
+var x;
+
+function f() { if (g()) { } }
+function g() { if (x) { return true; } }
+
+f();
diff --git a/test/mjsunit/compiler/regress-3218915.js b/test/mjsunit/compiler/regress-3218915.js
new file mode 100644
index 0000000..d27c319
--- /dev/null
@@ -0,0 +1,48 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test for failure to deoptimize properly when the most recent
+// side effect occurred in a comma expression in an effect context.
+
+// An unoptimizable function, calling it is a side effect.
+function side_effect() { try {} finally {} return "wrong"; }
+
+// A function to observe the value of its first argument.
+function observe(x, y) { try {} finally {} return x; }
+
+// If we optimize assuming x is a smi, then a string x will deopt.  The side
+// effect immediately before the deopt is in a comma expression in an effect
+// context (i.e., itself the left subexpression of a comma expression).
+function test(x) { return observe(this, ((0, side_effect()), x + 1)); }
+
+// Run test enough times to get it optimized.
+for (var i = 0; i < 1000000; ++i) test(0);
+
+// Force test to deopt.  If it behaves normally, it should return the global
+// object.  If the value of the call to side_effect() is lingering after the
+// deopt, it will return the string "wrong".
+assertFalse(test("a") === "wrong");
diff --git a/test/mjsunit/compiler/regress-3249650.js b/test/mjsunit/compiler/regress-3249650.js
new file mode 100644
index 0000000..1f06090
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Among other things, this code covers the case of deoptimization
+// after a compare expression in an effect context.
+
+function f0(x) { try { } catch (e) {}}
+function f1(x) { try { } catch (e) {}}
+function f2(x) { try { } catch (e) {}}
+function f3(x) { try { } catch (e) {}}
+
+var object = { a: "", b: false, c: {}};
+object.f = function(x) { return this; };
+
+
+function test(x) {
+  f0(x);
+  f1(x);
+  f2(x);
+  f3(x);
+  x.a.b == "";
+  object.f("A").b = true;
+  object.f("B").a = "";
+  object.f("C").c.display = "A";
+  object.f("D").c.display = "A";
+}
+
+var x = {a: {b: "" }};
+for (var i = 0; i < 1000000; i++) test(x);
diff --git a/test/mjsunit/compiler/regress-4.js b/test/mjsunit/compiler/regress-4.js
new file mode 100644
index 0000000..0ec9a12
--- /dev/null
@@ -0,0 +1,40 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test deoptimization after a loop.
+function f(p) {
+  var y=0;
+  for (var x=0; x<10; x++) {
+    if (x > 5) { y=y+p; break;}
+  }
+  return y+x;
+}
+
+for (var i=0; i<10000000; i++) f(42);
+
+var result = f("foo");
+assertEquals("0foo6", result);
diff --git a/test/mjsunit/compiler/regress-5.js b/test/mjsunit/compiler/regress-5.js
new file mode 100644
index 0000000..5488d0e
--- /dev/null
@@ -0,0 +1,43 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test breaking out of labelled blocks.
+function f(y) {
+  var x = 0;
+
+  foo: {
+    x++;
+    bar: {
+       if (y == 0) break bar; else break foo;
+    }
+    x++;
+  }
+  return x;
+}
+
+assertEquals(2, f(0));
+assertEquals(1, f(1));
diff --git a/test/mjsunit/compiler/regress-6.js b/test/mjsunit/compiler/regress-6.js
new file mode 100644
index 0000000..e92b0e5
--- /dev/null
@@ -0,0 +1,43 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
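+// Test deoptimization in a function with a short-circuited condition.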
+function f(a, b, c) {
+  if (a == 0 || b == 0) return a;
+  return a + c;
+}
+
+assertEquals(0, f(0, 0, 0));
+assertEquals(0, f(0, 1, 0));
+assertEquals(1, f(1, 0, 0));
+assertEquals(2, f(2, 1, 0));
+
+// Force deoptimization in --always-opt mode when evaluating
+// the 'a + c' expression. Make sure this doesn't end up
+// returning 'a'.
+assertEquals(1.5, f(1, 1, 0.5));
+assertEquals(2.5, f(2, 1, 0.5));
diff --git a/test/mjsunit/compiler/regress-7.js b/test/mjsunit/compiler/regress-7.js
new file mode 100644
index 0000000..d6034f9
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test correct truncation of tagged values.
+var G = 42;
+
+function f() {
+  var v = G;
+  var w = v >> 0;
+  return w;
+}
+
+for(var i=0; i<10000; i++) f();
+
+assertEquals(G, f());
+G = 2000000000;
+assertEquals(G, f());
diff --git a/test/mjsunit/compiler/regress-8.js b/test/mjsunit/compiler/regress-8.js
new file mode 100644
index 0000000..3a23885
--- /dev/null
@@ -0,0 +1,109 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test for the register allocator.
+var gp = "";
+var yE = "";
+var W = "";
+var LA = "";
+var zE = "";
+var Fp = "";
+var AE = "";
+var Gob = "";
+var Hob = "";
+var Iob = "";
+var Job = "";
+var Kob = "";
+var Lob = "";
+var Mob = "";
+var p = "";
+function O() { this.append = function(a,b,c,d,e) { return a + b + c + d + e; }; }
+
+function Nob(b,a) {
+ var c;
+ if (b==2) {
+   c=new O;
+   c.append(gp,
+            yE,
+            W,
+            LA+(a.Un+(zE+(Fp+(LA+(a.Im+(zE+(AE+(LA+(a.total+Gob))))))))),
+            p);
+   c=c.toString();
+ } else {
+   if (b==1) {
+     if(a.total>=2E6) {
+       c=new O;
+       c.append(gp,yE,W,LA+(a.Un+(zE+(Fp+(LA+(a.Im+Hob))))),p);
+       c=c.toString();
+     } else {
+       if(a.total>=2E5) {
+         c=new O;
+         c.append(gp,yE,W,LA+(a.Un+(zE+(Fp+(LA+(a.Im+Iob))))),p);
+         c=c.toString();
+       } else {
+         if(a.total>=2E4) {
+           c=new O;
+           c.append(gp,yE,W,LA+(a.Un+(zE+(Fp+(LA+(a.Im+Job))))),p);
+           c=c.toString();
+         } else {
+           if(a.total>=2E3) {
+             c=new O;
+             c.append(gp,yE,W,LA+(a.Un+(zE+(Fp+(LA+(a.Im+Kob))))),p);
+             c=c.toString();
+           } else {
+             if(a.total>=200) {
+               c=new O;
+               c.append(gp,yE,W,LA+(a.Un+(zE+(Fp+(LA+(a.Im+Lob))))),p);
+               c=c.toString();
+             } else {
+               c=new O;
+               c.append(gp,yE,W,
+                        LA+(a.Un+(zE+(Fp+(LA+(a.Im+(zE+(Mob+(LA+(a.total+zE))))))))),
+                        p);
+               c=c.toString();
+             }
+             c=c;
+           }
+           c=c;
+         }
+         c=c;
+       }
+       c=c;
+     }
+     c=c;
+   } else {
+     c=new O;
+     c.append(gp,yE,W,
+              LA+(a.Un+(zE+(Fp+(LA+(a.Im+(zE+(AE+(LA+(a.total+zE))))))))),
+              p);
+     c=c.toString();
+   }
+   c=c;
+ }
+ return c;
+}
+Nob(2, { Un: "" , Im: "" , total: 42});
diff --git a/test/mjsunit/compiler/regress-arguments.js b/test/mjsunit/compiler/regress-arguments.js
new file mode 100644
index 0000000..234d3fb
--- /dev/null
@@ -0,0 +1,50 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests of Function.prototype.apply with the arguments object.
+
+// Test passing null or undefined as receiver.
+function f() { return this.foo; }
+
+function g() { return f.apply(null, arguments); }
+function h() { return f.apply(void 0, arguments); }
+
+var foo = 42;
+
+for (var i=0; i<1000000; i++) assertEquals(42, g());
+for (var i=0; i<1000000; i++) assertEquals(42, h());
+
+var G1 = 21;
+var G2 = 22;
+
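+// Test passing a number as receiver; it is wrapped, so this.foo is undefined.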
+function u() {
+ var v = G1 + G2;
+ return f.apply(v, arguments);
+}
+
+for (var i=0; i<1000000; i++) assertEquals(void 0, u());
diff --git a/test/mjsunit/compiler/regress-arrayliteral.js b/test/mjsunit/compiler/regress-arrayliteral.js
new file mode 100644
index 0000000..8938785
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test for array literals.
+
+var G = 41;
+var H = 42;
+function f() { var v = [G,H]; return v[1]; }
+assertEquals(42, f());
diff --git a/test/mjsunit/compiler/regress-funarguments.js b/test/mjsunit/compiler/regress-funarguments.js
new file mode 100644
index 0000000..cea40bc
--- /dev/null
@@ -0,0 +1,82 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test function.arguments.
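+// Reading a function's arguments property forces V8 to reconstruct the
+// argument values from the live (possibly optimized) frame; the computed
+// ["arg" + "uments"] spelling presumably keeps the compiler from
+// special-casing the access statically.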
+
+function A() {}
+function B() {}
+
+function fee(x, y) {
+  if (x == 1) return fee["arg" + "uments"];
+  if (x == 2) return gee["arg" + "uments"];
+  return 42;
+}
+
+function gee(x) { return this.f(2 - x, "f"); }
+
+function foo(x, y) {
+  if (x == 0) return foo["arg" + "uments"];
+  if (x == 1) return goo["arg" + "uments"];
+  return 42;
+}
+
+function goo(x) { return this.f(x, "f"); }
+
+A.prototype.f = fee;
+A.prototype.g = gee;
+
+B.prototype.f = foo;
+B.prototype.g = goo;
+
+var o = new A();
+
+function hej(x) {
+  if (x == 0) return o.g(x, "h");
+  if (x == 1) return o.g(x, "h");
+  return o.g(x, "z");
+}
+
+function stress() {
+  for (var i=0; i<5000000; i++) o.g(i, "g");
+  for (var j=0; j<5000000; j++) hej(j);
+}
+
+stress();
+
+assertArrayEquals([0, "g"], o.g(0, "g"));
+assertArrayEquals([1, "f"], o.g(1, "g"));
+assertArrayEquals([0, "h"], hej(0));
+assertArrayEquals([1, "f"], hej(1));
+
+o = new B();
+
+stress();
+
+assertArrayEquals([0, "f"], o.g(0, "g"));
+assertArrayEquals([1, "g"], o.g(1, "g"));
+assertArrayEquals([0, "f"], hej(0));
+assertArrayEquals([1, "h"], hej(1));
diff --git a/test/mjsunit/compiler/regress-funcaller.js b/test/mjsunit/compiler/regress-funcaller.js
new file mode 100644 (file)
index 0000000..88db147
--- /dev/null
@@ -0,0 +1,73 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test function.caller.
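+// fun.caller is computed by walking the stack: it is gee when fun is
+// reached through the method call, null when gee itself was invoked from
+// top-level code, and the enclosing function when the call came through
+// eval. Optimized and inlined frames must not confuse the walk.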
+function A() {}
+
+function fun(x) {
+  if (x == 0) return fun.caller;
+  if (x == 1) return gee.caller;
+  return 42;
+}
+function gee(x) { return this.f(x); }
+
+A.prototype.f = fun;
+A.prototype.g = gee;
+
+var o = new A();
+
+for (var i=0; i<5000000; i++) {
+  o.g(i);
+}
+assertEquals(gee, o.g(0));
+assertEquals(null, o.g(1));
+
+// Test when called from another function.
+function hej(x) {
+  if (x == 0) return o.g(x);
+  if (x == 1) return o.g(x);
+  return o.g(x);
+}
+
+for (var j=0; j<5000000; j++) {
+  hej(j);
+}
+assertEquals(gee, hej(0));
+assertEquals(hej, hej(1));
+
+// Test when called from eval.
+function from_eval(x) {
+  if (x == 0) return eval("o.g(x);");
+  if (x == 1) return eval("o.g(x);");
+  return o.g(x);
+}
+
+for (var j=0; j<5000000; j++) {
+  from_eval(j);
+}
+assertEquals(gee, from_eval(0));
+assertEquals(from_eval, from_eval(1));
diff --git a/test/mjsunit/compiler/regress-gap.js b/test/mjsunit/compiler/regress-gap.js
new file mode 100644 (file)
index 0000000..a812daa
--- /dev/null
@@ -0,0 +1,130 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test that stresses the register allocator gap instruction.
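+// Rotating this many live values through a loop back edge produces
+// cyclic parallel moves at the block boundary (the "gap" moves), which
+// the allocator has to break with a temporary or a swap.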
+
+function small_select(n, v1, v2) {
+  for (var i = 0; i < n; ++i) {
+    var tmp = v1;
+    v1 = v2;
+    v2 = tmp;
+  }
+  return v1;
+}
+
+function select(n, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10) {
+  for (var i = 0; i < n; ++i) {
+    var tmp = v1;
+    v1 = v2;
+    v2 = v3;
+    v3 = v4;
+    v4 = v5;
+    v5 = v6;
+    v6 = v7;
+    v7 = v8;
+    v8 = v9;
+    v9 = v10;
+    v10 = tmp;
+  }
+  return v1;
+}
+
+function select_while(n, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10) {
+  var i = 0;
+  while (i < n) {
+    var tmp = v1;
+    v1 = v2;
+    v2 = v3;
+    v3 = v4;
+    v4 = v5;
+    v5 = v6;
+    v6 = v7;
+    v7 = v8;
+    v8 = v9;
+    v9 = v10;
+    v10 = tmp;
+    i++;
+  }
+  return v1;
+}
+
+function two_cycles(n, v1, v2, v3, v4, v5, x1, x2, x3, x4, x5) {
+  for (var i = 0; i < n; ++i) {
+    var tmp = v1;
+    v1 = v2;
+    v2 = v3;
+    v3 = v4;
+    v4 = v5;
+    v5 = tmp;
+    tmp = x1;
+    x1 = x2;
+    x2 = x3;
+    x3 = x4;
+    x4 = x5;
+    x5 = tmp;
+  }
+  return v1 + x1;
+}
+
+function two_cycles_while(n, v1, v2, v3, v4, v5, x1, x2, x3, x4, x5) {
+  var i = 0;
+  while (i < n) {
+    var tmp = v1;
+    v1 = v2;
+    v2 = v3;
+    v3 = v4;
+    v4 = v5;
+    v5 = tmp;
+    tmp = x1;
+    x1 = x2;
+    x2 = x3;
+    x3 = x4;
+    x4 = x5;
+    x5 = tmp;
+    i++;
+  }
+  return v1 + x1;
+}
+assertEquals(1, small_select(0, 1, 2));
+assertEquals(2, small_select(1, 1, 2));
+assertEquals(1, small_select(10, 1, 2));
+
+assertEquals(1, select(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+assertEquals(4, select(3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+assertEquals(10, select(9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+
+assertEquals(1 + 6, two_cycles(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+assertEquals(4 + 9, two_cycles(3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+assertEquals(5 + 10, two_cycles(9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+
+assertEquals(1, select_while(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+assertEquals(4, select_while(3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+assertEquals(10, select_while(9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+
+assertEquals(1 + 6, two_cycles_while(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+assertEquals(4 + 9, two_cycles_while(3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+assertEquals(5 + 10, two_cycles_while(9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
diff --git a/test/mjsunit/compiler/regress-gvn.js b/test/mjsunit/compiler/regress-gvn.js
new file mode 100644 (file)
index 0000000..358daf7
--- /dev/null
@@ -0,0 +1,51 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --noalways-opt
+//
+// Regression test for global value numbering.
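+// The two loads in a[0] + a[0] may be value-numbered together, but the
+// final return a[0] must not reuse that value: the conditional store
+// a[0] = 1 invalidates it. Each call therefore returns 1, and the sum
+// is n.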
+
+function test(a) {
+  var res = a[0] + a[0];
+  if (res == 0) {
+    a[0] = 1;
+  }
+  return a[0];
+}
+
+var a = new Array();
+
+var n = 100000000;
+
+var result = 0;
+for (var i = 0; i < n; ++i) {
+  a[0] = 0;
+  result += test(a);
+}
+
+
+assertEquals(n, result);
diff --git a/test/mjsunit/compiler/regress-loop-deopt.js b/test/mjsunit/compiler/regress-loop-deopt.js
new file mode 100644 (file)
index 0000000..7906761
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test while loops and continue.
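+// The body runs for i == 2, 1, 0; only i == 1 has (i & 1) > 0 and
+// continues, so j is incremented exactly twice.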
+function h() {
+  var i = 3, j = 0;
+  while(--i >= 0) {
+    var x = i & 1;
+    if(x > 0) {
+      continue;
+    }
+    j++;
+  }
+  return j;
+}
+
+assertEquals(2, h());
diff --git a/test/mjsunit/compiler/regress-max.js b/test/mjsunit/compiler/regress-max.js
new file mode 100644 (file)
index 0000000..94c543a
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test Math.max with negative zero as input.
+function f(x, y) { return Math.max(x, y) }
+
+for (var i = 0; i < 1000000; i++) f(0, 0);
+
+var r = f(-0, -0);
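+// Math.max(-0, -0) must be -0, and 1 / -0 is -Infinity (while 1 / +0 is
+// +Infinity), which makes the sign of the zero observable.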
+assertEquals(-Infinity, 1 / r);
diff --git a/test/mjsunit/compiler/regress-or.js b/test/mjsunit/compiler/regress-or.js
new file mode 100644 (file)
index 0000000..89f7802
--- /dev/null
@@ -0,0 +1,57 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test deoptimization inside short-circuited expressions.
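+// Once f1 is optimized for the truthy number 42, calling it with 0 or ""
+// takes the !x short-circuit and deoptimizes in the middle of the ||
+// expression; the empty try/finally in g1 presumably keeps g1 from being
+// inlined.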
+function f1(x) {
+  var c = "fail";
+  if (!x || g1()) {
+    c = ~x;
+  }
+  return c;
+}
+
+function g1() { try { return 1; } finally {} }
+
+for (var i=0; i<10000000; i++) f1(42);
+
+assertEquals(-1, f1(0));
+assertEquals(-43, f1(42));
+assertEquals(-1, f1(""));
+
+function f2(x) {
+  var c = "fail";
+  if (!x || !g2()) {
+    c = ~x;
+  }
+  return c;
+}
+
+function g2() { try { return 0; } finally {} }
+
+for (var i=0; i<10000000; i++) f2(42);
+
+assertEquals(-1, f2(""));
diff --git a/test/mjsunit/compiler/regress-rep-change.js b/test/mjsunit/compiler/regress-rep-change.js
new file mode 100644 (file)
index 0000000..9370999
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test for the case where a phi has two input operands with
+// the same value.
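+// The duplicated loop presumably gives the phi for i two inputs carrying
+// the same value, exercising representation inference for such phis.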
+
+function test(start) {
+  if (true) {
+    for (var i = start; i < 10; i++) { }
+  }
+  for (var i = start; i < 10; i++) { }
+}
+
+var n = 5000000;
+
+for (var i = 0; i < n; ++i) {
+  test(0);
+}
diff --git a/test/mjsunit/compiler/regress-stacktrace-methods.js b/test/mjsunit/compiler/regress-stacktrace-methods.js
new file mode 100644 (file)
index 0000000..4900ccf
--- /dev/null
@@ -0,0 +1,64 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test stack traces with method calls.
+function Hest() {}
+function Svin() {}
+
+Svin.prototype.two = function() { /* xxxxxxx */ o.three(); }
+
+Hest.prototype.one = function(x) { x.two(); }
+
+Hest.prototype.three = function() { if (v == 42) throw new Error("urg"); }
+
+var o = new Hest();
+var s = new Svin();
+var v = 0;
+
+for (var i = 0; i < 1000000; i++) {
+  o.one(s);
+}
+
+v = 42;
+
+try {
+  o.one(s);
+} catch (e) {
+  var stack = e.stack.toString();
+  var p3 = stack.indexOf("at Hest.three");
+  var p2 = stack.indexOf("at Svin.two");
+  var p1 = stack.indexOf("at Hest.one");
+  assertTrue(p3 != -1);
+  assertTrue(p2 != -1);
+  assertTrue(p1 != -1);
+  assertTrue(p3 < p2);
+  assertTrue(p2 < p1);
+  assertTrue(stack.indexOf("36:56") != -1);
+  assertTrue(stack.indexOf("32:51") != -1);
+  assertTrue(stack.indexOf("34:38") != -1);
+  assertTrue(stack.indexOf("49:5") != -1);
+}
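+
+// Note that the one-line method bodies and the /* xxxxxxx */ padding
+// above pin the call and throw sites to the exact line:column pairs
+// checked in the assertions.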
diff --git a/test/mjsunit/compiler/regress-stacktrace.js b/test/mjsunit/compiler/regress-stacktrace.js
new file mode 100644 (file)
index 0000000..843dd12
--- /dev/null
@@ -0,0 +1,52 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test correctness of stack traces with global functions.
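+// The function two is defined via eval; it must still show up under its
+// own name and in the right order in the stack trace.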
+eval("function two() { /* xxxxxxx */ three(); }");
+
+function one() {
+  two();
+}
+
+function three() {
+  throw new Error("urg");
+}
+
+try {
+ one();
+} catch (e) {
+  var stack = e.stack.toString();
+  var p3 = stack.indexOf("at three");
+  var p2 = stack.indexOf("at two");
+  var p1 = stack.indexOf("at one");
+  assertTrue(p3 != -1);
+  assertTrue(p2 != -1);
+  assertTrue(p1 != -1);
+  assertTrue(p3 < p2);
+  assertTrue(p2 < p1);
+  print(stack);
+}
diff --git a/test/mjsunit/compiler/safepoint.js b/test/mjsunit/compiler/safepoint.js
new file mode 100644 (file)
index 0000000..ee8fcf0
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc
+
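+// Each gc() call is a safepoint in the (possibly optimized) code for
+// Test; the safepoint records must keep x and y, which are live across
+// the calls, visible to the collector so that y still equals this
+// afterwards.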
+function Test(o) {
+  var x = o;
+  var y = this;
+  x.gc();
+  x.gc();
+  return y;
+}
+
+var o = {gc:gc};
+assertTrue(Test(o) === this);
index af80b7f..ef7a0f4 100644 (file)
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --fast-compiler
-
 function Test() {
   this.result = 0;
   this.x = 0;
@@ -92,6 +90,14 @@ Test.prototype.test9 = function() {
        | a;             // 1.1
 }
 
+Test.prototype.test10 = function() {
+  this.z = (a >> b) | (c >> c);
+}
+
+Test.prototype.test11 = function(x) {
+  this.z = x >> x;
+}
+
 var t = new Test();
 
 t.test0();
@@ -125,3 +131,13 @@ t.test9();
 assertEquals(14, t.x);
 assertEquals(6, t.y);
 assertEquals(15, t.z);
+
+a = "2";
+t.test11(a);
+assertEquals(0, t.z);
+
+a = 4;
+b = "1";
+c = 2;
+t.test10();
+assertEquals(2, t.z);
index 15e1a55..a4e8ab5 100644 (file)
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --fast-compiler
-
 var a = 1;
 var b = 2;
 var c = 4;
diff --git a/test/mjsunit/compiler/simple-deopt.js b/test/mjsunit/compiler/simple-deopt.js
new file mode 100644 (file)
index 0000000..8befd9f
--- /dev/null
@@ -0,0 +1,101 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function f(x) {
+  return ~x;
+}
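+
+// ~x truncates its operand to an int32, so ~12.45 becomes ~12; if f was
+// optimized for Smi input, the double argument forces a deoptimization.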
+
+f(42);
+assertEquals(~12, f(12.45));
+assertEquals(~42, f(42.87));
+
+
+var a = 1, b = 2, c = 4, d = 8;
+function g() {
+  return a | (b | (c | d));
+}
+
+g();
+c = "16";
+assertEquals(1 | 2 | 16 | 8, g());
+
+
+// Test deopt when global function changes.
+function h() {
+  return g();
+}
+assertEquals(1 | 2 | 16 | 8, h());
+g = function() { return 42; };
+assertEquals(42, h());
+
+
+// Test deopt when map changes.
+var obj = {};
+obj.g = g;
+function k(o) {
+  return o.g();
+}
+for (var i = 0; i < 1000000; i++) k(obj);
+assertEquals(42, k(obj));
+assertEquals(87, k({g: function() { return 87; }}));
+
+
+// Test deopt with assignments to parameters.
+function p(x,y) {
+  x = 42;
+  y = 1;
+  y = y << "0";
+  return x | y;
+}
+assertEquals(43, p(0,0));
+
+
+// Test deopt with literals on the expression stack.
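+// During the concatenation the literal 'lit[' sits on the expression
+// stack; if (x + ']') deoptimizes, the deoptimizer must rebuild the
+// unoptimized frame including that literal slot.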
+function LiteralToStack(x) {
+  return 'lit[' + (x + ']');
+}
+
+assertEquals('lit[-87]', LiteralToStack(-87));
+assertEquals('lit[0]', LiteralToStack(0));
+assertEquals('lit[42]', LiteralToStack(42));
+
+
+// Test deopt before call.
+var str = "abc";
+var r;
+function CallCharAt(n) { return str.charAt(n); }
+for (var i = 0; i < 1000000; i++) {
+  r = CallCharAt(0);
+}
+assertEquals("a", r);
+
+
+// Test of deopt in presence of spilling.
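+// On 32-bit targets 1 + 1 + 2 + 0x3fffffff == 0x40000003 overflows the
+// Smi range, deoptimizing while intermediate sums may be spilled to the
+// stack; the deoptimizer must recover the spilled values.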
+function add4(a,b,c,d) {
+  return a+b+c+d;
+}
+assertEquals(0x40000003, add4(1,1,2,0x3fffffff));
index 35746ba..87a641c 100644 (file)
@@ -25,9 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --fast-compiler
-
-// Test global variable loads with the fast compiler.
+// Test global variable loads.
 var g1 = 42;
 var g2 = 43;
 var g3 = 44;
diff --git a/test/mjsunit/compiler/simple-inlining.js b/test/mjsunit/compiler/simple-inlining.js
new file mode 100644 (file)
index 0000000..219580f
--- /dev/null
@@ -0,0 +1,146 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that we can inline a function that returns a constant.
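+// Each TestInline* function below calls the candidate in the three
+// expression contexts (effect, value, test). The trailing call with a
+// fresh object literal changes the receiver map, so the inlined call
+// site has to deoptimize and still produce the right answer.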
+function TestInlineConstant(o) {
+  // Effect context.
+  o.f();
+  // Value context.
+  var x = o.f();
+  assertEquals(42, x);
+  assertEquals(42, o.f());
+  // Test context.
+  if (!o.f()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o1 = {};
+o1.f = function() { return 42; };
+for (var i = 0; i < 10000; i++) TestInlineConstant(o1);
+TestInlineConstant({f: o1.f});
+
+
+// Test that we can inline a function that returns 'this'.
+function TestInlineThis(o) {
+  // Effect context.
+  o.g();
+  // Value context.
+  var x = o.g();
+  assertEquals(o, x);
+  assertEquals(o, o.g());
+  // Test context.
+  if (!o.g()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o2 = {};
+o2.g = function() { return this; };
+for (var i = 0; i < 10000; i++) TestInlineThis(o2);
+TestInlineThis({g: o2.g});
+
+
+// Test that we can inline a function that returns 'this.x'.
+function TestInlineThisX(o) {
+  // Effect context.
+  o.h();
+  // Value context.
+  var x = o.h();
+  assertEquals(42, x);
+  assertEquals(42, o.h());
+  // Test context.
+  if (!o.h()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o3 = {y:0,x:42};
+o3.h = function() { return this.x; };
+for (var i = 0; i < 10000; i++) TestInlineThisX(o3);
+TestInlineThisX({h: o3.h, x:42});
+
+
+// Test that we can inline a function that returns 'this.x.length'.
+function TestInlineThisXLength(o) {
+  // Effect context.
+  o.h();
+  // Value context.
+  var x = o.h();
+  assertEquals(3, x);
+  assertEquals(3, o.h());
+  // Test context.
+  if (!o.h()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o4 = {x:[1,2,3]};
+o4.h = function() { return this.x.length; };
+for (var i = 0; i < 10000; i++) TestInlineThisXLength(o4);
+TestInlineThisXLength({h: o4.h, x:[1,2,3]});
+
+
+// Test that we can inline a function that returns 'this.e.y'.
+function TestInlineThisXY(o) {
+  // Effect context.
+  o.h();
+  // Value context.
+  var x = o.h();
+  assertEquals(42, x);
+  assertEquals(42, o.h());
+  // Test context.
+  if (!o.h()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o6 = {y:42};
+var o5 = {e:o6};
+o5.h = function() { return this.e.y; };
+for (var i = 0; i < 10000; i++) TestInlineThisXY(o5);
+TestInlineThisXY({h: o5.h, e:o6});
+
+
+// Test that we can inline a function that returns 'this.x[0]'.
+function TestInlineThisX0(o) {
+  // Effect context.
+  o.foo();
+  // Value context.
+  var x = o.foo();
+  assertEquals(42, x);
+  assertEquals(42, o.foo());
+  // Test context.
+  if (!o.foo()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o7 = {x:[42,43,44]};
+o7.foo = function() { return this.x[0]; };
+for (var i = 0; i < 10000; i++) TestInlineThisX0(o7);
+TestInlineThisX0({foo: o7.foo, x:[42,0,0]});
diff --git a/test/mjsunit/compiler/simple-osr.js b/test/mjsunit/compiler/simple-osr.js
new file mode 100644 (file)
index 0000000..8ec1b2b
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --use-osr
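+//
+// The long inner loop triggers on-stack replacement: the still-running
+// unoptimized frame of f is replaced by optimized code mid-loop. The
+// expected sum is sum(i + 10) for i in [0, 1000000), i.e.
+// 499999500000 + 10000000 == 500009500000.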
+
+function f() {
+  var sum = 0;
+  for (var i = 0; i < 1000000; i++) {
+    var x = i + 2;
+    var y = x + 5;
+    var z = y + 3;
+    sum += z;
+  }
+  return sum;
+}
+
+
+for (var i = 0; i < 2; i++) {
+  assertEquals(500009500000, f());
+}
diff --git a/test/mjsunit/compiler/switch-bailout.js b/test/mjsunit/compiler/switch-bailout.js
new file mode 100644 (file)
index 0000000..8011d44
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that bailing out of the optimized compilation doesn't mess with
+// the labels in the AST.
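+// A switch with string clauses makes the optimizing compiler bail out to
+// the full codegen, which reuses the same AST, so the break/continue
+// label bookkeeping must be left intact.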
+function f(x) {
+  switch (x) {
+    case "foo": return 87;
+    case "bar": return 42;
+  }
+  return 99;
+}
+
+for (var i = 0; i < 10000; i++) f("foo");
+assertEquals(42, f("bar"));
index 5e8ea59..1ee8e50 100644 (file)
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --fast-compiler
-
 // Test references to properties of this.
 function Test() {
   this.a = 0;
index 098fc3a..7615561 100644 (file)
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --always-full-compiler
-
 // Test reference to this-function.
 
 var g = (function f(x) {
diff --git a/test/mjsunit/compiler/variables.js b/test/mjsunit/compiler/variables.js
new file mode 100644 (file)
index 0000000..fac4878
--- /dev/null
@@ -0,0 +1,73 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Simple tests of the various kinds of variable references in the
+// implementation.
+
+// Global variables.
+var x = 0;
+function f0() { return x; }
+assertEquals(0, f0());
+
+
+// Parameters.
+function f1(x) { return x; }
+assertEquals(1, f1(1));
+
+
+// Stack-allocated locals.
+function f2() { var x = 2; return x; }
+assertEquals(2, f2());
+
+
+// Context-allocated locals.  Local function forces x into f3's context.
+function f3(x) {
+  function g() { return x; }
+  return x;
+}
+assertEquals(3, f3(3));
+
+// Local function reads x from an outer context.
+function f4(x) {
+  function g() { return x; }
+  return g();
+}
+assertEquals(4, f4(4));
+
+
+// Lookup slots.  'With' forces x to be looked up at runtime.
+function f5(x) {
+  with ({}) return x;
+}
+assertEquals(5, f5(5));
+
+
+// Parameters rewritten to property accesses.  Using the name 'arguments'
+// (even if it shadows the arguments object) forces all parameters to be
+// rewritten to explicit property accesses.
+function f6(arguments) { return arguments; }
+assertEquals(6, f6(6));
index 936523a..897c3e3 100644 (file)
@@ -33,6 +33,7 @@ Debug = debug.Debug
 listenerComplete = false;
 exception = false;
 
+var breakpoint = -1;
 var base_request = '"seq":0,"type":"request","command":"changebreakpoint"'
 
 function safeEval(code) {
@@ -68,21 +69,21 @@ function listener(event, exec_state, event_data, data) {
 
     testArguments(dcp, '{}', false);
     testArguments(dcp, '{"breakpoint":0,"condition":"false"}', false);
-    // TODO(1241036) change this to 2 when break points have been restructured.
-    testArguments(dcp, '{"breakpoint":3,"condition":"false"}', false);
+    testArguments(dcp, '{"breakpoint":' + (breakpoint + 1) + ',"condition":"false"}', false);
     testArguments(dcp, '{"breakpoint":"xx","condition":"false"}', false);
 
     // Test some legal clearbreakpoint requests.
-    testArguments(dcp, '{"breakpoint":1}', true);
-    testArguments(dcp, '{"breakpoint":1,"enabled":"true"}', true);
-    testArguments(dcp, '{"breakpoint":1,"enabled":"false"}', true);
-    testArguments(dcp, '{"breakpoint":1,"condition":"1==2"}', true);
-    testArguments(dcp, '{"breakpoint":1,"condition":"false"}', true);
-    testArguments(dcp, '{"breakpoint":1,"ignoreCount":7}', true);
-    testArguments(dcp, '{"breakpoint":1,"ignoreCount":0}', true);
+    var bp_str = '"breakpoint":' + breakpoint;;
+    testArguments(dcp, '{' + bp_str + '}', true);
+    testArguments(dcp, '{' + bp_str + ',"enabled":"true"}', true);
+    testArguments(dcp, '{' + bp_str + ',"enabled":"false"}', true);
+    testArguments(dcp, '{' + bp_str + ',"condition":"1==2"}', true);
+    testArguments(dcp, '{' + bp_str + ',"condition":"false"}', true);
+    testArguments(dcp, '{' + bp_str + ',"ignoreCount":7}', true);
+    testArguments(dcp, '{' + bp_str + ',"ignoreCount":0}', true);
     testArguments(
         dcp,
-        '{"breakpoint":1,"enabled":"true","condition":"false","ignoreCount":0}',
+        '{' + bp_str + ',"enabled":"true","condition":"false","ignoreCount":0}',
         true);
 
     // Indicate that all was processed.
@@ -99,8 +100,7 @@ Debug.setListener(listener);
 function g() {};
 
 // Set a break point and call to invoke the debug event listener.
-bp = Debug.setBreakPoint(g, 0, 0);
-assertEquals(1, bp);
+breakpoint = Debug.setBreakPoint(g, 0, 0);
 g();
 
 // Make sure that the debug event listener was invoked.
index 59479f2..58e1531 100644 (file)
@@ -33,6 +33,7 @@ Debug = debug.Debug
 listenerComplete = false;
 exception = false;
 
+var breakpoint = -1;
 var base_request = '"seq":0,"type":"request","command":"clearbreakpoint"'
 
 function safeEval(code) {
@@ -68,15 +69,14 @@ function listener(event, exec_state, event_data, data) {
 
     testArguments(dcp, '{}', false);
     testArguments(dcp, '{"breakpoint":0}', false);
-    // TODO(1241036) change this to 2 when break points have been restructured.
-    testArguments(dcp, '{"breakpoint":3}', false);
+    testArguments(dcp, '{"breakpoint":' + (breakpoint + 1)+ '}', false);
     testArguments(dcp, '{"breakpoint":"xx"}', false);
 
     // Test some legal clearbreakpoint requests.
-    testArguments(dcp, '{"breakpoint":1}', true);
+    testArguments(dcp, '{"breakpoint":' + breakpoint + '}', true);
 
     // Cannot clear the same break point twice.
-    testArguments(dcp, '{"breakpoint":1}', false);
+    testArguments(dcp, '{"breakpoint":' + breakpoint + '}', false);
 
     // Indicate that all was processed.
     listenerComplete = true;
@@ -92,8 +92,7 @@ Debug.setListener(listener);
 function g() {};
 
 // Set a break point and call to invoke the debug event listener.
-bp = Debug.setBreakPoint(g, 0, 0);
-assertEquals(1, bp);
+breakpoint = Debug.setBreakPoint(g, 0, 0);
 g();
 
 // Make sure that the debug event listener was invoked.
index e6677f9..0cfc5c9 100644 (file)
@@ -115,3 +115,8 @@ var breakpointNumbers = breakpoints.map(
 assertEquals([bp2, bp3, bp5].sort(), breakpointNumbers.sort());
 
 assertFalse(exception, "exception in listener");
+
+// Clear all breakpoints to allow the test to run again (--stress-opt).
+Debug.clearBreakPoint(bp2);
+Debug.clearBreakPoint(bp3);
+Debug.clearBreakPoint(bp5);
index 94e2780..39ebf3a 100644 (file)
 
 Debug = debug.Debug
 
-
-eval(
-    "function ChooseAnimal(p) {\n " +
-    "  if (p == 7) {\n" + // Use p
-    "    return;\n" +
-    "  }\n" +
-    "  return function Chooser() {\n" +
-    "    return 'Cat';\n" +
-    "  };\n" +
-    "}\n"
-);
+eval("function ChooseAnimal(p) {\n " +
+     "  if (p == 7) {\n" + // Use p
+     "    return;\n" +
+     "  }\n" +
+     "  return function Chooser() {\n" +
+     "    return 'Cat';\n" +
+     "  };\n" +
+     "}\n");
 
 var old_closure = ChooseAnimal(19);
 
@@ -67,4 +64,3 @@ assertEquals("Capybara19", new_closure());
 
 // Old instance of closure is not patched.
 assertEquals("Cat", old_closure());
-
index f01a8c4..1d28ab9 100644 (file)
@@ -62,6 +62,8 @@ assertEquals(26, F26());
 
 var script = Debug.findScript(F25);
 
+assertEquals(0, Debug.scriptBreakPoints().length);
+
 Debug.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId, script.id, 1, 1, "true || false || false");
 Debug.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId, script.id, 6, 1, "true || false || false");
 Debug.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId, script.id, 14, 1, "true || false || false");
@@ -96,3 +98,16 @@ assertEquals(3, breakpoints_in_script);
 assertTrue(break_position_map[1]);
 assertTrue(break_position_map[11]);
 
+// Delete all breakpoints to make this test reentrant.
+var breaks = Debug.scriptBreakPoints();
+var breaks_ids = [];
+
+for (var i = 0; i < breaks.length; i++) {
+  breaks_ids.push(breaks[i].number());
+}
+
+for (var i = 0; i < breaks_ids.length; i++) {
+  Debug.clearBreakPoint(breaks_ids[i]);
+}
+
+assertEquals(0, Debug.scriptBreakPoints().length);
index 027987f..b0d3c20 100644 (file)
@@ -30,7 +30,7 @@
 
 // Scenario: some function is being edited; the outer function has to have its
 // positions patched. According to a special markup of function text
-// corresponding byte-code PCs should conicide before change and after it.
+// corresponding byte-code PCs should coincide before change and after it.
 
 Debug = debug.Debug
 
@@ -62,32 +62,65 @@ function ReadMarkerPositions(func) {
 function ReadPCMap(func, positions) {
   var res = new Array();
   for (var i = 0; i < positions.length; i++) {
-    res.push(Debug.LiveEdit.GetPcFromSourcePos(func, positions[i]));
+    var pc = Debug.LiveEdit.GetPcFromSourcePos(func, positions[i]);
+
+    if (typeof pc === 'undefined') {
+      // The function was marked for recompilation and its code was replaced
+      // with a stub. This can happen at any time, especially if we are
+      // running with --stress-opt. There is no way to get PCs now.
+      return;
+    }
+
+    res.push(pc);
   }
+
   return res;
 }
 
-var res = ChooseAnimal();
-assertEquals("Cat15", res);
+function ApplyPatch(orig_animal, new_animal) {
+  var res = ChooseAnimal();
+  assertEquals(orig_animal + "15", res);
+
+  var script = Debug.findScript(ChooseAnimal);
+
+  var orig_string = "'" + orig_animal + "'";
+  var patch_string = "'" + new_animal + "'";
+  var patch_pos = script.source.indexOf(orig_string);
 
-var markerPositionsBefore = ReadMarkerPositions(ChooseAnimal);
-var pcArrayBefore = ReadPCMap(ChooseAnimal, markerPositionsBefore);
+  var change_log = new Array();
 
-var script = Debug.findScript(ChooseAnimal);
+  Debug.LiveEdit.TestApi.ApplySingleChunkPatch(script,
+                                               patch_pos,
+                                               orig_string.length,
+                                               patch_string,
+                                               change_log);
 
-var orig_animal = "'Cat'";
-var patch_pos = script.source.indexOf(orig_animal);
-var new_animal_patch = "'Capybara'";
+  print("Change log: " + JSON.stringify(change_log) + "\n");
 
-var change_log = new Array();
-Debug.LiveEdit.TestApi.ApplySingleChunkPatch(script, patch_pos, orig_animal.length, new_animal_patch, change_log);
-print("Change log: " + JSON.stringify(change_log) + "\n");
+  var markerPositions = ReadMarkerPositions(ChooseAnimal);
+  var pcArray = ReadPCMap(ChooseAnimal, markerPositions);
 
-var res = ChooseAnimal();
-assertEquals("Capybara15", res);
+  var res = ChooseAnimal();
+  assertEquals(new_animal + "15", res);
 
-var markerPositionsAfter = ReadMarkerPositions(ChooseAnimal);
-var pcArrayAfter = ReadPCMap(ChooseAnimal, markerPositionsAfter);
+  return pcArray;
+}
+
+var pcArray1 = ApplyPatch('Cat', 'Dog');
+
+// When we patched the function for the first time, it was deoptimized.
+// Check that the mapping between source positions and PCs does not change
+// after the second patch.
 
-assertArrayEquals(pcArrayBefore, pcArrayAfter);
+var pcArray2 = ApplyPatch('Dog', 'Capybara');
 
+print(pcArray1);
+print(pcArray2);
+
+// A function can be marked for recompilation at any point (especially if
+// we are running with --stress-opt). When we mark a function for
+// recompilation we replace its code with a stub, so there is no reliable
+// way to get PCs for the function.
+if (pcArray1 && pcArray2) {
+  assertArrayEquals(pcArray1, pcArray2);
+}
index 475fe26..3741f26 100644 (file)
@@ -97,7 +97,7 @@ fact(3);
 EndTest(2);
 
 BeginTest('Test 4');
-shouldBreak = function(x) { print(x); return x == 1 || x == 3; };
+shouldBreak = function(x) { return x == 1 || x == 3; };
 step_out_count = 2;
 fact(3);
 EndTest(3);
index cf08d7a..0446cd3 100644 (file)
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --noalways-opt
 
 var RUN_WITH_ALL_ARGUMENT_ENTRIES = false;
 var kOnManyArgumentsRemove = 5;
@@ -144,6 +144,9 @@ var knownProblems = {
   "NewArgumentsFast": true,
   "PushContext": true,
   "LazyCompile": true,
+  "LazyRecompile": true,
+  "NotifyDeoptimized": true,
+  "NotifyOSR": true,
   "CreateObjectLiteralBoilerplate": true,
   "CloneLiteralBoilerplate": true,
   "CloneShallowLiteralBoilerplate": true,
index ad7add8..1888554 100644 (file)
@@ -74,7 +74,7 @@ function testObjectMirror(obj, cls_name, ctor_name, hasSpecialProperties) {
     assertEquals('property', properties[i].type(), 'Unexpected mirror type');
     assertEquals(names[i], properties[i].name(), 'Unexpected property name');
   }
-  
+
   for (var p in obj) {
     var property_mirror = mirror.property(p);
     assertTrue(property_mirror instanceof debug.PropertyMirror);
index 820dca7..24d9603 100644 (file)
@@ -30,6 +30,13 @@ prefix mjsunit
 # All tests in the bug directory are expected to fail.
 bugs: FAIL
 
+
+##############################################################################
+# Too slow in debug mode with --stress-opt
+compiler/regress-stacktrace-methods: PASS, SKIP if $mode == debug
+
+
+##############################################################################
 # This one uses a built-in that's only present in debug mode. It takes
 # too long to run in debug mode on ARM.
 fuzz-natives: PASS, SKIP if ($mode == release || $arch == arm)
@@ -49,6 +56,8 @@ regress/regress-524: (PASS || TIMEOUT), SKIP if $mode == debug
 debug-liveedit-check-stack: SKIP
 debug-liveedit-patch-positions-replace: SKIP
 
+
+##############################################################################
 [ $arch == arm ]
 
 # Slow tests which times out in debug mode.
@@ -60,15 +69,44 @@ array-constructor: PASS, SKIP if $mode == debug
 unicode-test: PASS, (PASS || FAIL) if $mode == debug
 
 # Times out often in release mode on ARM.
+compiler/regress-stacktrace-methods: PASS, PASS || TIMEOUT if $mode == release
 array-splice: PASS || TIMEOUT
 
-# Skip long running test in debug mode on ARM.
-string-indexof-2: PASS, SKIP if $mode == debug
-
-
+# Long running test.
+mirror-object: PASS || TIMEOUT
+string-indexof-2: PASS || TIMEOUT
+
+# BUG(3251035): Timeouts in long looping crankshaft optimization
+# tests. Skipping because having them timeout takes too long on the
+# buildbot.
+compiler/alloc-number: SKIP
+compiler/array-length: SKIP
+compiler/assignment-deopt: SKIP
+compiler/deopt-args: SKIP
+compiler/inline-compare: SKIP
+compiler/inline-global-access: SKIP
+compiler/optimized-function-calls: SKIP
+compiler/pic: SKIP
+compiler/property-calls: SKIP
+compiler/recursive-deopt: SKIP
+compiler/regress-4: SKIP
+compiler/regress-funcaller: SKIP
+compiler/regress-gvn: SKIP
+compiler/regress-rep-change: SKIP
+compiler/regress-arguments: SKIP
+compiler/regress-funarguments: SKIP
+compiler/regress-or: SKIP
+compiler/regress-3249650: SKIP
+compiler/simple-deopt: SKIP
+regress/regress-490: SKIP
+regress/regress-634: SKIP
+regress/regress-create-exception: SKIP
+regress/regress-3218915: SKIP
+regress/regress-3247124: SKIP
+
+
+##############################################################################
 [ $arch == mips ]
 
 # Skip all tests on MIPS.
 *: SKIP
-
-
diff --git a/test/mjsunit/regress/regress-3006390.js b/test/mjsunit/regress/regress-3006390.js
new file mode 100644 (file)
index 0000000..4f916ef
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function X() { }
+X.prototype.valueOf = function () { return 7; }
+
+function f(x, y) { return x % y; }
+
+assertEquals(1, f(8, new X()));
diff --git a/test/mjsunit/regress/regress-3185905.js b/test/mjsunit/regress/regress-3185905.js
new file mode 100644 (file)
index 0000000..bd611ab
--- /dev/null
@@ -0,0 +1,60 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function test1(x) {
+  var a = arguments.callee;
+  x = 1;
+  x = 2;
+  assertEquals(2, x);
+}
+test1(0)
+
+function test2(x) {
+  var a = arguments.callee;
+  x++;
+  x++;
+  assertEquals(2, x);
+}
+test2(0)
+
+function test3(x) {
+  var a = arguments.callee;
+  x += 1;
+  x += 1;
+  assertEquals(2, x);
+}
+test3(0)
+
+function test4(x) {
+  var arguments = { 0 : 3, 'x' : 4 };
+  x += 1;
+  x += 1;
+  assertEquals(2, x);
+  assertEquals(3, arguments[0])
+  assertEquals(4, arguments['x'])
+}
+test4(0)
diff --git a/test/mjsunit/regress/regress-3199913.js b/test/mjsunit/regress/regress-3199913.js
new file mode 100644 (file)
index 0000000..e202af1
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that bailout during evaluation of the key for a keyed call works as
+// intended.
+
+var y = {
+  'a' : function (x, y) { return 'called a(' + x + ', ' + y + ')' },
+  'b' : function (x, y) { return 'called b(' + x + ', ' + y + ')' }
+}
+
+function C() {
+}
+
+C.prototype.f = function () {
+  return y[(this.a == 1 ? "a" : "b")](0, 1);
+}
+
+obj = new C()
+assertEquals('called b(0, 1)', obj.f())
diff --git a/test/mjsunit/regress/regress-3218530.js b/test/mjsunit/regress/regress-3218530.js
new file mode 100644 (file)
index 0000000..247f3df
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This tests that global key values are preserved when used in
+// an expression which will bail out.
+
+var m = Math;
+var p = "floor";
+
+function test() {
+  var bignumber = 31363200000;
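+  // Arithmetic check: 31363200000 / 864E5 is exactly 363, Math.round(363)
+  // is 363, 363 / 7 is 51.857..., and Math.floor of that is 51, so the
+  // tested expression evaluates to 51 + 1 == 52.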
+  assertDoesNotThrow(assertEquals(m[p](Math.round(bignumber/864E5)/7)+1, 52));
+}
+
+test();
diff --git a/test/mjsunit/regress/regress-3218915.js b/test/mjsunit/regress/regress-3218915.js
new file mode 100644 (file)
index 0000000..5fcbcec
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Checks that a comma expression in a conditional context is processed
+// correctly.
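+// A comma expression evaluates its operands left to right and yields the
+// value of its last operand, so (y = x, y > 1) assigns x to y and then
+// tests y.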
+
+function withCommaExpressionInConditional(x) {
+  if (x > 1000) { for (var i = 0; i < 10000; i++) { } }
+  var y;
+  if (y = x, y > 1) {
+    return 'big';
+  }
+  return (y = x + 1, y > 1) ? 'medium' : 'small';
+}
+
+for (var i = 0; i < 10000; i++) {
+  withCommaExpressionInConditional(i);
+}
+withCommaExpressionInConditional("1")
diff --git a/test/mjsunit/regress/regress-3230771.js b/test/mjsunit/regress/regress-3230771.js
new file mode 100644 (file)
index 0000000..bd00798
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test for missing stack-overflow check in
+// VisitForStatement in hydrogen graph building.
+
+function f() {
+  for (var h = typeof arguments[0] == "object" ? 0 : arguments; false; ) { }
+}
+
+f();
diff --git a/test/mjsunit/regress/regress-3247124.js b/test/mjsunit/regress/regress-3247124.js
new file mode 100644 (file)
index 0000000..7fda299
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var foo = unescape("%E0%E2%EA%F4%FB%E3%F5%E1%E9%ED%F3%FA%E7%FC%C0%C2%CA%D4%DB%C3%D5%C1%C9%CD%D3%DA%C7%DC");
+
+function bar(x) {
+  var s = new String(x);
+  var a = new String(foo);
+  var b = new String('aaeouaoaeioucuAAEOUAOAEIOUCU');
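+  // a holds accented Latin-1 characters (decoded from foo) and b holds
+  // unaccented replacements at the same indices; the loops below
+  // transliterate s one character at a time.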
+
+  var i = new Number();
+  var j = new Number();
+  var c = new String();
+  var r = '';
+
+  for (i = 0; i < s.length; i++) {
+    c = s.substring(i, i + 1);
+    for (j = 0; j < a.length; j++) {
+      if (a.substring(j, j + 1) == c) {
+        c = b.substring(j, j + 1);
+      }
+    }
+    r += c;
+  }
+
+  return r.toLowerCase();
+}
+
+for (var i = 0; i < 10000; i++) bar(foo);
diff --git a/test/mjsunit/regress/regress-3252443.js b/test/mjsunit/regress/regress-3252443.js
new file mode 100644 (file)
index 0000000..cd7aa40
--- /dev/null
@@ -0,0 +1,45 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var document = new Object();
+document.getElementById = function(s) { return { style: {}}};
+function x(p0, p1, p2, p3) {
+  document.getElementById(p1+p0).style.display='';
+  document.getElementById(p1+''+p0).style.backgroundColor = "";
+  document.getElementById(p1+''+p0).style.color="";
+  document.getElementById(p1+''+p0).style.borderBottomColor = "";
+  for (var i = p3; i <= p2; ++i) {
+    if (i != p0) {
+      document.getElementById(p1+i).style.display='';
+      document.getElementById(p1+''+i).style.backgroundColor = "";
+      document.getElementById(p1+''+i).style.color="";
+      document.getElementById(p1+''+i).style.borderBottomColor = "";
+    }
+  }
+}
+
+x(1, "xxx", 10000, 1)
index 80cc0c7..9a34b81 100644 (file)
@@ -67,5 +67,3 @@ assertEquals(5, re.lastIndex);  // Fails if caching.
 re.lastIndex = 0;
 re.exec(str);
 assertEquals(5, re.lastIndex);  // Fails if caching.
-
-
index c6b3db7..6b1d098 100644 (file)
@@ -32,22 +32,22 @@ function num_ops() {
   var x;
   var tmp = 0;
   x = (tmp = 1578221999, tmp)+(tmp = 572285336, tmp);
-  assertEquals(2150507335, x);
+  assertEquals(2150507335, x, "++");
   x = 1578221999 + 572285336;
   assertEquals(2150507335, x);
 
   x = (tmp = -1500000000, tmp)+(tmp = -2000000000, tmp);
-  assertEquals(-3500000000, x);
+  assertEquals(-3500000000, x, "+-");
   x = -1500000000 + -2000000000;
   assertEquals(-3500000000, x);
 
   x = (tmp = 1578221999, tmp)-(tmp = -572285336, tmp);
-  assertEquals(2150507335, x);
+  assertEquals(2150507335, x, "--");
   x = 1578221999 - -572285336;
   assertEquals(2150507335, x);
 
   x = (tmp = -1500000000, tmp)-(tmp = 2000000000, tmp);
-  assertEquals(-3500000000, x);
+  assertEquals(-3500000000, x, "-+");
   x = -1500000000 - 2000000000;
   assertEquals(-3500000000, x);
 }
diff --git a/test/mjsunit/smi-ops-inlined.js b/test/mjsunit/smi-ops-inlined.js
new file mode 100644 (file)
index 0000000..afc6cc0
--- /dev/null
@@ -0,0 +1,673 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --always-inline-smi-code
+
+const SMI_MAX = (1 << 30) - 1;
+const SMI_MIN = -(1 << 30);
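+// On 32-bit platforms V8 stores small integers (smis) as 31-bit signed
+// values, so the smi range is [-2^30, 2^30 - 1]; results outside it are
+// represented as heap numbers.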
+const ONE = 1;
+const ONE_HUNDRED = 100;
+
+const OBJ_42 = new (function() {
+  this.valueOf = function() { return 42; };
+})();
+
+assertEquals(42, OBJ_42.valueOf());
+
+
+function Add1(x) {
+  return x + 1;
+}
+
+function Add100(x) {
+  return x + 100;
+}
+
+function Add1Reversed(x) {
+  return 1 + x;
+}
+
+function Add100Reversed(x) {
+  return 100 + x;
+}
+
+
+assertEquals(1, Add1(0));  // fast case
+assertEquals(1, Add1Reversed(0));  // fast case
+assertEquals(SMI_MAX + ONE, Add1(SMI_MAX), "smimax + 1");
+assertEquals(SMI_MAX + ONE, Add1Reversed(SMI_MAX), "1 + smimax");
+assertEquals(42 + ONE, Add1(OBJ_42));  // non-smi
+assertEquals(42 + ONE, Add1Reversed(OBJ_42));  // non-smi
+
+assertEquals(100, Add100(0));  // fast case
+assertEquals(100, Add100Reversed(0));  // fast case
+assertEquals(SMI_MAX + ONE_HUNDRED, Add100(SMI_MAX), "smimax + 100");
+assertEquals(SMI_MAX + ONE_HUNDRED, Add100Reversed(SMI_MAX), " 100 + smimax");
+assertEquals(42 + ONE_HUNDRED, Add100(OBJ_42));  // non-smi
+assertEquals(42 + ONE_HUNDRED, Add100Reversed(OBJ_42));  // non-smi
+
+
+
+function Sub1(x) {
+  return x - 1;
+}
+
+function Sub100(x) {
+  return x - 100;
+}
+
+function Sub1Reversed(x) {
+  return 1 - x;
+}
+
+function Sub100Reversed(x) {
+  return 100 - x;
+}
+
+
+assertEquals(0, Sub1(1));  // fast case
+assertEquals(-1, Sub1Reversed(2));  // fast case
+assertEquals(SMI_MIN - ONE, Sub1(SMI_MIN));  // overflow
+assertEquals(ONE - SMI_MIN, Sub1Reversed(SMI_MIN));  // overflow
+assertEquals(42 - ONE, Sub1(OBJ_42));  // non-smi
+assertEquals(ONE - 42, Sub1Reversed(OBJ_42));  // non-smi
+
+assertEquals(0, Sub100(100));  // fast case
+assertEquals(1, Sub100Reversed(99));  // fast case
+assertEquals(SMI_MIN - ONE_HUNDRED, Sub100(SMI_MIN));  // overflow
+assertEquals(ONE_HUNDRED - SMI_MIN, Sub100Reversed(SMI_MIN));  // overflow
+assertEquals(42 - ONE_HUNDRED, Sub100(OBJ_42));  // non-smi
+assertEquals(ONE_HUNDRED - 42, Sub100Reversed(OBJ_42));  // non-smi
+
+
+function Shr1(x) {
+  return x >>> 1;
+}
+
+function Shr100(x) {
+  return x >>> 100;
+}
+
+function Shr1Reversed(x) {
+  return 1 >>> x;
+}
+
+function Shr100Reversed(x) {
+  return 100 >>> x;
+}
+
+function Sar1(x) {
+  return x >> 1;
+}
+
+function Sar100(x) {
+  return x >> 100;
+}
+
+function Sar1Reversed(x) {
+  return 1 >> x;
+}
+
+function Sar100Reversed(x) {
+  return 100 >> x;
+}
+
+
+assertEquals(0, Shr1(1));
+assertEquals(0, Sar1(1));
+assertEquals(0, Shr1Reversed(2));
+assertEquals(0, Sar1Reversed(2));
+assertEquals(1610612736, Shr1(SMI_MIN));
+assertEquals(-536870912, Sar1(SMI_MIN));
+assertEquals(1, Shr1Reversed(SMI_MIN));
+assertEquals(1, Sar1Reversed(SMI_MIN));
+assertEquals(21, Shr1(OBJ_42));
+assertEquals(21, Sar1(OBJ_42));
+assertEquals(0, Shr1Reversed(OBJ_42));
+assertEquals(0, Sar1Reversed(OBJ_42));
+
+assertEquals(6, Shr100(100), "100 >>> 100");
+assertEquals(6, Sar100(100), "100 >> 100");
+assertEquals(12, Shr100Reversed(99));
+assertEquals(12, Sar100Reversed(99));
+assertEquals(201326592, Shr100(SMI_MIN));
+assertEquals(-67108864, Sar100(SMI_MIN));
+assertEquals(100, Shr100Reversed(SMI_MIN));
+assertEquals(100, Sar100Reversed(SMI_MIN));
+assertEquals(2, Shr100(OBJ_42));
+assertEquals(2, Sar100(OBJ_42));
+assertEquals(0, Shr100Reversed(OBJ_42));
+assertEquals(0, Sar100Reversed(OBJ_42));
+
+
+function Xor1(x) {
+  return x ^ 1;
+}
+
+function Xor100(x) {
+  return x ^ 100;
+}
+
+function Xor1Reversed(x) {
+  return 1 ^ x;
+}
+
+function Xor100Reversed(x) {
+  return 100 ^ x;
+}
+
+
+assertEquals(0, Xor1(1));
+assertEquals(3, Xor1Reversed(2));
+assertEquals(SMI_MIN + 1, Xor1(SMI_MIN));
+assertEquals(SMI_MIN + 1, Xor1Reversed(SMI_MIN));
+assertEquals(43, Xor1(OBJ_42));
+assertEquals(43, Xor1Reversed(OBJ_42));
+
+assertEquals(0, Xor100(100));
+assertEquals(7, Xor100Reversed(99));
+assertEquals(-1073741724, Xor100(SMI_MIN));
+assertEquals(-1073741724, Xor100Reversed(SMI_MIN));
+assertEquals(78, Xor100(OBJ_42));
+assertEquals(78, Xor100Reversed(OBJ_42));
+
+var x = 0x23; var y = 0x35;
+assertEquals(0x16, x ^ y);
+
+
+// Bitwise not.
+var v = 0;
+assertEquals(-1, ~v);
+v = SMI_MIN;
+assertEquals(0x3fffffff, ~v, "~smimin");
+v = SMI_MAX;
+assertEquals(-0x40000000, ~v, "~smimax");
+
+// Overflowing ++ and --.
+v = SMI_MAX;
+v++;
+assertEquals(0x40000000, v, "smimax++");
+v = SMI_MIN;
+v--;
+assertEquals(-0x40000001, v, "smimin--");
+
+// Not actually Smi operations.
+// Check that relations on unary ops work.
+var v = -1.2;
+assertTrue(v == v);
+assertTrue(v === v);
+assertTrue(v <= v);
+assertTrue(v >= v);
+assertFalse(v < v);
+assertFalse(v > v);
+assertFalse(v != v);
+assertFalse(v !== v);
+
+// Right hand side of unary minus is overwritable.
+v = 1.5
+assertEquals(-2.25, -(v * v));
+
+// Smi input to bitop gives non-smi result where the rhs is a float that
+// can be overwritten.
+var x1 = 0x10000000;
+var x2 = 0x40000002;
+var x3 = 0x40000000;
+assertEquals(0x40000000, x1 << (x2 - x3), "0x10000000<<1(1)");
+
+// Smi input to bitop gives non-smi result where the rhs could be overwritten
+// if it were a float, but it isn't.
+x1 = 0x10000000
+x2 = 4
+x3 = 2
+assertEquals(0x40000000, x1 << (x2 - x3), "0x10000000<<2(2)");
+
+
+// Test shift operators on non-smi inputs, giving smi and non-smi results.
+function testShiftNonSmis() {
+  var pos_non_smi = 2000000000;
+  var neg_non_smi = -pos_non_smi;
+  var pos_smi = 1000000000;
+  var neg_smi = -pos_smi;
+
+  // Begin block A
+  assertEquals(pos_non_smi, (pos_non_smi) >> 0);
+  assertEquals(pos_non_smi, (pos_non_smi) >>> 0);
+  assertEquals(pos_non_smi, (pos_non_smi) << 0);
+  assertEquals(neg_non_smi, (neg_non_smi) >> 0);
+  assertEquals(neg_non_smi + 0x100000000, (neg_non_smi) >>> 0);
+  assertEquals(neg_non_smi, (neg_non_smi) << 0);
+  assertEquals(pos_smi, (pos_smi) >> 0, "possmi >> 0");
+  assertEquals(pos_smi, (pos_smi) >>> 0, "possmi >>>0");
+  assertEquals(pos_smi, (pos_smi) << 0, "possmi << 0");
+  assertEquals(neg_smi, (neg_smi) >> 0, "negsmi >> 0");
+  assertEquals(neg_smi + 0x100000000, (neg_smi) >>> 0, "negsmi >>> 0");
+  assertEquals(neg_smi, (neg_smi) << 0, "negsmi << 0");
+
+  assertEquals(pos_non_smi / 2, (pos_non_smi) >> 1);
+  assertEquals(pos_non_smi / 2, (pos_non_smi) >>> 1);
+  assertEquals(-0x1194D800, (pos_non_smi) << 1);
+  assertEquals(pos_non_smi / 8, (pos_non_smi) >> 3);
+  assertEquals(pos_non_smi / 8, (pos_non_smi) >>> 3);
+  assertEquals(-0x46536000, (pos_non_smi) << 3);
+  assertEquals(0x73594000, (pos_non_smi) << 4);
+  assertEquals(pos_non_smi, (pos_non_smi + 0.5) >> 0);
+  assertEquals(pos_non_smi, (pos_non_smi + 0.5) >>> 0);
+  assertEquals(pos_non_smi, (pos_non_smi + 0.5) << 0);
+  assertEquals(pos_non_smi / 2, (pos_non_smi + 0.5) >> 1);
+  assertEquals(pos_non_smi / 2, (pos_non_smi + 0.5) >>> 1);
+  assertEquals(-0x1194D800, (pos_non_smi + 0.5) << 1);
+  assertEquals(pos_non_smi / 8, (pos_non_smi + 0.5) >> 3);
+  assertEquals(pos_non_smi / 8, (pos_non_smi + 0.5) >>> 3);
+  assertEquals(-0x46536000, (pos_non_smi + 0.5) << 3);
+  assertEquals(0x73594000, (pos_non_smi + 0.5) << 4);
+
+  assertEquals(neg_non_smi / 2, (neg_non_smi) >> 1, "negnonsmi >> 1");
+
+  assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_non_smi) >>> 1,
+               "negnonsmi >>> 1");
+  assertEquals(0x1194D800, (neg_non_smi) << 1);
+  assertEquals(neg_non_smi / 8, (neg_non_smi) >> 3);
+  assertEquals(neg_non_smi / 8 + 0x100000000 / 8, (neg_non_smi) >>> 3);
+  assertEquals(0x46536000, (neg_non_smi) << 3);
+  assertEquals(-0x73594000, (neg_non_smi) << 4);
+  assertEquals(neg_non_smi, (neg_non_smi - 0.5) >> 0);
+  assertEquals(neg_non_smi + 0x100000000, (neg_non_smi - 0.5) >>> 0,
+               "negnonsmi.5 >>> 0");
+  assertEquals(neg_non_smi, (neg_non_smi - 0.5) << 0);
+  assertEquals(neg_non_smi / 2, (neg_non_smi - 0.5) >> 1);
+  assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_non_smi - 0.5) >>> 1,
+               "negnonsmi.5 >>> 1");
+  assertEquals(0x1194D800, (neg_non_smi - 0.5) << 1);
+  assertEquals(neg_non_smi / 8, (neg_non_smi - 0.5) >> 3);
+  assertEquals(neg_non_smi / 8 + 0x100000000 / 8, (neg_non_smi - 0.5) >>> 3);
+  assertEquals(0x46536000, (neg_non_smi - 0.5) << 3);
+  assertEquals(-0x73594000, (neg_non_smi - 0.5) << 4);
+
+  assertEquals(pos_smi / 2, (pos_smi) >> 1);
+  assertEquals(pos_smi / 2, (pos_smi) >>> 1);
+  assertEquals(pos_non_smi, (pos_smi) << 1);
+  assertEquals(pos_smi / 8, (pos_smi) >> 3);
+  assertEquals(pos_smi / 8, (pos_smi) >>> 3);
+  assertEquals(-0x2329b000, (pos_smi) << 3);
+  assertEquals(0x73594000, (pos_smi) << 5);
+  assertEquals(pos_smi, (pos_smi + 0.5) >> 0, "possmi.5 >> 0");
+  assertEquals(pos_smi, (pos_smi + 0.5) >>> 0, "possmi.5 >>> 0");
+  assertEquals(pos_smi, (pos_smi + 0.5) << 0, "possmi.5 << 0");
+  assertEquals(pos_smi / 2, (pos_smi + 0.5) >> 1);
+  assertEquals(pos_smi / 2, (pos_smi + 0.5) >>> 1);
+  assertEquals(pos_non_smi, (pos_smi + 0.5) << 1);
+  assertEquals(pos_smi / 8, (pos_smi + 0.5) >> 3);
+  assertEquals(pos_smi / 8, (pos_smi + 0.5) >>> 3);
+  assertEquals(-0x2329b000, (pos_smi + 0.5) << 3);
+  assertEquals(0x73594000, (pos_smi + 0.5) << 5);
+
+  assertEquals(neg_smi / 2, (neg_smi) >> 1);
+  assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_smi) >>> 1);
+  assertEquals(neg_non_smi, (neg_smi) << 1);
+  assertEquals(neg_smi / 8, (neg_smi) >> 3);
+  assertEquals(neg_smi / 8 + 0x100000000 / 8, (neg_smi) >>> 3);
+  assertEquals(0x46536000, (neg_smi) << 4);
+  assertEquals(-0x73594000, (neg_smi) << 5);
+  assertEquals(neg_smi, (neg_smi - 0.5) >> 0, "negsmi.5 >> 0");
+  assertEquals(neg_smi + 0x100000000, (neg_smi - 0.5) >>> 0, "negsmi.5 >>> 0");
+  assertEquals(neg_smi, (neg_smi - 0.5) << 0, "negsmi.5 << 0");
+  assertEquals(neg_smi / 2, (neg_smi - 0.5) >> 1);
+  assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_smi - 0.5) >>> 1);
+  assertEquals(neg_non_smi, (neg_smi - 0.5) << 1);
+  assertEquals(neg_smi / 8, (neg_smi - 0.5) >> 3);
+  assertEquals(neg_smi / 8 + 0x100000000 / 8, (neg_smi - 0.5) >>> 3);
+  assertEquals(0x46536000, (neg_smi - 0.5) << 4);
+  assertEquals(-0x73594000, (neg_smi - 0.5) << 5);
+  // End block A
+
+  // Repeat block A with 2^32 added to positive numbers and
+  // 2^32 subtracted from negative numbers.
+  // Begin block A repeat 1
+  var two_32 = 0x100000000;
+  var neg_32 = -two_32;
+  assertEquals(pos_non_smi, (two_32 + pos_non_smi) >> 0);
+  assertEquals(pos_non_smi, (two_32 + pos_non_smi) >>> 0);
+  assertEquals(pos_non_smi, (two_32 + pos_non_smi) << 0);
+  assertEquals(neg_non_smi, (neg_32 + neg_non_smi) >> 0);
+  assertEquals(neg_non_smi + 0x100000000, (neg_32 + neg_non_smi) >>> 0);
+  assertEquals(neg_non_smi, (neg_32 + neg_non_smi) << 0);
+  assertEquals(pos_smi, (two_32 + pos_smi) >> 0, "2^32+possmi >> 0");
+  assertEquals(pos_smi, (two_32 + pos_smi) >>> 0, "2^32+possmi >>> 0");
+  assertEquals(pos_smi, (two_32 + pos_smi) << 0, "2^32+possmi << 0");
+  assertEquals(neg_smi, (neg_32 + neg_smi) >> 0, "2^32+negsmi >> 0");
+  assertEquals(neg_smi + 0x100000000, (neg_32 + neg_smi) >>> 0);
+  assertEquals(neg_smi, (neg_32 + neg_smi) << 0, "2^32+negsmi << 0");
+
+  assertEquals(pos_non_smi / 2, (two_32 + pos_non_smi) >> 1);
+  assertEquals(pos_non_smi / 2, (two_32 + pos_non_smi) >>> 1);
+  assertEquals(-0x1194D800, (two_32 + pos_non_smi) << 1);
+  assertEquals(pos_non_smi / 8, (two_32 + pos_non_smi) >> 3);
+  assertEquals(pos_non_smi / 8, (two_32 + pos_non_smi) >>> 3);
+  assertEquals(-0x46536000, (two_32 + pos_non_smi) << 3);
+  assertEquals(0x73594000, (two_32 + pos_non_smi) << 4);
+  assertEquals(pos_non_smi, (two_32 + pos_non_smi + 0.5) >> 0);
+  assertEquals(pos_non_smi, (two_32 + pos_non_smi + 0.5) >>> 0);
+  assertEquals(pos_non_smi, (two_32 + pos_non_smi + 0.5) << 0);
+  assertEquals(pos_non_smi / 2, (two_32 + pos_non_smi + 0.5) >> 1);
+  assertEquals(pos_non_smi / 2, (two_32 + pos_non_smi + 0.5) >>> 1);
+  assertEquals(-0x1194D800, (two_32 + pos_non_smi + 0.5) << 1);
+  assertEquals(pos_non_smi / 8, (two_32 + pos_non_smi + 0.5) >> 3);
+  assertEquals(pos_non_smi / 8, (two_32 + pos_non_smi + 0.5) >>> 3);
+  assertEquals(-0x46536000, (two_32 + pos_non_smi + 0.5) << 3);
+  assertEquals(0x73594000, (two_32 + pos_non_smi + 0.5) << 4);
+
+  assertEquals(neg_non_smi / 2, (neg_32 + neg_non_smi) >> 1);
+  assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_32 + neg_non_smi) >>> 1);
+  assertEquals(0x1194D800, (neg_32 + neg_non_smi) << 1);
+  assertEquals(neg_non_smi / 8, (neg_32 + neg_non_smi) >> 3);
+  assertEquals(neg_non_smi / 8 + 0x100000000 / 8, (neg_32 + neg_non_smi) >>> 3);
+  assertEquals(0x46536000, (neg_32 + neg_non_smi) << 3);
+  assertEquals(-0x73594000, (neg_32 + neg_non_smi) << 4);
+  assertEquals(neg_non_smi, (neg_32 + neg_non_smi - 0.5) >> 0);
+  assertEquals(neg_non_smi + 0x100000000, (neg_32 + neg_non_smi - 0.5) >>> 0);
+  assertEquals(neg_non_smi, (neg_32 + neg_non_smi - 0.5) << 0);
+  assertEquals(neg_non_smi / 2, (neg_32 + neg_non_smi - 0.5) >> 1);
+  assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_32 + neg_non_smi - 0.5)
+               >>> 1);
+  assertEquals(0x1194D800, (neg_32 + neg_non_smi - 0.5) << 1);
+  assertEquals(neg_non_smi / 8, (neg_32 + neg_non_smi - 0.5) >> 3);
+  assertEquals(neg_non_smi / 8 + 0x100000000 / 8, (neg_32 + neg_non_smi - 0.5)
+               >>> 3);
+  assertEquals(0x46536000, (neg_32 + neg_non_smi - 0.5) << 3);
+  assertEquals(-0x73594000, (neg_32 + neg_non_smi - 0.5) << 4);
+
+  assertEquals(pos_smi / 2, (two_32 + pos_smi) >> 1);
+  assertEquals(pos_smi / 2, (two_32 + pos_smi) >>> 1);
+  assertEquals(pos_non_smi, (two_32 + pos_smi) << 1);
+  assertEquals(pos_smi / 8, (two_32 + pos_smi) >> 3);
+  assertEquals(pos_smi / 8, (two_32 + pos_smi) >>> 3);
+  assertEquals(-0x2329b000, (two_32 + pos_smi) << 3);
+  assertEquals(0x73594000, (two_32 + pos_smi) << 5);
+  assertEquals(pos_smi, (two_32 + pos_smi + 0.5) >> 0);
+  assertEquals(pos_smi, (two_32 + pos_smi + 0.5) >>> 0);
+  assertEquals(pos_smi, (two_32 + pos_smi + 0.5) << 0);
+  assertEquals(pos_smi / 2, (two_32 + pos_smi + 0.5) >> 1);
+  assertEquals(pos_smi / 2, (two_32 + pos_smi + 0.5) >>> 1);
+  assertEquals(pos_non_smi, (two_32 + pos_smi + 0.5) << 1);
+  assertEquals(pos_smi / 8, (two_32 + pos_smi + 0.5) >> 3);
+  assertEquals(pos_smi / 8, (two_32 + pos_smi + 0.5) >>> 3);
+  assertEquals(-0x2329b000, (two_32 + pos_smi + 0.5) << 3);
+  assertEquals(0x73594000, (two_32 + pos_smi + 0.5) << 5);
+
+  assertEquals(neg_smi / 2, (neg_32 + neg_smi) >> 1);
+  assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_32 + neg_smi) >>> 1);
+  assertEquals(neg_non_smi, (neg_32 + neg_smi) << 1);
+  assertEquals(neg_smi / 8, (neg_32 + neg_smi) >> 3);
+  assertEquals((neg_smi + 0x100000000) / 8, (neg_32 + neg_smi) >>> 3);
+  assertEquals(0x46536000, (neg_32 + neg_smi) << 4);
+  assertEquals(-0x73594000, (neg_32 + neg_smi) << 5);
+  assertEquals(neg_smi, (neg_32 + neg_smi - 0.5) >> 0, "-2^32+negsmi.5 >> 0");
+  assertEquals(neg_smi + 0x100000000, (neg_32 + neg_smi - 0.5) >>> 0);
+  assertEquals(neg_smi, (neg_32 + neg_smi - 0.5) << 0, "-2^32+negsmi.5 << 0");
+  assertEquals(neg_smi / 2, (neg_32 + neg_smi - 0.5) >> 1);
+  assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_32 + neg_smi - 0.5) >>> 1);
+  assertEquals(neg_non_smi, (neg_32 + neg_smi - 0.5) << 1);
+  assertEquals(neg_smi / 8, (neg_32 + neg_smi - 0.5) >> 3);
+  assertEquals(neg_smi / 8 + 0x100000000 / 8, (neg_32 + neg_smi - 0.5) >>> 3);
+  assertEquals(0x46536000, (neg_32 + neg_smi - 0.5) << 4);
+  assertEquals(-0x73594000, (neg_32 + neg_smi - 0.5) << 5);
+  // End block A repeat 1
+  // Repeat block A with shift amounts in variables initialized with
+  // a constant.
+  var zero = 0;
+  var one = 1;
+  var three = 3;
+  var four = 4;
+  var five = 5;
+  // Begin block A repeat 2
+  assertEquals(pos_non_smi, (pos_non_smi) >> zero);
+  assertEquals(pos_non_smi, (pos_non_smi) >>> zero);
+  assertEquals(pos_non_smi, (pos_non_smi) << zero);
+  assertEquals(neg_non_smi, (neg_non_smi) >> zero);
+  assertEquals(neg_non_smi + 0x100000000, (neg_non_smi) >>> zero);
+  assertEquals(neg_non_smi, (neg_non_smi) << zero);
+  assertEquals(pos_smi, (pos_smi) >> zero);
+  assertEquals(pos_smi, (pos_smi) >>> zero);
+  assertEquals(pos_smi, (pos_smi) << zero);
+  assertEquals(neg_smi, (neg_smi) >> zero, "negsmi >> zero");
+  assertEquals(neg_smi + 0x100000000, (neg_smi) >>> zero);
+  assertEquals(neg_smi, (neg_smi) << zero, "negsmi << zero");
+
+  assertEquals(pos_non_smi / 2, (pos_non_smi) >> one);
+  assertEquals(pos_non_smi / 2, (pos_non_smi) >>> one);
+  assertEquals(-0x1194D800, (pos_non_smi) << one);
+  assertEquals(pos_non_smi / 8, (pos_non_smi) >> three);
+  assertEquals(pos_non_smi / 8, (pos_non_smi) >>> three);
+  assertEquals(-0x46536000, (pos_non_smi) << three);
+  assertEquals(0x73594000, (pos_non_smi) << four);
+  assertEquals(pos_non_smi, (pos_non_smi + 0.5) >> zero);
+  assertEquals(pos_non_smi, (pos_non_smi + 0.5) >>> zero);
+  assertEquals(pos_non_smi, (pos_non_smi + 0.5) << zero);
+  assertEquals(pos_non_smi / 2, (pos_non_smi + 0.5) >> one);
+  assertEquals(pos_non_smi / 2, (pos_non_smi + 0.5) >>> one);
+  assertEquals(-0x1194D800, (pos_non_smi + 0.5) << one);
+  assertEquals(pos_non_smi / 8, (pos_non_smi + 0.5) >> three);
+  assertEquals(pos_non_smi / 8, (pos_non_smi + 0.5) >>> three);
+  assertEquals(-0x46536000, (pos_non_smi + 0.5) << three);
+  assertEquals(0x73594000, (pos_non_smi + 0.5) << four);
+
+  assertEquals(neg_non_smi / 2, (neg_non_smi) >> one);
+  assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_non_smi) >>> one);
+  assertEquals(0x1194D800, (neg_non_smi) << one);
+  assertEquals(neg_non_smi / 8, (neg_non_smi) >> three);
+  assertEquals(neg_non_smi / 8 + 0x100000000 / 8, (neg_non_smi) >>> three);
+  assertEquals(0x46536000, (neg_non_smi) << three);
+  assertEquals(-0x73594000, (neg_non_smi) << four);
+  assertEquals(neg_non_smi, (neg_non_smi - 0.5) >> zero);
+  assertEquals(neg_non_smi + 0x100000000, (neg_non_smi - 0.5) >>> zero);
+  assertEquals(neg_non_smi, (neg_non_smi - 0.5) << zero);
+  assertEquals(neg_non_smi / 2, (neg_non_smi - 0.5) >> one);
+  assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_non_smi - 0.5) >>> one);
+  assertEquals(0x1194D800, (neg_non_smi - 0.5) << one);
+  assertEquals(neg_non_smi / 8, (neg_non_smi - 0.5) >> three);
+  assertEquals(neg_non_smi / 8 + 0x100000000 / 8, (neg_non_smi - 0.5)
+      >>> three);
+  assertEquals(0x46536000, (neg_non_smi - 0.5) << three);
+  assertEquals(-0x73594000, (neg_non_smi - 0.5) << four);
+
+  assertEquals(pos_smi / 2, (pos_smi) >> one);
+  assertEquals(pos_smi / 2, (pos_smi) >>> one);
+  assertEquals(pos_non_smi, (pos_smi) << one);
+  assertEquals(pos_smi / 8, (pos_smi) >> three);
+  assertEquals(pos_smi / 8, (pos_smi) >>> three);
+  assertEquals(-0x2329b000, (pos_smi) << three);
+  assertEquals(0x73594000, (pos_smi) << five);
+  assertEquals(pos_smi, (pos_smi + 0.5) >> zero);
+  assertEquals(pos_smi, (pos_smi + 0.5) >>> zero);
+  assertEquals(pos_smi, (pos_smi + 0.5) << zero);
+  assertEquals(pos_smi / 2, (pos_smi + 0.5) >> one);
+  assertEquals(pos_smi / 2, (pos_smi + 0.5) >>> one);
+  assertEquals(pos_non_smi, (pos_smi + 0.5) << one);
+  assertEquals(pos_smi / 8, (pos_smi + 0.5) >> three);
+  assertEquals(pos_smi / 8, (pos_smi + 0.5) >>> three);
+  assertEquals(-0x2329b000, (pos_smi + 0.5) << three);
+  assertEquals(0x73594000, (pos_smi + 0.5) << five);
+
+  assertEquals(neg_smi / 2, (neg_smi) >> one);
+  assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_smi) >>> one);
+  assertEquals(neg_non_smi, (neg_smi) << one);
+  assertEquals(neg_smi / 8, (neg_smi) >> three);
+  assertEquals(neg_smi / 8 + 0x100000000 / 8, (neg_smi) >>> three);
+  assertEquals(0x46536000, (neg_smi) << four);
+  assertEquals(-0x73594000, (neg_smi) << five);
+  assertEquals(neg_smi, (neg_smi - 0.5) >> zero);
+  assertEquals(neg_smi + 0x100000000, (neg_smi - 0.5) >>> zero);
+  assertEquals(neg_smi, (neg_smi - 0.5) << zero);
+  assertEquals(neg_smi / 2, (neg_smi - 0.5) >> one);
+  assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_smi - 0.5) >>> one);
+  assertEquals(neg_non_smi, (neg_smi - 0.5) << one);
+  assertEquals(neg_smi / 8, (neg_smi - 0.5) >> three);
+  assertEquals(neg_smi / 8 + 0x100000000 / 8, (neg_smi - 0.5) >>> three);
+  assertEquals(0x46536000, (neg_smi - 0.5) << four);
+  assertEquals(-0x73594000, (neg_smi - 0.5) << five);
+  // End block A repeat 2
+
+  // Repeat previous block, with computed values in the shift variables.
+  five = 0;
+  while (five < 5) ++five;
+  four = five - one;
+  three = four - one;
+  one = four - three;
+  zero = one - one;
+
+  // Begin block A repeat 3
+  assertEquals(pos_non_smi, (pos_non_smi) >> zero);
+  assertEquals(pos_non_smi, (pos_non_smi) >>> zero);
+  assertEquals(pos_non_smi, (pos_non_smi) << zero);
+  assertEquals(neg_non_smi, (neg_non_smi) >> zero);
+  assertEquals(neg_non_smi + 0x100000000, (neg_non_smi) >>> zero);
+  assertEquals(neg_non_smi, (neg_non_smi) << zero);
+  assertEquals(pos_smi, (pos_smi) >> zero);
+  assertEquals(pos_smi, (pos_smi) >>> zero);
+  assertEquals(pos_smi, (pos_smi) << zero);
+  assertEquals(neg_smi, (neg_smi) >> zero, "negsmi >> zero(2)");
+  assertEquals(neg_smi + 0x100000000, (neg_smi) >>> zero);
+  assertEquals(neg_smi, (neg_smi) << zero, "negsmi << zero(2)");
+
+  assertEquals(pos_non_smi / 2, (pos_non_smi) >> one);
+  assertEquals(pos_non_smi / 2, (pos_non_smi) >>> one);
+  assertEquals(-0x1194D800, (pos_non_smi) << one);
+  assertEquals(pos_non_smi / 8, (pos_non_smi) >> three);
+  assertEquals(pos_non_smi / 8, (pos_non_smi) >>> three);
+  assertEquals(-0x46536000, (pos_non_smi) << three);
+  assertEquals(0x73594000, (pos_non_smi) << four);
+  assertEquals(pos_non_smi, (pos_non_smi + 0.5) >> zero);
+  assertEquals(pos_non_smi, (pos_non_smi + 0.5) >>> zero);
+  assertEquals(pos_non_smi, (pos_non_smi + 0.5) << zero);
+  assertEquals(pos_non_smi / 2, (pos_non_smi + 0.5) >> one);
+  assertEquals(pos_non_smi / 2, (pos_non_smi + 0.5) >>> one);
+  assertEquals(-0x1194D800, (pos_non_smi + 0.5) << one);
+  assertEquals(pos_non_smi / 8, (pos_non_smi + 0.5) >> three);
+  assertEquals(pos_non_smi / 8, (pos_non_smi + 0.5) >>> three);
+  assertEquals(-0x46536000, (pos_non_smi + 0.5) << three);
+  assertEquals(0x73594000, (pos_non_smi + 0.5) << four);
+
+  assertEquals(neg_non_smi / 2, (neg_non_smi) >> one);
+  assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_non_smi) >>> one);
+  assertEquals(0x1194D800, (neg_non_smi) << one);
+  assertEquals(neg_non_smi / 8, (neg_non_smi) >> three);
+  assertEquals(neg_non_smi / 8 + 0x100000000 / 8, (neg_non_smi) >>> three);
+  assertEquals(0x46536000, (neg_non_smi) << three);
+  assertEquals(-0x73594000, (neg_non_smi) << four);
+  assertEquals(neg_non_smi, (neg_non_smi - 0.5) >> zero);
+  assertEquals(neg_non_smi + 0x100000000, (neg_non_smi - 0.5) >>> zero);
+  assertEquals(neg_non_smi, (neg_non_smi - 0.5) << zero);
+  assertEquals(neg_non_smi / 2, (neg_non_smi - 0.5) >> one);
+  assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_non_smi - 0.5) >>> one);
+  assertEquals(0x1194D800, (neg_non_smi - 0.5) << one);
+  assertEquals(neg_non_smi / 8, (neg_non_smi - 0.5) >> three);
+  assertEquals(neg_non_smi / 8 + 0x100000000 / 8, (neg_non_smi - 0.5)
+      >>> three);
+  assertEquals(0x46536000, (neg_non_smi - 0.5) << three);
+  assertEquals(-0x73594000, (neg_non_smi - 0.5) << four);
+
+  assertEquals(pos_smi / 2, (pos_smi) >> one);
+  assertEquals(pos_smi / 2, (pos_smi) >>> one);
+  assertEquals(pos_non_smi, (pos_smi) << one);
+  assertEquals(pos_smi / 8, (pos_smi) >> three);
+  assertEquals(pos_smi / 8, (pos_smi) >>> three);
+  assertEquals(-0x2329b000, (pos_smi) << three);
+  assertEquals(0x73594000, (pos_smi) << five);
+  assertEquals(pos_smi, (pos_smi + 0.5) >> zero);
+  assertEquals(pos_smi, (pos_smi + 0.5) >>> zero);
+  assertEquals(pos_smi, (pos_smi + 0.5) << zero);
+  assertEquals(pos_smi / 2, (pos_smi + 0.5) >> one);
+  assertEquals(pos_smi / 2, (pos_smi + 0.5) >>> one);
+  assertEquals(pos_non_smi, (pos_smi + 0.5) << one);
+  assertEquals(pos_smi / 8, (pos_smi + 0.5) >> three);
+  assertEquals(pos_smi / 8, (pos_smi + 0.5) >>> three);
+  assertEquals(-0x2329b000, (pos_smi + 0.5) << three);
+  assertEquals(0x73594000, (pos_smi + 0.5) << five);
+
+  assertEquals(neg_smi / 2, (neg_smi) >> one);
+  assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_smi) >>> one);
+  assertEquals(neg_non_smi, (neg_smi) << one);
+  assertEquals(neg_smi / 8, (neg_smi) >> three);
+  assertEquals(neg_smi / 8 + 0x100000000 / 8, (neg_smi) >>> three);
+  assertEquals(0x46536000, (neg_smi) << four);
+  assertEquals(-0x73594000, (neg_smi) << five);
+  assertEquals(neg_smi, (neg_smi - 0.5) >> zero, "negsmi.5 >> zero");
+  assertEquals(neg_smi + 0x100000000, (neg_smi - 0.5) >>> zero);
+  assertEquals(neg_smi, (neg_smi - 0.5) << zero, "negsmi.5 << zero");
+  assertEquals(neg_smi / 2, (neg_smi - 0.5) >> one);
+  assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_smi - 0.5) >>> one);
+  assertEquals(neg_non_smi, (neg_smi - 0.5) << one);
+  assertEquals(neg_smi / 8, (neg_smi - 0.5) >> three);
+  assertEquals(neg_smi / 8 + 0x100000000 / 8, (neg_smi - 0.5) >>> three);
+  assertEquals(0x46536000, (neg_smi - 0.5) << four);
+  assertEquals(-0x73594000, (neg_smi - 0.5) << five);
+  // End block A repeat 3
+
+  // Test non-integer shift value
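+  // The shift count is converted to an integer and masked to five bits,
+  // so a fractional count like 2.4 truncates to 2 and 20.5 >> 2 === 5.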
+  assertEquals(5, 20.5 >> 2.4);
+  assertEquals(5, 20.5 >> 2.7);
+  var shift = 2.4;
+  assertEquals(5, 20.5 >> shift);
+  assertEquals(5, 20.5 >> shift + 0.3);
+  shift = shift + zero;
+  assertEquals(5, 20.5 >> shift);
+  assertEquals(5, 20.5 >> shift + 0.3);
+}
+
+testShiftNonSmis();
+
+function intConversion() {
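+  // x | 0 truncates via ToInt32, so scaling x by a factor slightly below
+  // 1 moves the truncated result one integer toward zero for |x| >= 1.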
+  function foo(x) {
+    assertEquals(x, (x * 1.0000000001) | 0, "foo more " + x);
+    assertEquals(x, x | 0, "foo " + x);
+    if (x > 0) {
+      assertEquals(x - 1, (x * 0.9999999999) | 0, "foo less " + x);
+    } else {
+      assertEquals(x + 1, (x * 0.9999999999) | 0, "foo less " + x);
+    }
+  }
+  for (var i = 1; i < 0x80000000; i *= 2) {
+    foo(i);
+    foo(-i);
+  }
+  for (var i = 1; i < 1/0; i *= 2) {
+    assertEquals(i | 0, (i * 1.0000000000000001) | 0, "b" + i);
+    assertEquals(-i | 0, (i * -1.0000000000000001) | 0, "c" + i);
+  }
+  for (var i = 0.5; i > 0; i /= 2) {
+    assertEquals(0, i | 0, "d" + i);
+    assertEquals(0, -i | 0, "e" + i);
+  }
+}
+
+intConversion();
+
+// Verify that we handle the (optimized) corner case of shifting by
+// zero even for non-smis.
+function shiftByZero(n) { return n << 0; }
+
+assertEquals(3, shiftByZero(3.1415));
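+// Even with a shift count of zero, ToInt32 must truncate 3.1415 to 3, so
+// the shift cannot simply be optimized into returning its input unchanged.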
index 8fa6fec..7945855 100644 (file)
@@ -699,3 +699,6 @@ assertEquals(24, LeftShiftThreeBy(-29));
 // allocations we got the Smi overflow case wrong.
 function f(x, y) { return y +  ( 1 << (x & 31)); }
 assertEquals(-2147483647, f(31, 1));
+
+// Regression test for correct handling of overflow in smi comparison.
+assertTrue(-0x40000000 < 42);
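+// -0x40000000 == -2^30 is the smallest smi on 32-bit platforms; comparing
+// it with 42 via a subtraction overflows the smi range, so the comparison
+// must handle the overflow case explicitly.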
index 26fba10..73b310f 100644 (file)
@@ -25,7 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --always-compact
 //
 // Regression test for the r1512 fix.
 
diff --git a/test/mjsunit/sum-0-plus-undefined-is-NaN.js b/test/mjsunit/sum-0-plus-undefined-is-NaN.js
new file mode 100644 (file)
index 0000000..fb98d0c
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/**
+ * @fileoverview Test addition of 0 and undefined.
+ */
+
+function sum(a, b) { return a + b; }
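+// ToNumber(undefined) is NaN, so 0 + undefined evaluates to NaN while
+// 0 + 1 remains an ordinary number.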
+
+function test(x, y, expectNaN) {
+  for (var i = 0; i < 1000; i++) {
+    assertEquals(expectNaN, isNaN(sum(x, y)));
+  }
+}
+
+test(0, 1, false);
+test(0, undefined, true);
index 15ab7bf..39dec72 100644 (file)
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --nofull-compiler
-
 // The type of a regular expression should be 'function', including in
 // the context of string equality comparisons.
 
index 38d8349..b1e9ba7 100644 (file)
@@ -45,6 +45,7 @@
 prefix mozilla
 def FAIL_OK = FAIL, OKAY
 
+
 ##################### SKIPPED TESTS #####################
 
 # This test checks that we behave properly in an out-of-memory
@@ -813,6 +814,29 @@ js1_5/Regress/regress-312588: TIMEOUT || SKIP if $FAST == yes
 js1_5/Regress/regress-271716-n: PASS || SKIP if $FAST == yes
 
 
+[ $ARCH == arm ]
+
+# Times out and prints so much output that we need to skip it to avoid
+# hanging the builder.
+js1_5/extensions/regress-342960: SKIP
+
+# BUG(3251229): Times out when running new crankshaft test script.
+ecma/Date/15.9.5.12-2: SKIP
+ecma/Date/15.9.5.11-2: SKIP
+ecma/Date/15.9.5.10-2: SKIP
+ecma/Date/15.9.5.8: SKIP
+ecma_3/RegExp/regress-311414: SKIP
+js1_5/Array/regress-99120-02: SKIP
+js1_5/Regress/regress-203278-1: SKIP
+js1_5/Regress/regress-404755: SKIP
+js1_5/Regress/regress-451322: SKIP
+js1_5/extensions/regress-371636: SKIP
+
+
 [ $FAST == yes && $ARCH == arm ]
 
 # In fast mode on arm we try to skip all tests that would time out,
index bc8c1e3..966500d 100644 (file)
@@ -193,7 +193,6 @@ S9.9_A1: FAIL_OK
 S9.9_A2: FAIL_OK
 
 
-
 ##################### SKIPPED TESTS #####################
 
 # These tests take a looong time to run in debug mode.
@@ -254,6 +253,20 @@ S11.4.3_A3.6: FAIL_OK
 S15.10.7_A3_T2: FAIL_OK
 S15.10.7_A3_T1: FAIL_OK
 
+[ $arch == arm ]
+
+# BUG(3251225): Tests that timeout with --nocrankshaft.
+S15.1.3.1_A2.4_T1: SKIP
+S15.1.3.1_A2.5_T1: SKIP
+S15.1.3.2_A2.4_T1: SKIP
+S15.1.3.2_A2.5_T1: SKIP
+S15.1.3.3_A2.3_T1: SKIP
+S15.1.3.4_A2.3_T1: SKIP
+
 [ $arch == mips ]
 
 # Skip all tests on MIPS.
index c1a5aab..3ba6422 100644 (file)
         '../../src/ast.cc',
         '../../src/ast-inl.h',
         '../../src/ast.h',
+        '../../src/atomicops_internals_x86_gcc.cc',
         '../../src/bignum.cc',
         '../../src/bignum.h',
         '../../src/bignum-dtoa.cc',
         '../../src/debug.h',
         '../../src/debug-agent.cc',
         '../../src/debug-agent.h',
+        '../../src/deoptimizer.cc',
        '../../src/deoptimizer.h',
         '../../src/disasm.h',
         '../../src/disassembler.cc',
         '../../src/disassembler.h',
         '../../src/heap.h',
         '../../src/heap-profiler.cc',
         '../../src/heap-profiler.h',
+        '../../src/hydrogen.cc',
+        '../../src/hydrogen.h',
+        '../../src/hydrogen-instructions.cc',
+        '../../src/hydrogen-instructions.h',
         '../../src/ic-inl.h',
         '../../src/ic.cc',
         '../../src/ic.h',
         '../../src/jsregexp.h',
         '../../src/list-inl.h',
         '../../src/list.h',
+        '../../src/lithium-allocator.cc',
+        '../../src/lithium-allocator.h',
         '../../src/liveedit.cc',
         '../../src/liveedit.h',
         '../../src/log-inl.h',
         '../../src/rewriter.h',
         '../../src/runtime.cc',
         '../../src/runtime.h',
+        '../../src/runtime-profiler.cc',
+        '../../src/runtime-profiler.h',
+        '../../src/safepoint-table.cc',
+        '../../src/safepoint-table.h',
         '../../src/scanner-base.cc',
         '../../src/scanner-base.h',
         '../../src/scanner.cc',
             '../../src/arm/constants-arm.cc',
             '../../src/arm/cpu-arm.cc',
             '../../src/arm/debug-arm.cc',
+            '../../src/arm/deoptimizer-arm.cc',
             '../../src/arm/disasm-arm.cc',
             '../../src/arm/frames-arm.cc',
             '../../src/arm/frames-arm.h',
             '../../src/ia32/codegen-ia32.h',
             '../../src/ia32/cpu-ia32.cc',
             '../../src/ia32/debug-ia32.cc',
+            '../../src/ia32/deoptimizer-ia32.cc',
             '../../src/ia32/disasm-ia32.cc',
             '../../src/ia32/frames-ia32.cc',
             '../../src/ia32/frames-ia32.h',
             '../../src/ia32/full-codegen-ia32.cc',
             '../../src/ia32/ic-ia32.cc',
             '../../src/ia32/jump-target-ia32.cc',
+            '../../src/ia32/lithium-codegen-ia32.cc',
+            '../../src/ia32/lithium-codegen-ia32.h',
+            '../../src/ia32/lithium-ia32.cc',
+            '../../src/ia32/lithium-ia32.h',
             '../../src/ia32/macro-assembler-ia32.cc',
             '../../src/ia32/macro-assembler-ia32.h',
             '../../src/ia32/regexp-macro-assembler-ia32.cc',
             '../../src/x64/codegen-x64.h',
             '../../src/x64/cpu-x64.cc',
             '../../src/x64/debug-x64.cc',
+            '../../src/x64/deoptimizer-x64.cc',
             '../../src/x64/disasm-x64.cc',
             '../../src/x64/frames-x64.cc',
             '../../src/x64/frames-x64.h',
old mode 100755 (executable)
new mode 100644 (file)
index 4b916f8..6aa9831 100755 (executable)
@@ -358,7 +358,7 @@ class TestCase(object):
     full_command = self.context.processor(command)
     output = Execute(full_command,
                      self.context,
-                     self.context.GetTimeout(self.mode))
+                     self.context.GetTimeout(self, self.mode))
     self.Cleanup()
     return TestOutput(self,
                       full_command,
@@ -569,7 +569,7 @@ class TestSuite(object):
 
 # Use this to run several variants of the tests, e.g.:
 # VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
-VARIANT_FLAGS = [[]]
+VARIANT_FLAGS = [[], ['--stress-opt', '--always-opt'], ['--nocrankshaft']]
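+# Each test now runs in three variants by default: with no extra flags,
+# with --stress-opt --always-opt, and with crankshaft disabled.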
 
 
 class TestRepository(TestSuite):
@@ -673,8 +673,12 @@ class Context(object):
   def GetVmFlags(self, testcase, mode):
     return testcase.variant_flags + FLAGS[mode]
 
-  def GetTimeout(self, mode):
-    return self.timeout * TIMEOUT_SCALEFACTOR[mode]
+  def GetTimeout(self, testcase, mode):
+    result = self.timeout * TIMEOUT_SCALEFACTOR[mode]
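+    # Runs with --stress-opt are considerably slower, so give them twice
+    # the normal time budget.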
+    if '--stress-opt' in self.GetVmFlags(testcase, mode):
+      return result * 2
+    else:
+      return result
 
 def RunTestCases(cases_to_run, progress, tasks):
   progress = PROGRESS_INDICATORS[progress](cases_to_run)
@@ -1159,12 +1163,18 @@ def BuildOptions():
   result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
         dest="suppress_dialogs", action="store_false")
   result.add_option("--shell", help="Path to V8 shell", default="shell")
-  result.add_option("--store-unexpected-output", 
+  result.add_option("--store-unexpected-output",
       help="Store the temporary JS files from tests that fails",
       dest="store_unexpected_output", default=True, action="store_true")
-  result.add_option("--no-store-unexpected-output", 
+  result.add_option("--no-store-unexpected-output",
       help="Deletes the temporary JS files from tests that fails",
       dest="store_unexpected_output", action="store_false")
+  result.add_option("--stress-only",
+                    help="Only run tests with --always-opt --stress-opt",
+                    default=False, action="store_true")
+  result.add_option("--nostress",
+                    help="Don't run crankshaft --always-opt --stress-op test",
+                    default=False, action="store_true")
   return result
 
 
@@ -1194,6 +1204,11 @@ def ProcessOptions(options):
     options.scons_flags.append("arch=" + options.arch)
   if options.snapshot:
     options.scons_flags.append("snapshot=on")
+  global VARIANT_FLAGS
+  if options.stress_only:
+    VARIANT_FLAGS = [['--stress-opt', '--always-opt']]
+  if options.nostress:
+    VARIANT_FLAGS = [[], ['--nocrankshaft']]
   return True
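
Taken together, the two new options select which slice of the variant matrix runs. A sketch mirroring the ProcessOptions logic above (note that in the original, --nostress is applied after --stress-only, so it wins if both are given — the same ordering is kept here):

    def variants_for(stress_only, nostress):
        # Default: all three variants.
        flags = [[], ['--stress-opt', '--always-opt'], ['--nocrankshaft']]
        if stress_only:
            flags = [['--stress-opt', '--always-opt']]
        if nostress:
            flags = [[], ['--nocrankshaft']]
        return flags
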
 
 
index 902faff..fc61f46 100644 (file)
                                >
                        </File>
                        <File
+                               RelativePath="..\..\src\hydrogen.cc"
+                               >
+                       </File>
+                       <File
+                               RelativePath="..\..\src\hydrogen.h"
+                               >
+                       </File>
+                       <File
+                               RelativePath="..\..\src\hydrogen-instructions.cc"
+                               >
+                       </File>
+                       <File
+                               RelativePath="..\..\src\hydrogen-instructions.h"
+                               >
+                       </File>
+                       <File
                                RelativePath="..\..\src\ia32\ic-ia32.cc"
                                >
                        </File>
                                >
                        </File>
                        <File
+                               RelativePath="..\..\src\runtime-profiler.cc"
+                               >
+                       </File>
+                       <File
+                               RelativePath="..\..\src\runtime-profiler.h"
+                               >
+                       </File>
+                       <File
+                               RelativePath="..\..\src\safepoint-table.cc"
+                               >
+                       </File>
+                       <File
+                               RelativePath="..\..\src\safepoint-table.h"
+                               >
+                       </File>
+                       <File
                                RelativePath="..\..\src\scanner-base.cc"
                                >
                        </File>
                                >
                        </File>
                        <File
+                               RelativePath="..\..\include\v8-testing.h"
+                               >
+                       </File>
+                       <File
                                RelativePath="..\..\include\v8.h"
                                >
                        </File>
index b87fdf8..949e6df 100644 (file)
                                >
                        </File>
                        <File
+                               RelativePath="..\..\include\v8-testing.h"
+                               >
+                       </File>
+                       <File
                                RelativePath="..\..\include\v8.h"
                                >
                        </File>
index 6d27472..5abe923 100644 (file)
                                >
                        </File>
                        <File
+                               RelativePath="..\..\include\v8-testing.h"
+                               >
+                       </File>
+                       <File
                                RelativePath="..\..\include\v8.h"
                                >
                        </File>