Enable stub generation using Hydrogen/Lithium (again)
author     danno@chromium.org <danno@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
           Mon, 3 Dec 2012 15:51:05 +0000 (15:51 +0000)
committer  danno@chromium.org <danno@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
           Mon, 3 Dec 2012 15:51:05 +0000 (15:51 +0000)
This initial implementation generates only KeyedLoadICs using the new Hydrogen stub infrastructure.

Committed: https://code.google.com/p/v8/source/detail?r=13105

Review URL: https://codereview.chromium.org/10701054

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13117 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
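
For orientation before the file list and diff: below is a minimal, self-contained
toy model (not V8 code; every type here is a simplified stand-in) of the plumbing
this patch introduces. A Hydrogen-compiled stub publishes its calling convention
through a CodeStubInterfaceDescriptor -- a parameter count, the fixed parameter
registers, and the miss handler it deoptimizes to -- and the Lithium builder
assigns stub parameters to those registers instead of stack slots (see the
code-stubs-arm.cc and lithium-arm.cc hunks below).

    // Toy model only. "Register", the descriptor layout, and
    // ParameterRegister() are simplified stand-ins for the real
    // CodeStubInterfaceDescriptor / LChunkBuilder::DoParameter code.
    #include <cassert>
    #include <cstdio>

    struct Register { int code; };

    struct CodeStubInterfaceDescriptor {
      int register_param_count;            // how many fixed-register parameters
      const Register* register_params;     // which registers carry them
      const char* deoptimization_handler;  // stand-in for the miss handler code
    };

    // Stubs read parameters from the descriptor's fixed registers instead of
    // spill slots (compare LChunkBuilder::DoParameter in lithium-arm.cc below).
    Register ParameterRegister(const CodeStubInterfaceDescriptor& d, int i) {
      assert(i >= 0 && i < d.register_param_count);
      return d.register_params[i];
    }

    int main() {
      const Register r0{0}, r1{1};
      // KeyedLoadFastElementStub on ARM: two parameters in {r1, r0}, missing
      // to KeyedLoadIC_Miss (see the code-stubs-arm.cc hunk below).
      const Register params[] = {r1, r0};
      CodeStubInterfaceDescriptor keyed_load{2, params, "KeyedLoadIC_Miss"};
      std::printf("param 0 -> r%d, param 1 -> r%d, miss -> %s\n",
                  ParameterRegister(keyed_load, 0).code,
                  ParameterRegister(keyed_load, 1).code,
                  keyed_load.deoptimization_handler);
      return 0;
    }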

96 files changed:
Makefile
src/arm/assembler-arm-inl.h
src/arm/assembler-arm.cc
src/arm/assembler-arm.h
src/arm/builtins-arm.cc
src/arm/code-stubs-arm.cc
src/arm/code-stubs-arm.h
src/arm/codegen-arm.cc
src/arm/codegen-arm.h
src/arm/deoptimizer-arm.cc
src/arm/lithium-arm.cc
src/arm/lithium-arm.h
src/arm/lithium-codegen-arm.cc
src/arm/lithium-codegen-arm.h
src/arm/lithium-gap-resolver-arm.cc
src/arm/macro-assembler-arm.cc
src/arm/macro-assembler-arm.h
src/arm/stub-cache-arm.cc
src/assembler.cc
src/assembler.h
src/ast.cc
src/ast.h
src/builtins.h
src/code-stubs-hydrogen.cc [new file with mode: 0644]
src/code-stubs.cc
src/code-stubs.h
src/codegen.cc
src/compiler.cc
src/compiler.h
src/deoptimizer.cc
src/deoptimizer.h
src/disassembler.cc
src/frames-inl.h
src/frames.cc
src/frames.h
src/full-codegen.cc
src/full-codegen.h
src/hydrogen.cc
src/hydrogen.h
src/ia32/assembler-ia32.cc
src/ia32/assembler-ia32.h
src/ia32/builtins-ia32.cc
src/ia32/code-stubs-ia32.cc
src/ia32/code-stubs-ia32.h
src/ia32/deoptimizer-ia32.cc
src/ia32/lithium-codegen-ia32.cc
src/ia32/lithium-codegen-ia32.h
src/ia32/lithium-gap-resolver-ia32.cc
src/ia32/lithium-gap-resolver-ia32.h
src/ia32/lithium-ia32.cc
src/ia32/lithium-ia32.h
src/ia32/macro-assembler-ia32.cc
src/ia32/macro-assembler-ia32.h
src/ia32/stub-cache-ia32.cc
src/ic.cc
src/isolate.cc
src/isolate.h
src/lithium-allocator.cc
src/lithium-allocator.h
src/lithium.cc
src/lithium.h
src/log.cc
src/mips/codegen-mips.h
src/objects-inl.h
src/objects.cc
src/objects.h
src/optimizing-compiler-thread.h
src/prettyprinter.cc
src/prettyprinter.h
src/rewriter.cc
src/runtime.cc
src/runtime.h
src/safepoint-table.cc
src/serialize.cc
src/serialize.h
src/smart-pointers.h
src/spaces.cc
src/stub-cache.h
src/utils.h
src/x64/assembler-x64.cc
src/x64/assembler-x64.h
src/x64/builtins-x64.cc
src/x64/code-stubs-x64.cc
src/x64/code-stubs-x64.h
src/x64/codegen-x64.h
src/x64/deoptimizer-x64.cc
src/x64/lithium-codegen-x64.cc
src/x64/lithium-codegen-x64.h
src/x64/lithium-x64.cc
src/x64/lithium-x64.h
src/x64/macro-assembler-x64.cc
src/x64/macro-assembler-x64.h
src/x64/stub-cache-x64.cc
test/cctest/test-mark-compact.cc
test/mjsunit/fuzz-natives-part1.js
tools/gyp/v8.gyp

index b65ea4c..cf94838 100644
--- a/Makefile
+++ b/Makefile
@@ -81,6 +81,12 @@ endif
 ifeq ($(liveobjectlist), on)
   GYPFLAGS += -Dv8_use_liveobjectlist=true
 endif
+# vfp2=off
+ifeq ($(vfp2), off)
+  GYPFLAGS += -Dv8_can_use_vfp2_instructions=false
+else
+  GYPFLAGS += -Dv8_can_use_vfp2_instructions=true
+endif
 # vfp3=off
 ifeq ($(vfp3), off)
   GYPFLAGS += -Dv8_can_use_vfp3_instructions=false
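
Note: with this block in place, building with "make vfp2=off" passes
-Dv8_can_use_vfp2_instructions=false to GYP; any other setting (or omitting
the flag) keeps VFP2 enabled. The new switch mirrors the existing vfp3 one.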
index acd61fe..7b79542 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -47,6 +47,15 @@ namespace v8 {
 namespace internal {
 
 
+ArmDoubleRegister ArmDoubleRegister::FromAllocationIndex(int index) {
+  if (CpuFeatures::IsSupported(VFP2)) {
+    return DwVfpRegister::FromAllocationIndex(index);
+  } else {
+    return SoftFloatRegister::FromAllocationIndex(index);
+  }
+}
+
+
 int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
   ASSERT(!reg.is(kDoubleRegZero));
   ASSERT(!reg.is(kScratchDoubleReg));
index 47ea0e2..42990f6 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -85,6 +85,33 @@ static unsigned CpuFeaturesImpliedByCompiler() {
 }
 
 
+int Register::NumAllocatableRegisters() {
+  if (CpuFeatures::IsSupported(VFP2)) {
+    return kMaxNumAllocatableRegisters;
+  } else {
+    return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double;
+  }
+}
+
+
+int DoubleRegister::NumAllocatableRegisters() {
+  if (CpuFeatures::IsSupported(VFP2)) {
+    return DwVfpRegister::kMaxNumAllocatableRegisters;
+  } else {
+    return SoftFloatRegister::kMaxNumAllocatableRegisters;
+  }
+}
+
+
+const char* DoubleRegister::AllocationIndexToString(int index) {
+  if (CpuFeatures::IsSupported(VFP2)) {
+    return DwVfpRegister::AllocationIndexToString(index);
+  } else {
+    return SoftFloatRegister::AllocationIndexToString(index);
+  }
+}
+
+
 void CpuFeatures::Probe() {
   unsigned standard_features = static_cast<unsigned>(
       OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
index 3b9bb80..18c1376 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -71,21 +71,23 @@ namespace internal {
 // Core register
 struct Register {
   static const int kNumRegisters = 16;
-  static const int kNumAllocatableRegisters = 8;
+  static const int kMaxNumAllocatableRegisters = 8;
+  static const int kGPRsPerNonVFP2Double = 2;
+  static int NumAllocatableRegisters();
   static const int kSizeInBytes = 4;
 
   static int ToAllocationIndex(Register reg) {
-    ASSERT(reg.code() < kNumAllocatableRegisters);
+    ASSERT(reg.code() < NumAllocatableRegisters());
     return reg.code();
   }
 
   static Register FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < NumAllocatableRegisters());
     return from_code(index);
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < NumAllocatableRegisters());
     const char* const names[] = {
       "r0",
       "r1",
@@ -188,26 +190,57 @@ struct SwVfpRegister {
 };
 
 
-// Double word VFP register.
-struct DwVfpRegister {
-  static const int kNumRegisters = 16;
+struct ArmDoubleRegister {
+  static const int kMaxNumRegisters = 16;
   // A few double registers are reserved: one as a scratch register and one to
   // hold 0.0, that does not fit in the immediate field of vmov instructions.
   //  d14: 0.0
   //  d15: scratch register.
   static const int kNumReservedRegisters = 2;
-  static const int kNumAllocatableRegisters = kNumRegisters -
+  static const int kMaxNumAllocatableRegisters = kMaxNumRegisters -
       kNumReservedRegisters;
+  explicit ArmDoubleRegister(int code) { code_ = code; }
+  static int NumAllocatableRegisters();
+  static int NumRegisters() { return kNumRegisters; }
+  static const char* AllocationIndexToString(int index);
+  inline static ArmDoubleRegister FromAllocationIndex(int index);
+  inline static int ToAllocationIndex(ArmDoubleRegister reg) {
+    return reg.code();
+  }
+
+  static ArmDoubleRegister from_code(int code) {
+    ArmDoubleRegister r = ArmDoubleRegister(code);
+    return r;
+  }
+
+  bool is_valid() const {
+    return 0 <= code_ && code_ < NumRegisters();
+  }
+  bool is(ArmDoubleRegister reg) const { return code_ == reg.code_; }
+  int code() const {
+    ASSERT(is_valid());
+    return code_;
+  }
+
+  int code_;
+};
+
+
+// Double word VFP register.
+struct DwVfpRegister : ArmDoubleRegister {
+  static const int kNumRegisters = 16;
 
-  inline static int ToAllocationIndex(DwVfpRegister reg);
+  explicit DwVfpRegister(int code) : ArmDoubleRegister(code) {}
+
+  inline int ToAllocationIndex(DwVfpRegister reg);
 
   static DwVfpRegister FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
     return from_code(index);
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
     const char* const names[] = {
       "d0",
       "d1",
@@ -228,8 +261,7 @@ struct DwVfpRegister {
   }
 
   static DwVfpRegister from_code(int code) {
-    DwVfpRegister r = { code };
-    return r;
+    return DwVfpRegister(code);
   }
 
   // Supporting d0 to d15, can be later extended to d31.
@@ -262,12 +294,37 @@ struct DwVfpRegister {
     *m = (code_ & 0x10) >> 4;
     *vm = code_ & 0x0F;
   }
+};
 
-  int code_;
+
+// Double word VFP register.
+struct SoftFloatRegister : ArmDoubleRegister {
+  static const int kNumRegisters = 1;
+  static const int kMaxNumAllocatableRegisters = kNumRegisters;
+
+  explicit SoftFloatRegister(int code) : ArmDoubleRegister(code) {}
+
+  static SoftFloatRegister FromAllocationIndex(int index) {
+    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    return from_code(index);
+  }
+
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    const char* const names[] = {
+      "sfpd0"
+    };
+    return names[index];
+  }
+
+  static SoftFloatRegister from_code(int code) {
+    SoftFloatRegister r = SoftFloatRegister(code);
+    return r;
+  }
 };
 
 
-typedef DwVfpRegister DoubleRegister;
+typedef ArmDoubleRegister DoubleRegister;
 
 
 // Support for the VFP registers s0 to s31 (d0 to d15).
@@ -305,23 +362,26 @@ const SwVfpRegister s29 = { 29 };
 const SwVfpRegister s30 = { 30 };
 const SwVfpRegister s31 = { 31 };
 
-const DwVfpRegister no_dreg = { -1 };
-const DwVfpRegister d0  = {  0 };
-const DwVfpRegister d1  = {  1 };
-const DwVfpRegister d2  = {  2 };
-const DwVfpRegister d3  = {  3 };
-const DwVfpRegister d4  = {  4 };
-const DwVfpRegister d5  = {  5 };
-const DwVfpRegister d6  = {  6 };
-const DwVfpRegister d7  = {  7 };
-const DwVfpRegister d8  = {  8 };
-const DwVfpRegister d9  = {  9 };
-const DwVfpRegister d10 = { 10 };
-const DwVfpRegister d11 = { 11 };
-const DwVfpRegister d12 = { 12 };
-const DwVfpRegister d13 = { 13 };
-const DwVfpRegister d14 = { 14 };
-const DwVfpRegister d15 = { 15 };
+const DwVfpRegister no_dreg = DwVfpRegister(-1);
+const DwVfpRegister d0  = DwVfpRegister(0);
+const DwVfpRegister d1  = DwVfpRegister(1);
+const DwVfpRegister d2  = DwVfpRegister(2);
+const DwVfpRegister d3  = DwVfpRegister(3);
+const DwVfpRegister d4  = DwVfpRegister(4);
+const DwVfpRegister d5  = DwVfpRegister(5);
+const DwVfpRegister d6  = DwVfpRegister(6);
+const DwVfpRegister d7  = DwVfpRegister(7);
+const DwVfpRegister d8  = DwVfpRegister(8);
+const DwVfpRegister d9  = DwVfpRegister(9);
+const DwVfpRegister d10 = DwVfpRegister(10);
+const DwVfpRegister d11 = DwVfpRegister(11);
+const DwVfpRegister d12 = DwVfpRegister(12);
+const DwVfpRegister d13 = DwVfpRegister(13);
+const DwVfpRegister d14 = DwVfpRegister(14);
+const DwVfpRegister d15 = DwVfpRegister(15);
+
+const Register sfpd_lo  = { kRegister_r6_Code };
+const Register sfpd_hi  = { kRegister_r7_Code };
 
 // Aliases for double registers.  Defined using #define instead of
 // "static const DwVfpRegister&" because Clang complains otherwise when a
index 24d14e8..28e00dd 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -1259,6 +1259,26 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
 
 
+void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across notification, this is important for compiled
+    // stubs that tail call the runtime on deopts passing their parameters in
+    // registers.
+    __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
+    // Pass the function and deoptimization type to the runtime system.
+    __ CallRuntime(Runtime::kNotifyICMiss, 0);
+    __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
+  }
+
+  __ mov(ip, lr);  // Stash the miss continuation
+  __ add(sp, sp, Operand(kPointerSize));  // Ignore state
+  __ pop(lr);  // Restore LR to continuation in JSFunction
+  __ mov(pc, ip);  // Jump to miss handler
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {
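
The Generate_NotifyICMiss builtin above is a small trampoline: it preserves
all caller- and callee-saved registers around the Runtime::kNotifyICMiss call
(compiled stubs pass their parameters in registers, so nothing may be
clobbered), then stashes the miss continuation from lr in ip, drops the state
word from the stack, restores lr to the JSFunction continuation, and jumps to
the miss handler through pc.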
index 9484f85..800c0f1 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -37,6 +37,23 @@ namespace v8 {
 namespace internal {
 
 
+CodeStubInterfaceDescriptor*
+    KeyedLoadFastElementStub::GetInterfaceDescriptor(Isolate* isolate) {
+  static CodeStubInterfaceDescriptor* result = NULL;
+  if (result == NULL) {
+    Handle<Code> miss = isolate->builtins()->KeyedLoadIC_Miss();
+    static Register registers[] = { r1, r0 };
+    static CodeStubInterfaceDescriptor info = {
+      2,
+      registers,
+      miss
+    };
+    result = &info;
+  }
+  return result;
+}
+
+
 #define __ ACCESS_MASM(masm)
 
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
@@ -503,7 +520,7 @@ void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
 // 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
 // scratch register.  Destroys the source register.  No GC occurs during this
 // stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public CodeStub {
+class ConvertToDoubleStub : public PlatformCodeStub {
  public:
   ConvertToDoubleStub(Register result_reg_1,
                       Register result_reg_2,
@@ -3568,10 +3585,10 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   const Register exponent = r2;
   const Register heapnumbermap = r5;
   const Register heapnumber = r0;
-  const DoubleRegister double_base = d1;
-  const DoubleRegister double_exponent = d2;
-  const DoubleRegister double_result = d3;
-  const DoubleRegister double_scratch = d0;
+  const DwVfpRegister double_base = d1;
+  const DwVfpRegister double_exponent = d2;
+  const DwVfpRegister double_result = d3;
+  const DwVfpRegister double_scratch = d0;
   const SwVfpRegister single_scratch = s0;
   const Register scratch = r9;
   const Register scratch2 = r7;
@@ -3781,12 +3798,29 @@ void CodeStub::GenerateStubsAheadOfTime() {
 
 
 void CodeStub::GenerateFPStubs() {
-  CEntryStub save_doubles(1, kSaveFPRegs);
-  Handle<Code> code = save_doubles.GetCode();
-  code->set_is_pregenerated(true);
-  StoreBufferOverflowStub stub(kSaveFPRegs);
-  stub.GetCode()->set_is_pregenerated(true);
-  code->GetIsolate()->set_fp_stubs_generated(true);
+  SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
+      ? kSaveFPRegs
+      : kDontSaveFPRegs;
+  CEntryStub save_doubles(1, mode);
+  StoreBufferOverflowStub stub(mode);
+  // These stubs might already be in the snapshot, detect that and don't
+  // regenerate, which would lead to code stub initialization state being messed
+  // up.
+  Code* save_doubles_code = NULL;
+  Code* store_buffer_overflow_code = NULL;
+  if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
+    if (CpuFeatures::IsSupported(VFP2)) {
+      CpuFeatures::Scope scope2(VFP2);
+      save_doubles_code = *save_doubles.GetCode();
+      store_buffer_overflow_code = *stub.GetCode();
+    } else {
+      save_doubles_code = *save_doubles.GetCode();
+      store_buffer_overflow_code = *stub.GetCode();
+    }
+    save_doubles_code->set_is_pregenerated(true);
+    store_buffer_overflow_code->set_is_pregenerated(true);
+  }
+  ISOLATE->set_fp_stubs_generated(true);
 }
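
GenerateFPStubs now consults the stub cache first (FindCodeInCache) so that
stubs already present in the snapshot are not regenerated, which would corrupt
code-stub initialization state. Both arms of the VFP2 check emit the same
GetCode() calls; the difference is that only the VFP2 arm opens a
CpuFeatures::Scope, allowing VFP instructions in the generated stubs.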
 
 
index 0443cf7..6f964a8 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -36,7 +36,7 @@ namespace internal {
 
 // Compute a transcendental math function natively, or call the
 // TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
  public:
   enum ArgumentType {
     TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
@@ -58,7 +58,7 @@ class TranscendentalCacheStub: public CodeStub {
 };
 
 
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
  public:
   explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
       : save_doubles_(save_fp) { }
@@ -77,7 +77,7 @@ class StoreBufferOverflowStub: public CodeStub {
 };
 
 
-class UnaryOpStub: public CodeStub {
+class UnaryOpStub: public PlatformCodeStub {
  public:
   UnaryOpStub(Token::Value op,
               UnaryOverwriteMode mode,
@@ -219,7 +219,7 @@ enum StringAddFlags {
 };
 
 
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
  public:
   explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
 
@@ -242,7 +242,7 @@ class StringAddStub: public CodeStub {
 };
 
 
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
  public:
   SubStringStub() {}
 
@@ -255,7 +255,7 @@ class SubStringStub: public CodeStub {
 
 
 
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
  public:
   StringCompareStub() { }
 
@@ -295,7 +295,7 @@ class StringCompareStub: public CodeStub {
 // This stub can convert a signed int32 to a heap number (double).  It does
 // not work for int32s that are in Smi range!  No GC occurs during this stub
 // so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public CodeStub {
+class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
  public:
   WriteInt32ToHeapNumberStub(Register the_int,
                              Register the_heap_number,
@@ -329,7 +329,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
 };
 
 
-class NumberToStringStub: public CodeStub {
+class NumberToStringStub: public PlatformCodeStub {
  public:
   NumberToStringStub() { }
 
@@ -355,7 +355,7 @@ class NumberToStringStub: public CodeStub {
 };
 
 
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
  public:
   RecordWriteStub(Register object,
                   Register value,
@@ -511,7 +511,7 @@ class RecordWriteStub: public CodeStub {
     Register GetRegThatIsNotOneOf(Register r1,
                                   Register r2,
                                   Register r3) {
-      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+      for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
         Register candidate = Register::FromAllocationIndex(i);
         if (candidate.is(r1)) continue;
         if (candidate.is(r2)) continue;
@@ -570,7 +570,7 @@ class RecordWriteStub: public CodeStub {
 // Enter C code from generated RegExp code in a way that allows
 // the C code to fix the return address in case of a GC.
 // Currently only needed on ARM.
-class RegExpCEntryStub: public CodeStub {
+class RegExpCEntryStub: public PlatformCodeStub {
  public:
   RegExpCEntryStub() {}
   virtual ~RegExpCEntryStub() {}
@@ -589,7 +589,7 @@ class RegExpCEntryStub: public CodeStub {
 // keep the code which called into native pinned in the memory. Currently the
 // simplest approach is to generate such stub early enough so it can never be
 // moved by GC
-class DirectCEntryStub: public CodeStub {
+class DirectCEntryStub: public PlatformCodeStub {
  public:
   DirectCEntryStub() {}
   void Generate(MacroAssembler* masm);
@@ -739,7 +739,7 @@ class FloatingPointHelper : public AllStatic {
 };
 
 
-class StringDictionaryLookupStub: public CodeStub {
+class StringDictionaryLookupStub: public PlatformCodeStub {
  public:
   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
 
index 300772a..a2762f8 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -73,10 +73,10 @@ UnaryMathFunction CreateExpFunction() {
 
   {
     CpuFeatures::Scope use_vfp(VFP2);
-    DoubleRegister input = d0;
-    DoubleRegister result = d1;
-    DoubleRegister double_scratch1 = d2;
-    DoubleRegister double_scratch2 = d3;
+    DwVfpRegister input = d0;
+    DwVfpRegister result = d1;
+    DwVfpRegister double_scratch1 = d2;
+    DwVfpRegister double_scratch2 = d3;
     Register temp1 = r4;
     Register temp2 = r5;
     Register temp3 = r6;
@@ -527,10 +527,10 @@ static MemOperand ExpConstant(int index, Register base) {
 
 
 void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
-                                   DoubleRegister input,
-                                   DoubleRegister result,
-                                   DoubleRegister double_scratch1,
-                                   DoubleRegister double_scratch2,
+                                   DwVfpRegister input,
+                                   DwVfpRegister result,
+                                   DwVfpRegister double_scratch1,
+                                   DwVfpRegister double_scratch2,
                                    Register temp1,
                                    Register temp2,
                                    Register temp3) {
index 8f0033e..75899a9 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -44,6 +44,10 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
 
 class CodeGenerator: public AstVisitor {
  public:
+  CodeGenerator() {
+    InitializeAstVisitor();
+  }
+
   static bool MakeCode(CompilationInfo* info);
 
   // Printing of AST, etc. as requested by flags.
@@ -68,6 +72,8 @@ class CodeGenerator: public AstVisitor {
                               int pos,
                               bool right_here = false);
 
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
  private:
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
@@ -92,10 +98,10 @@ class StringCharLoadGenerator : public AllStatic {
 class MathExpGenerator : public AllStatic {
  public:
   static void EmitMathExp(MacroAssembler* masm,
-                          DoubleRegister input,
-                          DoubleRegister result,
-                          DoubleRegister double_scratch1,
-                          DoubleRegister double_scratch2,
+                          DwVfpRegister input,
+                          DwVfpRegister result,
+                          DwVfpRegister double_scratch1,
+                          DwVfpRegister double_scratch2,
                           Register temp1,
                           Register temp2,
                           Register temp3);
index 19667b9..8db156d 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -222,7 +222,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
 
 void Deoptimizer::DoComputeOsrOutputFrame() {
   DeoptimizationInputData* data = DeoptimizationInputData::cast(
-      optimized_code_->deoptimization_data());
+      compiled_code_->deoptimization_data());
   unsigned ast_id = data->OsrAstId()->value();
 
   int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
@@ -256,7 +256,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
   unsigned input_frame_size = input_->GetFrameSize();
   ASSERT(fixed_size + height_in_bytes == input_frame_size);
 
-  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+  unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
   unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
   unsigned outgoing_size = outgoing_height * kPointerSize;
   unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -348,7 +348,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
 
     unsigned pc_offset = data->OsrPcOffset()->value();
     uint32_t pc = reinterpret_cast<uint32_t>(
-        optimized_code_->entry() + pc_offset);
+        compiled_code_->entry() + pc_offset);
     output_[0]->SetPc(pc);
   }
   Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
@@ -461,6 +461,70 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
 }
 
 
+void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
+                                      int frame_index) {
+  //
+  //               FROM                                  TO             <-fp
+  //    |          ....           |          |          ....           |
+  //    +-------------------------+          +-------------------------+
+  //    | JSFunction continuation |          | JSFunction continuation |
+  //    +-------------------------+          +-------------------------+<-sp
+  // |  |   saved frame (fp)      |
+  // |  +=========================+<-fp
+  // |  |   JSFunction context    |
+  // v  +-------------------------+
+  //    |   COMPILED_STUB marker  |          fp = saved frame
+  //    +-------------------------+          f8 = JSFunction context
+  //    |                         |
+  //    | ...                     |
+  //    |                         |
+  //    +-------------------------+<-sp
+  //
+  //
+  int output_frame_size = 1 * kPointerSize;
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, 0);
+  Code* notify_miss =
+      isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
+  output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+  output_frame->SetContinuation(
+      reinterpret_cast<intptr_t>(notify_miss->entry()));
+
+  ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
+  int major_key = compiled_code_->major_key();
+  CodeStubInterfaceDescriptor* descriptor =
+      isolate_->code_stub_interface_descriptors()[major_key];
+  Handle<Code> miss_ic(descriptor->deoptimization_handler);
+  output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
+  unsigned input_frame_size = input_->GetFrameSize();
+  intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
+  output_frame->SetFrameSlot(0, value);
+  value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
+  output_frame->SetRegister(fp.code(), value);
+  output_frame->SetFp(value);
+  value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
+  output_frame->SetRegister(cp.code(), value);
+
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  ASSERT(opcode == Translation::REGISTER);
+  USE(opcode);
+  int input_reg = iterator->Next();
+  intptr_t input_value = input_->GetRegister(input_reg);
+  output_frame->SetRegister(r1.code(), input_value);
+
+  int32_t next = iterator->Next();
+  opcode = static_cast<Translation::Opcode>(next);
+  ASSERT(opcode == Translation::REGISTER);
+  input_reg = iterator->Next();
+  input_value = input_->GetRegister(input_reg);
+  output_frame->SetRegister(r0.code(), input_value);
+
+  ASSERT(frame_index == 0);
+  output_[frame_index] = output_frame;
+}
+
+
 void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
                                               int frame_index) {
   Builtins* builtins = isolate_->builtins();
@@ -888,7 +952,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
   }
   input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
   input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
     input_->SetDoubleRegister(i, 0.0);
   }
 
@@ -908,7 +972,6 @@ void Deoptimizer::EntryGenerator::Generate() {
 
   Isolate* isolate = masm()->isolate();
 
-  CpuFeatures::Scope scope(VFP3);
   // Save all general purpose registers before messing with them.
   const int kNumberOfRegisters = Register::kNumRegisters;
 
@@ -916,23 +979,29 @@ void Deoptimizer::EntryGenerator::Generate() {
   RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
 
   const int kDoubleRegsSize =
-      kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
-
-  // Save all VFP registers before messing with them.
-  DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
-  DwVfpRegister last =
-      DwVfpRegister::FromAllocationIndex(
-          DwVfpRegister::kNumAllocatableRegisters - 1);
-  ASSERT(last.code() > first.code());
-  ASSERT((last.code() - first.code()) ==
-      (DwVfpRegister::kNumAllocatableRegisters - 1));
+      kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
+
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
+    // Save all VFP registers before messing with them.
+    DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
+    DwVfpRegister last =
+        DwVfpRegister::FromAllocationIndex(
+            DwVfpRegister::kMaxNumAllocatableRegisters - 1);
+    ASSERT(last.code() > first.code());
+    ASSERT((last.code() - first.code()) ==
+           (DwVfpRegister::kMaxNumAllocatableRegisters - 1));
 #ifdef DEBUG
-  for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) {
-    ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
-           (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
-  }
+    int max = DwVfpRegister::kMaxNumAllocatableRegisters - 1;
+    for (int i = 0; i <= max; i++) {
+      ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
+             (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
+    }
 #endif
-  __ vstm(db_w, sp, first, last);
+    __ vstm(db_w, sp, first, last);
+  } else {
+    __ sub(sp, sp, Operand(kDoubleRegsSize));
+  }
 
   // Push all 16 registers (needed to populate FrameDescription::registers_).
   // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
@@ -991,14 +1060,17 @@ void Deoptimizer::EntryGenerator::Generate() {
     __ str(r2, MemOperand(r1, offset));
   }
 
-  // Copy VFP registers to
-  // double_registers_[DoubleRegister::kNumAllocatableRegisters]
-  int double_regs_offset = FrameDescription::double_registers_offset();
-  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
-    int dst_offset = i * kDoubleSize + double_regs_offset;
-    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
-    __ vldr(d0, sp, src_offset);
-    __ vstr(d0, r1, dst_offset);
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
+    // Copy VFP registers to
+    // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
+    int double_regs_offset = FrameDescription::double_registers_offset();
+    for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); ++i) {
+      int dst_offset = i * kDoubleSize + double_regs_offset;
+      int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+      __ vldr(d0, sp, src_offset);
+      __ vstr(d0, r1, dst_offset);
+    }
   }
 
   // Remove the bailout id, eventually return address, and the saved registers
@@ -1019,10 +1091,13 @@ void Deoptimizer::EntryGenerator::Generate() {
   // frame description.
   __ add(r3,  r1, Operand(FrameDescription::frame_content_offset()));
   Label pop_loop;
+  Label pop_loop_header;
+  __ b(&pop_loop_header);
   __ bind(&pop_loop);
   __ pop(r4);
   __ str(r4, MemOperand(r3, 0));
   __ add(r3, r3, Operand(sizeof(uint32_t)));
+  __ bind(&pop_loop_header);
   __ cmp(r2, sp);
   __ b(ne, &pop_loop);
 
@@ -1039,24 +1114,29 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ pop(r0);  // Restore deoptimizer object (class Deoptimizer).
 
   // Replace the current (input) frame with the output frames.
-  Label outer_push_loop, inner_push_loop;
+  Label outer_push_loop, inner_push_loop,
+      outer_loop_header, inner_loop_header;
   // Outer loop state: r0 = current "FrameDescription** output_",
   // r1 = one past the last FrameDescription**.
   __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
   __ ldr(r0, MemOperand(r0, Deoptimizer::output_offset()));  // r0 is output_.
   __ add(r1, r0, Operand(r1, LSL, 2));
+  __ jmp(&outer_loop_header);
   __ bind(&outer_push_loop);
   // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
   __ ldr(r2, MemOperand(r0, 0));  // output_[ix]
   __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
+  __ jmp(&inner_loop_header);
   __ bind(&inner_push_loop);
   __ sub(r3, r3, Operand(sizeof(uint32_t)));
   __ add(r6, r2, Operand(r3));
   __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
   __ push(r7);
+  __ bind(&inner_loop_header);
   __ cmp(r3, Operand(0));
   __ b(ne, &inner_push_loop);  // test for gt?
   __ add(r0, r0, Operand(kPointerSize));
+  __ bind(&outer_loop_header);
   __ cmp(r0, r1);
   __ b(lt, &outer_push_loop);
 
index 32dda27..400e1fc 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -42,10 +42,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
 #undef DEFINE_COMPILE
 
 LOsrEntry::LOsrEntry() {
-  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
     register_spills_[i] = NULL;
   }
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
     double_register_spills_[i] = NULL;
   }
 }
@@ -612,6 +612,7 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
+  info()->MarkAsNonDeferredCalling();
 #ifdef DEBUG
   instr->VerifyCall();
 #endif
@@ -1684,6 +1685,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
   Representation to = instr->to();
   if (from.IsTagged()) {
     if (to.IsDouble()) {
+      info()->MarkAsDeferredCalling();
       LOperand* value = UseRegister(instr->value());
       LNumberUntagD* res = new(zone()) LNumberUntagD(value);
       return AssignEnvironment(DefineAsRegister(res));
@@ -1708,6 +1710,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
     }
   } else if (from.IsDouble()) {
     if (to.IsTagged()) {
+      info()->MarkAsDeferredCalling();
       LOperand* value = UseRegister(instr->value());
       LOperand* temp1 = TempRegister();
       LOperand* temp2 = TempRegister();
@@ -1727,6 +1730,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
       return AssignEnvironment(DefineAsRegister(res));
     }
   } else if (from.IsInteger32()) {
+    info()->MarkAsDeferredCalling();
     if (to.IsTagged()) {
       HValue* val = instr->value();
       LOperand* value = UseRegisterAtStart(val);
@@ -1964,7 +1968,16 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
         (instr->representation().IsDouble() &&
          ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
           (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
-    LOperand* external_pointer = UseRegister(instr->elements());
+    // float->double conversion on non-VFP2 requires an extra scratch
+    // register. For convenience, just mark the elements register as "UseTemp"
+    // so that it can be used as a temp during the float->double conversion
+    // after it's no longer needed after the float load.
+    bool needs_temp =
+        !CpuFeatures::IsSupported(VFP2) &&
+        (elements_kind == EXTERNAL_FLOAT_ELEMENTS);
+    LOperand* external_pointer = needs_temp
+        ? UseTempRegister(instr->elements())
+        : UseRegister(instr->elements());
     result = new(zone()) LLoadKeyed(external_pointer, key);
   }
 
@@ -2182,8 +2195,17 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
 
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
-  int spill_index = chunk()->GetParameterStackSlot(instr->index());
-  return DefineAsSpilled(new(zone()) LParameter, spill_index);
+  LParameter* result = new(zone()) LParameter;
+  if (info()->IsOptimizing()) {
+    int spill_index = chunk()->GetParameterStackSlot(instr->index());
+    return DefineAsSpilled(result, spill_index);
+  } else {
+    ASSERT(info()->IsStub());
+    CodeStubInterfaceDescriptor* descriptor =
+        info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+    Register reg = descriptor->register_params[instr->index()];
+    return DefineFixed(result, reg);
+  }
 }
 
 
index b45a3e0..3a9d10b 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -255,6 +255,11 @@ class LInstruction: public ZoneObject {
   void MarkAsCall() { is_call_ = true; }
 
   // Interface to the register allocator and iterators.
+  bool ClobbersTemps() const { return is_call_; }
+  bool ClobbersRegisters() const { return is_call_; }
+  bool ClobbersDoubleRegisters() const { return is_call_; }
+
+  // Interface to the register allocator and iterators.
   bool IsMarkedAsCall() const { return is_call_; }
 
   virtual bool HasResult() const = 0;
@@ -2334,8 +2339,9 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
   // slot, i.e., that must also be restored to the spill slot on OSR entry.
   // NULL if the register has no assigned spill slot.  Indexed by allocation
   // index.
-  LOperand* register_spills_[Register::kNumAllocatableRegisters];
-  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+  LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
+  LOperand* double_register_spills_[
+      DoubleRegister::kMaxNumAllocatableRegisters];
 };
 
 
index 515a0d0..1c9d0c4 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -65,8 +65,6 @@ bool LCodeGen::GenerateCode() {
   HPhase phase("Z_Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
-  CpuFeatures::Scope scope1(VFP3);
-  CpuFeatures::Scope scope2(ARMv7);
 
   CodeStub::GenerateFPStubs();
 
@@ -118,37 +116,38 @@ void LCodeGen::Comment(const char* format, ...) {
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
-  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
 #ifdef DEBUG
-  if (strlen(FLAG_stop_at) > 0 &&
-      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
-    __ stop("stop_at");
-  }
+    if (strlen(FLAG_stop_at) > 0 &&
+        info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+      __ stop("stop_at");
+    }
 #endif
 
-  // r1: Callee's JS function.
-  // cp: Callee's context.
-  // fp: Caller's frame pointer.
-  // lr: Caller's pc.
+    // r1: Callee's JS function.
+    // cp: Callee's context.
+    // fp: Caller's frame pointer.
+    // lr: Caller's pc.
 
-  // Strict mode functions and builtins need to replace the receiver
-  // with undefined when called as functions (without an explicit
-  // receiver object). r5 is zero for method calls and non-zero for
-  // function calls.
-  if (!info_->is_classic_mode() || info_->is_native()) {
-    Label ok;
-    __ cmp(r5, Operand(0));
-    __ b(eq, &ok);
-    int receiver_offset = scope()->num_parameters() * kPointerSize;
-    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-    __ str(r2, MemOperand(sp, receiver_offset));
-    __ bind(&ok);
+    // Strict mode functions and builtins need to replace the receiver
+    // with undefined when called as functions (without an explicit
+    // receiver object). r5 is zero for method calls and non-zero for
+    // function calls.
+    if (!info_->is_classic_mode() || info_->is_native()) {
+      Label ok;
+      __ cmp(r5, Operand(0));
+      __ b(eq, &ok);
+      int receiver_offset = scope()->num_parameters() * kPointerSize;
+      __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+      __ str(r2, MemOperand(sp, receiver_offset));
+      __ bind(&ok);
+    }
   }
 
-
   info()->set_prologue_offset(masm_->pc_offset());
-  {
+  if (NeedsEagerFrame()) {
     PredictableCodeSizeScope predictible_code_size_scope(
         masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
     // The following three instructions must remain together and unmodified
@@ -159,6 +158,7 @@ bool LCodeGen::GeneratePrologue() {
     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
     // Adjust FP to point to saved FP.
     __ add(fp, sp, Operand(2 * kPointerSize));
+    frame_is_built_ = true;
   }
 
   // Reserve space for the stack slots needed by the code.
@@ -178,7 +178,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   // Possibly allocate a local context.
-  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is in r1.
@@ -214,7 +214,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   // Trace the call.
-  if (FLAG_trace) {
+  if (FLAG_trace && info()->IsOptimizing()) {
     __ CallRuntime(Runtime::kTraceEnter, 0);
   }
   return !is_aborted();
@@ -272,10 +272,31 @@ bool LCodeGen::GenerateDeferredCode() {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Deferred build frame",
+                code->instruction_index(),
+                code->instr()->Mnemonic());
+        ASSERT(!frame_is_built_);
+        ASSERT(info()->IsStub());
+        frame_is_built_ = true;
+        __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+        __ push(scratch0());
+        __ add(fp, sp, Operand(2 * kPointerSize));
+      }
       Comment(";;; Deferred code @%d: %s.",
               code->instruction_index(),
               code->instr()->Mnemonic());
       code->Generate();
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Deferred destroy frame",
+                code->instruction_index(),
+                code->instr()->Mnemonic());
+        ASSERT(frame_is_built_);
+        __ pop(ip);
+        __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
+        frame_is_built_ = false;
+      }
       __ jmp(code->exit());
     }
   }
@@ -297,24 +318,68 @@ bool LCodeGen::GenerateDeoptJumpTable() {
   // Each entry in the jump table generates one instruction and inlines one
   // 32bit data after it.
   if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
-      deopt_jump_table_.length() * 2)) {
+      deopt_jump_table_.length() * 7)) {
     Abort("Generated code is too large");
   }
 
-  // Block the constant pool emission during the jump table emission.
-  __ BlockConstPoolFor(deopt_jump_table_.length());
   __ RecordComment("[ Deoptimisation jump table");
   Label table_start;
   __ bind(&table_start);
+  Label needs_frame_not_call;
+  Label needs_frame_is_call;
   for (int i = 0; i < deopt_jump_table_.length(); i++) {
     __ bind(&deopt_jump_table_[i].label);
-    __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
-    __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
+    Address entry = deopt_jump_table_[i].address;
+    if (deopt_jump_table_[i].needs_frame) {
+      __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
+      if (deopt_jump_table_[i].is_lazy_deopt) {
+        if (needs_frame_is_call.is_bound()) {
+          __ b(&needs_frame_is_call);
+        } else {
+          __ bind(&needs_frame_is_call);
+          __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+          // This variant of deopt can only be used with stubs. Since we don't
+          // have a function pointer to install in the stack frame that we're
+          // building, install a special marker there instead.
+          ASSERT(info()->IsStub());
+          __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+          __ push(scratch0());
+          __ add(fp, sp, Operand(2 * kPointerSize));
+          __ mov(lr, Operand(pc), LeaveCC, al);
+          __ mov(pc, ip);
+        }
+      } else {
+        if (needs_frame_not_call.is_bound()) {
+          __ b(&needs_frame_not_call);
+        } else {
+          __ bind(&needs_frame_not_call);
+          __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+          // This variant of deopt can only be used with stubs. Since we don't
+          // have a function pointer to install in the stack frame that we're
+          // building, install a special marker there instead.
+          ASSERT(info()->IsStub());
+          __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+          __ push(scratch0());
+          __ add(fp, sp, Operand(2 * kPointerSize));
+          __ mov(pc, ip);
+        }
+      }
+    } else {
+      if (deopt_jump_table_[i].is_lazy_deopt) {
+        __ mov(lr, Operand(pc), LeaveCC, al);
+        __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
+      } else {
+        __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
+      }
+    }
+    masm()->CheckConstPool(false, false);
   }
-  ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
-      deopt_jump_table_.length() * 2);
   __ RecordComment("]");
 
+  // Force constant pool emission at the end of the deopt jump table to make
+  // sure that no constant pools are emitted after.
+  masm()->CheckConstPool(true, false);
+
   // The deoptimization jump table is the last part of the instruction
   // sequence. Mark the generated code as done unless we bailed out.
   if (!is_aborted()) status_ = DONE;
@@ -334,8 +399,8 @@ Register LCodeGen::ToRegister(int index) const {
 }
 
 
-DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
-  return DoubleRegister::FromAllocationIndex(index);
+DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
+  return DwVfpRegister::FromAllocationIndex(index);
 }
 
 
@@ -376,15 +441,15 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
 }
 
 
-DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
   ASSERT(op->IsDoubleRegister());
   return ToDoubleRegister(op->index());
 }
 
 
-DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
-                                                SwVfpRegister flt_scratch,
-                                                DoubleRegister dbl_scratch) {
+DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+                                               SwVfpRegister flt_scratch,
+                                               DwVfpRegister dbl_scratch) {
   if (op->IsDoubleRegister()) {
     return ToDoubleRegister(op->index());
   } else if (op->IsConstantOperand()) {
@@ -520,7 +585,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
                    translation,
                    arguments_index,
                    arguments_count);
-  int closure_id = *info()->closure() != *environment->closure()
+  bool has_closure_id = !info()->closure().is_null() &&
+      *info()->closure() != *environment->closure();
+  int closure_id = has_closure_id
       ? DefineDeoptimizationLiteral(environment->closure())
       : Translation::kSelfLiteralId;
 
@@ -541,6 +608,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
       ASSERT(height == 0);
       translation->BeginSetterStubFrame(closure_id);
       break;
+    case STUB:
+      translation->BeginCompiledStubFrame();
+      break;
     case ARGUMENTS_ADAPTOR:
       translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
       break;
@@ -736,7 +806,11 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
-  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+
+  Deoptimizer::BailoutType bailout_type = info()->IsStub()
+      ? Deoptimizer::LAZY
+      : Deoptimizer::EAGER;
+  Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
   if (entry == NULL) {
     Abort("bailout was not prepared");
     return;
@@ -752,14 +826,19 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
 
   if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
 
-  if (cc == al) {
+  bool needs_lazy_deopt = info()->IsStub();
+  ASSERT(info()->IsStub() || frame_is_built_);
+  if (cc == al && !needs_lazy_deopt) {
     __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
     if (deopt_jump_table_.is_empty() ||
-        (deopt_jump_table_.last().address != entry)) {
-      deopt_jump_table_.Add(JumpTableEntry(entry), zone());
+        (deopt_jump_table_.last().address != entry) ||
+        (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
+        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
+      JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
+      deopt_jump_table_.Add(table_entry, zone());
     }
     __ b(cc, &deopt_jump_table_.last().label);
   }
@@ -1368,6 +1447,7 @@ void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
                                       LOperand* left_argument,
                                       LOperand* right_argument,
                                       Token::Value op) {
+  CpuFeatures::Scope vfp_scope(VFP2);
   Register left = ToRegister(left_argument);
   Register right = ToRegister(right_argument);
 
@@ -1653,6 +1733,7 @@ void LCodeGen::DoConstantI(LConstantI* instr) {
 void LCodeGen::DoConstantD(LConstantD* instr) {
   ASSERT(instr->result()->IsDoubleRegister());
   DwVfpRegister result = ToDoubleRegister(instr->result());
+  CpuFeatures::Scope scope(VFP2);
   double v = instr->value();
   __ Vmov(result, v, scratch0());
 }
@@ -1821,9 +1902,10 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
     __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
   } else {
     ASSERT(instr->hydrogen()->representation().IsDouble());
-    DoubleRegister left_reg = ToDoubleRegister(left);
-    DoubleRegister right_reg = ToDoubleRegister(right);
-    DoubleRegister result_reg = ToDoubleRegister(instr->result());
+    CpuFeatures::Scope scope(VFP2);
+    DwVfpRegister left_reg = ToDoubleRegister(left);
+    DwVfpRegister right_reg = ToDoubleRegister(right);
+    DwVfpRegister result_reg = ToDoubleRegister(instr->result());
     Label check_nan_left, check_zero, return_left, return_right, done;
     __ VFPCompareAndSetFlags(left_reg, right_reg);
     __ b(vs, &check_nan_left);
@@ -1866,9 +1948,10 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
 
 
 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
-  DoubleRegister left = ToDoubleRegister(instr->left());
-  DoubleRegister right = ToDoubleRegister(instr->right());
-  DoubleRegister result = ToDoubleRegister(instr->result());
+  CpuFeatures::Scope scope(VFP2);
+  DwVfpRegister left = ToDoubleRegister(instr->left());
+  DwVfpRegister right = ToDoubleRegister(instr->right());
+  DwVfpRegister result = ToDoubleRegister(instr->result());
   switch (instr->op()) {
     case Token::ADD:
       __ vadd(result, left, right);
@@ -1956,7 +2039,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
     __ cmp(reg, Operand(0));
     EmitBranch(true_block, false_block, ne);
   } else if (r.IsDouble()) {
-    DoubleRegister reg = ToDoubleRegister(instr->value());
+    CpuFeatures::Scope scope(VFP2);
+    DwVfpRegister reg = ToDoubleRegister(instr->value());
     Register scratch = scratch0();
 
     // Test the double value. Zero and NaN are false.
@@ -2041,8 +2125,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
       }
 
       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        CpuFeatures::Scope scope(VFP2);
         // heap number -> false iff +0, -0, or NaN.
-        DoubleRegister dbl_scratch = double_scratch0();
+        DwVfpRegister dbl_scratch = double_scratch0();
         Label not_heap_number;
         __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
         __ b(ne, &not_heap_number);
@@ -2120,6 +2205,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
+      CpuFeatures::Scope scope(VFP2);
       // Compare left and right operands as doubles and load the
       // resulting flags into the normal status register.
       __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
@@ -2658,16 +2744,21 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
 
 
 void LCodeGen::DoReturn(LReturn* instr) {
-  if (FLAG_trace) {
+  if (FLAG_trace && info()->IsOptimizing()) {
     // Push the return value on the stack as the parameter.
     // Runtime::TraceExit returns its parameter in r0.
     __ push(r0);
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
-  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
-  __ mov(sp, fp);
-  __ ldm(ia_w, sp, fp.bit() | lr.bit());
-  __ add(sp, sp, Operand(sp_delta));
+  if (NeedsEagerFrame()) {
+    int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
+    __ mov(sp, fp);
+    __ ldm(ia_w, sp, fp.bit() | lr.bit());
+    __ add(sp, sp, Operand(sp_delta));
+  }
+  if (info()->IsStub()) {
+    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
   __ Jump(lr);
 }
 
@@ -3017,17 +3108,63 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
 
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-    CpuFeatures::Scope scope(VFP3);
     DwVfpRegister result = ToDoubleRegister(instr->result());
     Operand operand = key_is_constant
         ? Operand(constant_key << element_size_shift)
         : Operand(key, LSL, shift_size);
     __ add(scratch0(), external_pointer, operand);
-    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-      __ vldr(result.low(), scratch0(), additional_offset);
-      __ vcvt_f64_f32(result, result.low());
-    } else  {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ vldr(result, scratch0(), additional_offset);
+    if (CpuFeatures::IsSupported(VFP2)) {
+      CpuFeatures::Scope scope(VFP2);
+      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+        __ vldr(result.low(), scratch0(), additional_offset);
+        __ vcvt_f64_f32(result, result.low());
+      } else  {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+        __ vldr(result, scratch0(), additional_offset);
+      }
+    } else {
+      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+        Register value = external_pointer;
+        __ ldr(value, MemOperand(scratch0(), additional_offset));
+        __ and_(sfpd_lo, value, Operand(kBinary32MantissaMask));
+
+        __ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits));
+        __ and_(scratch0(), scratch0(),
+                Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+        Label exponent_rebiased;
+        __ teq(scratch0(), Operand(0x00));
+        __ b(eq, &exponent_rebiased);
+
+        __ teq(scratch0(), Operand(0xff));
+        __ mov(scratch0(), Operand(0x7ff), LeaveCC, eq);
+        __ b(eq, &exponent_rebiased);
+
+        // Rebias exponent.
+        __ add(scratch0(),
+               scratch0(),
+               Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
+
+        __ bind(&exponent_rebiased);
+        __ and_(sfpd_hi, value, Operand(kBinary32SignMask));
+        __ orr(sfpd_hi, sfpd_hi,
+               Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord));
+
+        // Shift mantissa.
+        static const int kMantissaShiftForHiWord =
+            kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+        static const int kMantissaShiftForLoWord =
+            kBitsPerInt - kMantissaShiftForHiWord;
+
+        __ orr(sfpd_hi, sfpd_hi,
+               Operand(sfpd_lo, LSR, kMantissaShiftForHiWord));
+        __ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord));
+
+      } else {
+        __ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset));
+        __ ldr(sfpd_hi, MemOperand(scratch0(),
+                                   additional_offset + kPointerSize));
+      }
     }
   } else {
     Register result = ToRegister(instr->result());
@@ -3096,23 +3233,28 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
     key = ToRegister(instr->key());
   }
 
-  Operand operand = key_is_constant
-      ? Operand(((constant_key + instr->additional_index()) <<
-                 element_size_shift) +
-                FixedDoubleArray::kHeaderSize - kHeapObjectTag)
-      : Operand(key, LSL, shift_size);
-  __ add(elements, elements, operand);
+  int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
+      ((constant_key + instr->additional_index()) << element_size_shift);
   if (!key_is_constant) {
-    __ add(elements, elements,
-           Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
-                   (instr->additional_index() << element_size_shift)));
-  }
-
-  __ vldr(result, elements, 0);
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
-    __ cmp(scratch, Operand(kHoleNanUpper32));
-    DeoptimizeIf(eq, instr->environment());
+    __ add(elements, elements, Operand(key, LSL, shift_size));
+  }
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
+    __ add(elements, elements, Operand(base_offset));
+    __ vldr(result, elements, 0);
+    if (instr->hydrogen()->RequiresHoleCheck()) {
+      __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+      __ cmp(scratch, Operand(kHoleNanUpper32));
+      DeoptimizeIf(eq, instr->environment());
+    }
+  } else {
+    __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
+    __ ldr(sfpd_lo, MemOperand(elements, base_offset));
+    if (instr->hydrogen()->RequiresHoleCheck()) {
+      ASSERT(kPointerSize == sizeof(kHoleNanLower32));
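+      // The hole is a NaN with a distinguished upper word, so comparing
+      // the upper word (already in sfpd_hi) suffices, mirroring the VFP2
+      // path above.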
+      __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
+      DeoptimizeIf(eq, instr->environment());
+    }
   }
 }
 
@@ -3548,6 +3690,7 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+  CpuFeatures::Scope scope(VFP2);
   // Class for deferred case.
   class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
    public:
@@ -3584,7 +3727,8 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->value());
+  CpuFeatures::Scope scope(VFP2);
+  DwVfpRegister input = ToDoubleRegister(instr->value());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
 
@@ -3609,7 +3753,8 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->value());
+  CpuFeatures::Scope scope(VFP2);
+  DwVfpRegister input = ToDoubleRegister(instr->value());
   Register result = ToRegister(instr->result());
   DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
   Register scratch = scratch0();
@@ -3674,16 +3819,18 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->value());
-  DoubleRegister result = ToDoubleRegister(instr->result());
+  CpuFeatures::Scope scope(VFP2);
+  DwVfpRegister input = ToDoubleRegister(instr->value());
+  DwVfpRegister result = ToDoubleRegister(instr->result());
   __ vsqrt(result, input);
 }
 
 
 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->value());
-  DoubleRegister result = ToDoubleRegister(instr->result());
-  DoubleRegister temp = ToDoubleRegister(instr->temp());
+  CpuFeatures::Scope scope(VFP2);
+  DwVfpRegister input = ToDoubleRegister(instr->value());
+  DwVfpRegister result = ToDoubleRegister(instr->result());
+  DwVfpRegister temp = ToDoubleRegister(instr->temp());
 
   // Note that according to ECMA-262 15.8.2.13:
   // Math.pow(-Infinity, 0.5) == Infinity
@@ -3702,6 +3849,7 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoPower(LPower* instr) {
+  CpuFeatures::Scope scope(VFP2);
   Representation exponent_type = instr->hydrogen()->right()->representation();
   // Having marked this as a call, we can use any registers.
   // Just make sure that the input/output registers are the expected ones.
@@ -3734,6 +3882,7 @@ void LCodeGen::DoPower(LPower* instr) {
 
 
 void LCodeGen::DoRandom(LRandom* instr) {
+  CpuFeatures::Scope scope(VFP2);
   class DeferredDoRandom: public LDeferredCode {
    public:
     DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
@@ -3812,10 +3961,11 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
 
 
 void LCodeGen::DoMathExp(LMathExp* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->value());
-  DoubleRegister result = ToDoubleRegister(instr->result());
-  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
-  DoubleRegister double_scratch2 = double_scratch0();
+  CpuFeatures::Scope scope(VFP2);
+  DwVfpRegister input = ToDoubleRegister(instr->value());
+  DwVfpRegister result = ToDoubleRegister(instr->result());
+  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+  DwVfpRegister double_scratch2 = double_scratch0();
   Register temp1 = ToRegister(instr->temp1());
   Register temp2 = ToRegister(instr->temp2());
 
@@ -4101,6 +4251,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
 
 
 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+  CpuFeatures::Scope scope(VFP2);
   Register external_pointer = ToRegister(instr->elements());
   Register key = no_reg;
   ElementsKind elements_kind = instr->elements_kind();
@@ -4171,6 +4322,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
 
 
 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+  CpuFeatures::Scope scope(VFP2);
   DwVfpRegister value = ToDoubleRegister(instr->value());
   Register elements = ToRegister(instr->elements());
   Register key = no_reg;
@@ -4447,6 +4599,7 @@ void LCodeGen::DoStringLength(LStringLength* instr) {
 
 
 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  CpuFeatures::Scope scope(VFP2);
   LOperand* input = instr->value();
   ASSERT(input->IsRegister() || input->IsStackSlot());
   LOperand* output = instr->result();
@@ -4464,6 +4617,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
 
 
 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+  CpuFeatures::Scope scope(VFP2);
   LOperand* input = instr->value();
   LOperand* output = instr->result();
 
@@ -4525,13 +4679,49 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
 }
 
 
+// Convert an unsigned integer with the specified number of leading zeroes
+// in its binary representation to an IEEE 754 double.
+// The integer to convert is passed in register hiword.
+// The resulting double is returned in the register pair hiword:loword.
+// This function does not work correctly for 0.
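+// Worked example (a sketch, assuming the standard IEEE 754 layout used by
+// HeapNumber): hiword == 0x80000000 with leading_zeroes == 0 encodes 2^31,
+// so meaningful_bits == 31 and the biased exponent is 1023 + 31 == 1054;
+// the routine yields hiword:loword == 0x41E00000:0x00000000.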
+static void GenerateUInt2Double(MacroAssembler* masm,
+                                Register hiword,
+                                Register loword,
+                                Register scratch,
+                                int leading_zeroes) {
+  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
+  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
+
+  const int mantissa_shift_for_hi_word =
+      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
+  const int mantissa_shift_for_lo_word =
+      kBitsPerInt - mantissa_shift_for_hi_word;
+  masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
+  if (mantissa_shift_for_hi_word > 0) {
+    masm->mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
+    masm->orr(hiword, scratch,
+              Operand(hiword, LSR, mantissa_shift_for_hi_word));
+  } else {
+    masm->mov(loword, Operand(0, RelocInfo::NONE));
+    masm->orr(hiword, scratch,
+              Operand(hiword, LSL, -mantissa_shift_for_hi_word));
+  }
+
+  // If the least significant bit of the biased exponent was not 1, it was
+  // corrupted by the most significant bit of the mantissa, so fix that here.
+  if (!(biased_exponent & 1)) {
+    masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
+  }
+}
+
+
 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
                                     LOperand* value,
                                     IntegerSignedness signedness) {
   Label slow;
   Register src = ToRegister(value);
   Register dst = ToRegister(instr->result());
-  DoubleRegister dbl_scratch = double_scratch0();
+  DwVfpRegister dbl_scratch = double_scratch0();
   SwVfpRegister flt_scratch = dbl_scratch.low();
 
   // Preserve the value of all registers.
@@ -4546,16 +4736,40 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
       __ SmiUntag(src, dst);
       __ eor(src, src, Operand(0x80000000));
     }
-    __ vmov(flt_scratch, src);
-    __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+    if (CpuFeatures::IsSupported(VFP2)) {
+      CpuFeatures::Scope scope(VFP2);
+      __ vmov(flt_scratch, src);
+      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+    } else {
+      FloatingPointHelper::Destination dest =
+          FloatingPointHelper::kCoreRegisters;
+      FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0,
+                                              sfpd_lo, sfpd_hi,
+                                              scratch0(), s0);
+    }
   } else {
-    __ vmov(flt_scratch, src);
-    __ vcvt_f64_u32(dbl_scratch, flt_scratch);
+    if (CpuFeatures::IsSupported(VFP2)) {
+      CpuFeatures::Scope scope(VFP2);
+      __ vmov(flt_scratch, src);
+      __ vcvt_f64_u32(dbl_scratch, flt_scratch);
+    } else {
+      Label no_leading_zero, done;
+      __ tst(src, Operand(0x80000000));
+      __ b(ne, &no_leading_zero);
+
+      // Integer has exactly one leading zero.
+      GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 1);
+      __ b(&done);
+
+      __ bind(&no_leading_zero);
+      GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 0);
+      __ bind(&done);
+    }
   }
 
   if (FLAG_inline_new) {
-    __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-    __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
+    __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
     __ Move(dst, r5);
     __ b(&done);
   }
@@ -4575,7 +4789,13 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
   // Done. Put the value in dbl_scratch into the value of the allocated heap
   // number.
   __ bind(&done);
-  __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
+    __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
+  } else {
+    __ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
+    __ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
+  }
   __ add(dst, dst, Operand(kHeapObjectTag));
   __ StoreToSafepointRegisterSlot(dst, dst);
 }
@@ -4592,7 +4812,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
     LNumberTagD* instr_;
   };
 
-  DoubleRegister input_reg = ToDoubleRegister(instr->value());
+  DwVfpRegister input_reg = ToDoubleRegister(instr->value());
   Register scratch = scratch0();
   Register reg = ToRegister(instr->result());
   Register temp1 = ToRegister(instr->temp());
@@ -4608,7 +4828,13 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
     __ jmp(deferred->entry());
   }
   __ bind(deferred->exit());
-  __ vstr(input_reg, reg, HeapNumber::kValueOffset);
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
+    __ vstr(input_reg, reg, HeapNumber::kValueOffset);
+  } else {
+    __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
+    __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
+  }
   // Now that we have finished with the object's real address, tag it.
   __ add(reg, reg, Operand(kHeapObjectTag));
 }
@@ -4649,13 +4875,14 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
 
 
 void LCodeGen::EmitNumberUntagD(Register input_reg,
-                                DoubleRegister result_reg,
+                                DwVfpRegister result_reg,
                                 bool deoptimize_on_undefined,
                                 bool deoptimize_on_minus_zero,
                                 LEnvironment* env) {
   Register scratch = scratch0();
   SwVfpRegister flt_scratch = double_scratch0().low();
   ASSERT(!result_reg.is(double_scratch0()));
+  CpuFeatures::Scope scope(VFP2);
 
   Label load_smi, heap_number, done;
 
@@ -4730,6 +4957,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   __ cmp(scratch1, Operand(ip));
 
   if (instr->truncating()) {
+    CpuFeatures::Scope scope(VFP2);
     Register scratch3 = ToRegister(instr->temp2());
     SwVfpRegister single_scratch = double_scratch.low();
     ASSERT(!scratch3.is(input_reg) &&
@@ -4821,7 +5049,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   ASSERT(result->IsDoubleRegister());
 
   Register input_reg = ToRegister(input);
-  DoubleRegister result_reg = ToDoubleRegister(result);
+  DwVfpRegister result_reg = ToDoubleRegister(result);
 
   EmitNumberUntagD(input_reg, result_reg,
                    instr->hydrogen()->deoptimize_on_undefined(),
@@ -4970,14 +5198,16 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
 
 
 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
-  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+  CpuFeatures::Scope vfp_scope(VFP2);
+  DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
   Register result_reg = ToRegister(instr->result());
-  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+  DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
   __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
 }
 
 
 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  CpuFeatures::Scope scope(VFP2);
   Register unclamped_reg = ToRegister(instr->unclamped());
   Register result_reg = ToRegister(instr->result());
   __ ClampUint8(result_reg, unclamped_reg);
@@ -4985,10 +5215,11 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
 
 
 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  CpuFeatures::Scope scope(VFP2);
   Register scratch = scratch0();
   Register input_reg = ToRegister(instr->unclamped());
   Register result_reg = ToRegister(instr->result());
-  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+  DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
   Label is_smi, done, heap_number;
 
   // Both smi and heap number cases are handled.
@@ -5565,6 +5796,7 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
 
 
 void LCodeGen::EnsureSpaceForLazyDeopt() {
+  if (info()->IsStub()) return;
   // Ensure that we have enough space after the previous lazy-bailout
   // instruction for patching the code here.
   int current_pc = masm()->pc_offset();
index 921285b..e7afcbf 100644 (file)
@@ -61,6 +61,7 @@ class LCodeGen BASE_EMBEDDED {
         deferred_(8, info->zone()),
         osr_pc_offset_(-1),
         last_lazy_deopt_pc_(0),
+        frame_is_built_(false),
         safepoints_(info->zone()),
         resolver_(this),
         expected_safepoint_kind_(Safepoint::kSimple) {
@@ -76,6 +77,15 @@ class LCodeGen BASE_EMBEDDED {
   Heap* heap() const { return isolate()->heap(); }
   Zone* zone() const { return zone_; }
 
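+  // An eager (at-entry) frame is required whenever there are spill slots,
+  // the code makes non-deferred calls, or the code is not a stub. A stub
+  // whose only calls are deferred needs no frame on entry; it builds one
+  // lazily when a deferred call actually occurs.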
+  bool NeedsEagerFrame() const {
+    return GetStackSlotCount() > 0 ||
+        info()->is_non_deferred_calling() ||
+        !info()->IsStub();
+  }
+  bool NeedsDeferredFrame() const {
+    return !NeedsEagerFrame() && info()->is_deferred_calling();
+  }
+
   // Support for converting LOperands to assembler types.
   // LOperand must be a register.
   Register ToRegister(LOperand* op) const;
@@ -84,12 +94,12 @@ class LCodeGen BASE_EMBEDDED {
   Register EmitLoadRegister(LOperand* op, Register scratch);
 
   // LOperand must be a double register.
-  DoubleRegister ToDoubleRegister(LOperand* op) const;
+  DwVfpRegister ToDoubleRegister(LOperand* op) const;
 
   // LOperand is loaded into dbl_scratch, unless already a double register.
-  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
-                                        SwVfpRegister flt_scratch,
-                                        DoubleRegister dbl_scratch);
+  DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
+                                       SwVfpRegister flt_scratch,
+                                       DwVfpRegister dbl_scratch);
   int ToInteger32(LConstantOperand* op) const;
   double ToDouble(LConstantOperand* op) const;
   Operand ToOperand(LOperand* op);
@@ -193,7 +203,7 @@ class LCodeGen BASE_EMBEDDED {
                        Register temporary2);
 
   int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
-  int GetParameterCount() const { return scope()->num_parameters(); }
+  int GetParameterCount() const { return info()->num_parameters(); }
 
   void Abort(const char* reason);
   void Comment(const char* format, ...);
@@ -275,7 +285,7 @@ class LCodeGen BASE_EMBEDDED {
   void PopulateDeoptimizationLiteralsWithInlinedFunctions();
 
   Register ToRegister(int index) const;
-  DoubleRegister ToDoubleRegister(int index) const;
+  DwVfpRegister ToDoubleRegister(int index) const;
 
   // Specific math operations - used from DoUnaryMathOperation.
   void EmitIntegerMathAbs(LUnaryMathOperation* instr);
@@ -308,7 +318,7 @@ class LCodeGen BASE_EMBEDDED {
   void EmitGoto(int block);
   void EmitBranch(int left_block, int right_block, Condition cc);
   void EmitNumberUntagD(Register input,
-                        DoubleRegister result,
+                        DwVfpRegister result,
                         bool deoptimize_on_undefined,
                         bool deoptimize_on_minus_zero,
                         LEnvironment* env);
@@ -369,11 +379,15 @@ class LCodeGen BASE_EMBEDDED {
                                            LEnvironment* environment);
 
   struct JumpTableEntry {
-    explicit inline JumpTableEntry(Address entry)
+    inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
         : label(),
-          address(entry) { }
+          address(entry),
+          needs_frame(frame),
+          is_lazy_deopt(is_lazy) { }
     Label label;
     Address address;
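+    // Whether a frame must be built before jumping to the deoptimizer
+    // (stubs may deoptimize without having set up a frame), and whether
+    // this entry corresponds to a lazy rather than an eager bailout.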
+    bool needs_frame;
+    bool is_lazy_deopt;
   };
 
   void EnsureSpaceForLazyDeopt();
@@ -402,6 +416,7 @@ class LCodeGen BASE_EMBEDDED {
   ZoneList<LDeferredCode*> deferred_;
   int osr_pc_offset_;
   int last_lazy_deopt_pc_;
+  bool frame_is_built_;
 
   // Builder that keeps track of safepoints in the code. The table
   // itself is emitted at the end of the generated code.
@@ -417,6 +432,7 @@ class LCodeGen BASE_EMBEDDED {
     PushSafepointRegistersScope(LCodeGen* codegen,
                                 Safepoint::Kind kind)
         : codegen_(codegen) {
+      ASSERT(codegen_->info()->is_calling());
       ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
       codegen_->expected_safepoint_kind_ = kind;
 
index c100720..4df1338 100644 (file)
@@ -171,8 +171,10 @@ void LGapResolver::BreakCycle(int index) {
   } else if (source->IsStackSlot()) {
     __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
   } else if (source->IsDoubleRegister()) {
+    CpuFeatures::Scope scope(VFP2);
     __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
   } else if (source->IsDoubleStackSlot()) {
+    CpuFeatures::Scope scope(VFP2);
     __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
   } else {
     UNREACHABLE();
@@ -192,8 +194,10 @@ void LGapResolver::RestoreValue() {
   } else if (saved_destination_->IsStackSlot()) {
     __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
   } else if (saved_destination_->IsDoubleRegister()) {
+    CpuFeatures::Scope scope(VFP2);
     __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
   } else if (saved_destination_->IsDoubleStackSlot()) {
+    CpuFeatures::Scope scope(VFP2);
     __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
   } else {
     UNREACHABLE();
@@ -229,7 +233,8 @@ void LGapResolver::EmitMove(int index) {
       MemOperand destination_operand = cgen_->ToMemOperand(destination);
       if (in_cycle_) {
         if (!destination_operand.OffsetIsUint12Encodable()) {
-          // ip is overwritten while saving the value to the destination.
+          CpuFeatures::Scope scope(VFP2);
+          // ip is overwritten while saving the value to the destination.
           // Therefore we can't use ip.  It is OK if the read from the source
           // destroys ip, since that happens before the value is read.
           __ vldr(kScratchDoubleReg.low(), source_operand);
@@ -267,7 +272,8 @@ void LGapResolver::EmitMove(int index) {
     }
 
   } else if (source->IsDoubleRegister()) {
-    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+    CpuFeatures::Scope scope(VFP2);
+    DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
     if (destination->IsDoubleRegister()) {
       __ vmov(cgen_->ToDoubleRegister(destination), source_register);
     } else {
@@ -276,7 +282,8 @@ void LGapResolver::EmitMove(int index) {
     }
 
   } else if (source->IsDoubleStackSlot()) {
-    MemOperand source_operand = cgen_->ToMemOperand(source);
+    CpuFeatures::Scope scope(VFP2);
+    MemOperand source_operand = cgen_->ToMemOperand(source);
     if (destination->IsDoubleRegister()) {
       __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
     } else {
index 5c064c1..067a05d 100644 (file)
@@ -290,7 +290,7 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {
 }
 
 
-void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
   ASSERT(CpuFeatures::IsSupported(VFP2));
   CpuFeatures::Scope scope(VFP2);
   if (!dst.is(src)) {
@@ -643,19 +643,19 @@ void MacroAssembler::PopSafepointRegisters() {
 
 void MacroAssembler::PushSafepointRegistersAndDoubles() {
   PushSafepointRegisters();
-  sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
+  sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
                       kDoubleSize));
-  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
+  for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
     vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
   }
 }
 
 
 void MacroAssembler::PopSafepointRegistersAndDoubles() {
-  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
+  for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
     vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
   }
-  add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
+  add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
                       kDoubleSize));
   PopSafepointRegisters();
 }
@@ -691,7 +691,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
 
 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
   // General purpose registers are pushed last on the stack.
-  int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
+  int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
   return MemOperand(sp, doubles_size + register_offset);
 }
@@ -967,7 +967,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
   }
 }
 
-void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
   ASSERT(CpuFeatures::IsSupported(VFP2));
   if (use_eabi_hardfloat()) {
     Move(dst, d0);
@@ -2717,7 +2717,10 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
   const Runtime::Function* function = Runtime::FunctionForId(id);
   mov(r0, Operand(function->nargs));
   mov(r1, Operand(ExternalReference(function, isolate())));
-  CEntryStub stub(1, kSaveFPRegs);
+  SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
+      ? kSaveFPRegs
+      : kDontSaveFPRegs;
+  CEntryStub stub(1, mode);
   CallStub(&stub);
 }
 
@@ -3393,9 +3396,9 @@ int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
   if (use_eabi_hardfloat()) {
     // In the hard floating point calling convention, we can use
     // all double registers to pass doubles.
-    if (num_double_arguments > DoubleRegister::kNumRegisters) {
+    if (num_double_arguments > DoubleRegister::NumRegisters()) {
       stack_passed_words +=
-          2 * (num_double_arguments - DoubleRegister::kNumRegisters);
+          2 * (num_double_arguments - DoubleRegister::NumRegisters());
     }
   } else {
     // In the soft floating point calling convention, every double
@@ -3436,7 +3439,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
 }
 
 
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
+void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
   ASSERT(CpuFeatures::IsSupported(VFP2));
   if (use_eabi_hardfloat()) {
     Move(d0, dreg);
@@ -3446,8 +3449,8 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
 }
 
 
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
-                                             DoubleRegister dreg2) {
+void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
+                                             DwVfpRegister dreg2) {
   ASSERT(CpuFeatures::IsSupported(VFP2));
   if (use_eabi_hardfloat()) {
     if (dreg2.is(d0)) {
@@ -3465,7 +3468,7 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
 }
 
 
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
+void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
                                              Register reg) {
   ASSERT(CpuFeatures::IsSupported(VFP2));
   if (use_eabi_hardfloat()) {
@@ -3748,8 +3751,8 @@ void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
 
 
 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
-                                        DoubleRegister input_reg,
-                                        DoubleRegister temp_double_reg) {
+                                        DwVfpRegister input_reg,
+                                        DwVfpRegister temp_double_reg) {
   Label above_zero;
   Label done;
   Label in_bounds;
index 3c05e00..50c298b 100644 (file)
@@ -178,7 +178,7 @@ class MacroAssembler: public Assembler {
   // Register move. May do nothing if the registers are identical.
   void Move(Register dst, Handle<Object> value);
   void Move(Register dst, Register src, Condition cond = al);
-  void Move(DoubleRegister dst, DoubleRegister src);
+  void Move(DwVfpRegister dst, DwVfpRegister src);
 
   // Load an object from the root table.
   void LoadRoot(Register destination,
@@ -1058,9 +1058,9 @@ class MacroAssembler: public Assembler {
   // whether soft or hard floating point ABI is used. These functions
   // abstract parameter passing for the three different ways we call
   // C functions from generated code.
-  void SetCallCDoubleArguments(DoubleRegister dreg);
-  void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
-  void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
+  void SetCallCDoubleArguments(DwVfpRegister dreg);
+  void SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2);
+  void SetCallCDoubleArguments(DwVfpRegister dreg, Register reg);
 
   // Calls a C function and cleans up the space for arguments allocated
   // by PrepareCallCFunction. The called function is not allowed to trigger a
@@ -1076,7 +1076,7 @@ class MacroAssembler: public Assembler {
                      int num_reg_arguments,
                      int num_double_arguments);
 
-  void GetCFunctionDoubleResult(const DoubleRegister dst);
+  void GetCFunctionDoubleResult(const DwVfpRegister dst);
 
   // Calls an API function.  Allocates HandleScope, extracts returned value
   // from handle and propagates exceptions.  Restores context.  stack_space
@@ -1289,8 +1289,8 @@ class MacroAssembler: public Assembler {
   void ClampUint8(Register output_reg, Register input_reg);
 
   void ClampDoubleToUint8(Register result_reg,
-                          DoubleRegister input_reg,
-                          DoubleRegister temp_double_reg);
+                          DwVfpRegister input_reg,
+                          DwVfpRegister temp_double_reg);
 
 
   void LoadInstanceDescriptors(Register map, Register descriptors);
@@ -1365,9 +1365,9 @@ class MacroAssembler: public Assembler {
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
 
-  // Needs access to SafepointRegisterStackIndex for optimized frame
+  // Needs access to SafepointRegisterStackIndex for compiled frame
   // traversal.
-  friend class OptimizedFrame;
+  friend class CompiledFrame;
 };
 
 
index 4604c33..e79c520 100644 (file)
@@ -1053,42 +1053,6 @@ static void StoreIntAsFloat(MacroAssembler* masm,
 }
 
 
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
-// Resulting double is returned in registers hiword:loword.
-// This functions does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
-                                Register hiword,
-                                Register loword,
-                                Register scratch,
-                                int leading_zeroes) {
-  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
-  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
-  const int mantissa_shift_for_hi_word =
-      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
-
-  const int mantissa_shift_for_lo_word =
-      kBitsPerInt - mantissa_shift_for_hi_word;
-
-  __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
-  if (mantissa_shift_for_hi_word > 0) {
-    __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
-    __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
-  } else {
-    __ mov(loword, Operand(0, RelocInfo::NONE));
-    __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
-  }
-
-  // If least significant bit of biased exponent was not 1 it was corrupted
-  // by most significant bit of mantissa so we should fix that.
-  if (!(biased_exponent & 1)) {
-    __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
-  }
-}
-
-
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -3319,9 +3283,17 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
   //  -- r1    : receiver
   // -----------------------------------
   ElementsKind elements_kind = receiver_map->elements_kind();
-  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
-  __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
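+  // Fast and external-array element loads now go through the
+  // Hydrogen-generated KeyedLoadFastElementStub; dictionary-mode receivers
+  // keep using the platform KeyedLoadDictionaryElementStub.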
+  if (receiver_map->has_fast_elements() ||
+      receiver_map->has_external_array_elements()) {
+    Handle<Code> stub = KeyedLoadFastElementStub(
+        receiver_map->instance_type() == JS_ARRAY_TYPE,
+        elements_kind).GetCode();
+    __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
+  } else {
+    Handle<Code> stub =
+        KeyedLoadDictionaryElementStub().GetCode();
+    __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
+  }
 
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
@@ -3726,339 +3698,6 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
 }
 
 
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
-    MacroAssembler* masm,
-    ElementsKind elements_kind) {
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- r0     : key
-  //  -- r1     : receiver
-  // -----------------------------------
-  Label miss_force_generic, slow, failed_allocation;
-
-  Register key = r0;
-  Register receiver = r1;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
-
-  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // r3: elements array
-
-  // Check that the index is in range.
-  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
-  __ cmp(key, ip);
-  // Unsigned comparison catches both negative and too-large values.
-  __ b(hs, &miss_force_generic);
-
-  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-  // r3: base pointer of external storage
-
-  // We are not untagging smi key and instead work with it
-  // as if it was premultiplied by 2.
-  STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
-
-  Register value = r2;
-  switch (elements_kind) {
-    case EXTERNAL_BYTE_ELEMENTS:
-      __ ldrsb(value, MemOperand(r3, key, LSR, 1));
-      break;
-    case EXTERNAL_PIXEL_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      __ ldrb(value, MemOperand(r3, key, LSR, 1));
-      break;
-    case EXTERNAL_SHORT_ELEMENTS:
-      __ ldrsh(value, MemOperand(r3, key, LSL, 0));
-      break;
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      __ ldrh(value, MemOperand(r3, key, LSL, 0));
-      break;
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-      __ ldr(value, MemOperand(r3, key, LSL, 1));
-      break;
-    case EXTERNAL_FLOAT_ELEMENTS:
-      if (CpuFeatures::IsSupported(VFP2)) {
-        CpuFeatures::Scope scope(VFP2);
-        __ add(r2, r3, Operand(key, LSL, 1));
-        __ vldr(s0, r2, 0);
-      } else {
-        __ ldr(value, MemOperand(r3, key, LSL, 1));
-      }
-      break;
-    case EXTERNAL_DOUBLE_ELEMENTS:
-      if (CpuFeatures::IsSupported(VFP2)) {
-        CpuFeatures::Scope scope(VFP2);
-        __ add(r2, r3, Operand(key, LSL, 2));
-        __ vldr(d0, r2, 0);
-      } else {
-        __ add(r4, r3, Operand(key, LSL, 2));
-        // r4: pointer to the beginning of the double we want to load.
-        __ ldr(r2, MemOperand(r4, 0));
-        __ ldr(r3, MemOperand(r4, Register::kSizeInBytes));
-      }
-      break;
-    case FAST_ELEMENTS:
-    case FAST_SMI_ELEMENTS:
-    case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
-    case DICTIONARY_ELEMENTS:
-    case NON_STRICT_ARGUMENTS_ELEMENTS:
-      UNREACHABLE();
-      break;
-  }
-
-  // For integer array types:
-  // r2: value
-  // For float array type:
-  // s0: value (if VFP3 is supported)
-  // r2: value (if VFP3 is not supported)
-  // For double array type:
-  // d0: value (if VFP3 is supported)
-  // r2/r3: value (if VFP3 is not supported)
-
-  if (elements_kind == EXTERNAL_INT_ELEMENTS) {
-    // For the Int and UnsignedInt array types, we need to see whether
-    // the value can be represented in a Smi. If not, we need to convert
-    // it to a HeapNumber.
-    Label box_int;
-    __ cmp(value, Operand(0xC0000000));
-    __ b(mi, &box_int);
-    // Tag integer as smi and return it.
-    __ mov(r0, Operand(value, LSL, kSmiTagSize));
-    __ Ret();
-
-    __ bind(&box_int);
-    if (CpuFeatures::IsSupported(VFP2)) {
-      CpuFeatures::Scope scope(VFP2);
-      // Allocate a HeapNumber for the result and perform int-to-double
-      // conversion.  Don't touch r0 or r1 as they are needed if allocation
-      // fails.
-      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-
-      __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
-      // Now we can use r0 for the result as key is not needed any more.
-      __ add(r0, r5, Operand(kHeapObjectTag));
-      __ vmov(s0, value);
-      __ vcvt_f64_s32(d0, s0);
-      __ vstr(d0, r5, HeapNumber::kValueOffset);
-      __ Ret();
-    } else {
-      // Allocate a HeapNumber for the result and perform int-to-double
-      // conversion.  Don't touch r0 or r1 as they are needed if allocation
-      // fails.
-      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT);
-      // Now we can use r0 for the result as key is not needed any more.
-      __ mov(r0, r5);
-      Register dst_mantissa = r1;
-      Register dst_exponent = r3;
-      FloatingPointHelper::Destination dest =
-          FloatingPointHelper::kCoreRegisters;
-      FloatingPointHelper::ConvertIntToDouble(masm,
-                                              value,
-                                              dest,
-                                              d0,
-                                              dst_mantissa,
-                                              dst_exponent,
-                                              r9,
-                                              s0);
-      __ str(dst_mantissa, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
-      __ str(dst_exponent, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-      __ Ret();
-    }
-  } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
-    // The test is different for unsigned int values. Since we need
-    // the value to be in the range of a positive smi, we can't
-    // handle either of the top two bits being set in the value.
-    if (CpuFeatures::IsSupported(VFP2)) {
-      CpuFeatures::Scope scope(VFP2);
-      Label box_int, done;
-      __ tst(value, Operand(0xC0000000));
-      __ b(ne, &box_int);
-      // Tag integer as smi and return it.
-      __ mov(r0, Operand(value, LSL, kSmiTagSize));
-      __ Ret();
-
-      __ bind(&box_int);
-      __ vmov(s0, value);
-      // Allocate a HeapNumber for the result and perform int-to-double
-      // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
-      // registers - also when jumping due to exhausted young space.
-      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
-
-      __ vcvt_f64_u32(d0, s0);
-      __ vstr(d0, r2, HeapNumber::kValueOffset);
-
-      __ add(r0, r2, Operand(kHeapObjectTag));
-      __ Ret();
-    } else {
-      // Check whether unsigned integer fits into smi.
-      Label box_int_0, box_int_1, done;
-      __ tst(value, Operand(0x80000000));
-      __ b(ne, &box_int_0);
-      __ tst(value, Operand(0x40000000));
-      __ b(ne, &box_int_1);
-      // Tag integer as smi and return it.
-      __ mov(r0, Operand(value, LSL, kSmiTagSize));
-      __ Ret();
-
-      Register hiword = value;  // r2.
-      Register loword = r3;
-
-      __ bind(&box_int_0);
-      // Integer does not have leading zeros.
-      GenerateUInt2Double(masm, hiword, loword, r4, 0);
-      __ b(&done);
-
-      __ bind(&box_int_1);
-      // Integer has one leading zero.
-      GenerateUInt2Double(masm, hiword, loword, r4, 1);
-
-
-      __ bind(&done);
-      // Integer was converted to double in registers hiword:loword.
-      // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
-      // clobbers all registers - also when jumping due to exhausted young
-      // space.
-      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT);
-
-      __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
-      __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
-
-      __ mov(r0, r4);
-      __ Ret();
-    }
-  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-    // For the floating-point array type, we need to always allocate a
-    // HeapNumber.
-    if (CpuFeatures::IsSupported(VFP2)) {
-      CpuFeatures::Scope scope(VFP2);
-      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
-      // AllocateHeapNumber clobbers all registers - also when jumping due to
-      // exhausted young space.
-      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
-      __ vcvt_f64_f32(d0, s0);
-      __ vstr(d0, r2, HeapNumber::kValueOffset);
-
-      __ add(r0, r2, Operand(kHeapObjectTag));
-      __ Ret();
-    } else {
-      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
-      // AllocateHeapNumber clobbers all registers - also when jumping due to
-      // exhausted young space.
-      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT);
-      // VFP is not available, do manual single to double conversion.
-
-      // r2: floating point value (binary32)
-      // r3: heap number for result
-
-      // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
-      // the slow case from here.
-      __ and_(r0, value, Operand(kBinary32MantissaMask));
-
-      // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
-      // the slow case from here.
-      __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
-      __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
-      Label exponent_rebiased;
-      __ teq(r1, Operand(0x00));
-      __ b(eq, &exponent_rebiased);
-
-      __ teq(r1, Operand(0xff));
-      __ mov(r1, Operand(0x7ff), LeaveCC, eq);
-      __ b(eq, &exponent_rebiased);
-
-      // Rebias exponent.
-      __ add(r1,
-             r1,
-             Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
-      __ bind(&exponent_rebiased);
-      __ and_(r2, value, Operand(kBinary32SignMask));
-      value = no_reg;
-      __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
-
-      // Shift mantissa.
-      static const int kMantissaShiftForHiWord =
-          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
-      static const int kMantissaShiftForLoWord =
-          kBitsPerInt - kMantissaShiftForHiWord;
-
-      __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
-      __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
-
-      __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
-      __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
-
-      __ mov(r0, r3);
-      __ Ret();
-    }
-  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-    if (CpuFeatures::IsSupported(VFP2)) {
-      CpuFeatures::Scope scope(VFP2);
-      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
-      // AllocateHeapNumber clobbers all registers - also when jumping due to
-      // exhausted young space.
-      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
-      __ vstr(d0, r2, HeapNumber::kValueOffset);
-
-      __ add(r0, r2, Operand(kHeapObjectTag));
-      __ Ret();
-    } else {
-      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
-      // AllocateHeapNumber clobbers all registers - also when jumping due to
-      // exhausted young space.
-      __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT);
-
-      __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
-      __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
-      __ mov(r0, r4);
-      __ Ret();
-    }
-
-  } else {
-    // Tag integer as smi and return it.
-    __ mov(r0, Operand(value, LSL, kSmiTagSize));
-    __ Ret();
-  }
-
-  // Slow case, key and receiver still in r0 and r1.
-  __ bind(&slow);
-  __ IncrementCounter(
-      masm->isolate()->counters()->keyed_load_external_array_slow(),
-      1, r2, r3);
-
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- r0     : key
-  //  -- r1     : receiver
-  // -----------------------------------
-
-  __ Push(r1, r0);
-
-  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-
-  __ bind(&miss_force_generic);
-  Handle<Code> stub =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
-
 void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     MacroAssembler* masm,
     ElementsKind elements_kind) {
@@ -4403,118 +4042,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
 }
 
 
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- lr    : return address
-  //  -- r0    : key
-  //  -- r1    : receiver
-  // -----------------------------------
-  Label miss_force_generic;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic);
-
-  // Get the elements array.
-  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
-  __ AssertFastElements(r2);
-
-  // Check that the key is within bounds.
-  __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
-  __ cmp(r0, Operand(r3));
-  __ b(hs, &miss_force_generic);
-
-  // Load the result and make sure it's not the hole.
-  __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ ldr(r4,
-         MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-  __ cmp(r4, ip);
-  __ b(eq, &miss_force_generic);
-  __ mov(r0, r4);
-  __ Ret();
-
-  __ bind(&miss_force_generic);
-  Handle<Code> stub =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
-    MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- lr    : return address
-  //  -- r0    : key
-  //  -- r1    : receiver
-  // -----------------------------------
-  Label miss_force_generic, slow_allocate_heapnumber;
-
-  Register key_reg = r0;
-  Register receiver_reg = r1;
-  Register elements_reg = r2;
-  Register heap_number_reg = r2;
-  Register indexed_double_offset = r3;
-  Register scratch = r4;
-  Register scratch2 = r5;
-  Register scratch3 = r6;
-  Register heap_number_map = r7;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
-
-  // Get the elements array.
-  __ ldr(elements_reg,
-         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
-  // Check that the key is within bounds.
-  __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
-  __ cmp(key_reg, Operand(scratch));
-  __ b(hs, &miss_force_generic);
-
-  // Load the upper word of the double in the fixed array and test for NaN.
-  __ add(indexed_double_offset, elements_reg,
-         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
-  uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
-  __ cmp(scratch, Operand(kHoleNanUpper32));
-  __ b(&miss_force_generic, eq);
-
-  // Non-NaN. Allocate a new heap number and copy the double value into it.
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
-                        heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
-
-  // Don't need to reload the upper 32 bits of the double, it's already in
-  // scratch.
-  __ str(scratch, FieldMemOperand(heap_number_reg,
-                                  HeapNumber::kExponentOffset));
-  __ ldr(scratch, FieldMemOperand(indexed_double_offset,
-                                  FixedArray::kHeaderSize));
-  __ str(scratch, FieldMemOperand(heap_number_reg,
-                                  HeapNumber::kMantissaOffset));
-
-  __ mov(r0, heap_number_reg);
-  __ Ret();
-
-  __ bind(&slow_allocate_heapnumber);
-  Handle<Code> slow_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_Slow();
-  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
-  __ bind(&miss_force_generic);
-  Handle<Code> miss_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
 void KeyedStoreStubCompiler::GenerateStoreFastElement(
     MacroAssembler* masm,
     bool is_js_array,
index 25157be..ccaf290 100644 (file)
@@ -1375,6 +1375,11 @@ ExternalReference ExternalReference::page_flags(Page* page) {
 }
 
 
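+// Wraps a raw deoptimizer entry address as an external reference so that
+// code generators can load it like any other external address.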
+ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
+  return ExternalReference(entry);
+}
+
+
 // Helper function to compute x^y, where y is known to be an
 // integer. Uses binary decomposition to limit the number of
 // multiplications; see the discussion in "Hacker's Delight" by Henry
index 4639374..111c1d9 100644 (file)
@@ -736,6 +736,8 @@ class ExternalReference BASE_EMBEDDED {
 
   static ExternalReference page_flags(Page* page);
 
+  static ExternalReference ForDeoptEntry(Address entry);
+
   Address address() const {return reinterpret_cast<Address>(address_);}
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
index 232cb73..c43b913 100644 (file)
@@ -616,14 +616,6 @@ void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
 // ----------------------------------------------------------------------------
 // Implementation of AstVisitor
 
-bool AstVisitor::CheckStackOverflow() {
-  if (stack_overflow_) return true;
-  StackLimitCheck check(isolate_);
-  if (!check.HasOverflowed()) return false;
-  return (stack_overflow_ = true);
-}
-
-
 void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
   for (int i = 0; i < declarations->length(); i++) {
     Visit(declarations->at(i));
index d299f19..a0a7a73 100644 (file)
--- a/src/ast.h
+++ b/src/ast.h
@@ -2492,40 +2492,51 @@ inline ModuleVariable::ModuleVariable(VariableProxy* proxy)
 
 class AstVisitor BASE_EMBEDDED {
  public:
-  AstVisitor() : isolate_(Isolate::Current()), stack_overflow_(false) { }
+  AstVisitor() {}
   virtual ~AstVisitor() { }
 
   // Stack overflow check and dynamic dispatch.
-  void Visit(AstNode* node) { if (!CheckStackOverflow()) node->Accept(this); }
+  virtual void Visit(AstNode* node) = 0;
 
   // Iteration left-to-right.
   virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
   virtual void VisitStatements(ZoneList<Statement*>* statements);
   virtual void VisitExpressions(ZoneList<Expression*>* expressions);
 
-  // Stack overflow tracking support.
-  bool HasStackOverflow() const { return stack_overflow_; }
-  bool CheckStackOverflow();
-
-  // If a stack-overflow exception is encountered when visiting a
-  // node, calling SetStackOverflow will make sure that the visitor
-  // bails out without visiting more nodes.
-  void SetStackOverflow() { stack_overflow_ = true; }
-  void ClearStackOverflow() { stack_overflow_ = false; }
-
   // Individual AST nodes.
 #define DEF_VISIT(type)                         \
   virtual void Visit##type(type* node) = 0;
   AST_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
+};
 
- protected:
-  Isolate* isolate() { return isolate_; }
 
- private:
-  Isolate* isolate_;
-  bool stack_overflow_;
-};
+#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS()                       \
+public:                                                             \
+  virtual void Visit(AstNode* node) {                               \
+    if (!CheckStackOverflow()) node->Accept(this);                  \
+  }                                                                 \
+                                                                    \
+  void SetStackOverflow() { stack_overflow_ = true; }               \
+  void ClearStackOverflow() { stack_overflow_ = false; }            \
+  bool HasStackOverflow() const { return stack_overflow_; }         \
+                                                                    \
+  bool CheckStackOverflow() {                                       \
+    if (stack_overflow_) return true;                               \
+    StackLimitCheck check(isolate_);                                \
+    if (!check.HasOverflowed()) return false;                       \
+    return (stack_overflow_ = true);                                \
+  }                                                                 \
+                                                                    \
+private:                                                            \
+  void InitializeAstVisitor() {                                     \
+    isolate_ = Isolate::Current();                                  \
+    stack_overflow_ = false;                                        \
+  }                                                                 \
+  Isolate* isolate() { return isolate_; }                           \
+                                                                    \
+  Isolate* isolate_;                                                \
+  bool stack_overflow_
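+
+
+// Usage (a sketch; ExampleVisitor is a hypothetical name): a concrete
+// visitor places the macro at the end of its class body and calls
+// InitializeAstVisitor() from its constructor:
+//
+//   class ExampleVisitor: public AstVisitor {
+//    public:
+//     ExampleVisitor() { InitializeAstVisitor(); }
+//     virtual void VisitLiteral(Literal* node) { /* ... */ }
+//     ...
+//     DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+//   };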
 
 
 // ----------------------------------------------------------------------------
index a2f752e..1ca4053 100644 (file)
@@ -107,6 +107,8 @@ enum BuiltinExtraArguments {
                                     Code::kNoExtraICState)              \
   V(NotifyLazyDeoptimized,          BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
+  V(NotifyICMiss,                   BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
   V(NotifyOSR,                      BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
                                                                         \
@@ -386,6 +388,7 @@ class Builtins {
   static void Generate_NotifyDeoptimized(MacroAssembler* masm);
   static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
   static void Generate_NotifyOSR(MacroAssembler* masm);
+  static void Generate_NotifyICMiss(MacroAssembler* masm);
   static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
 
   static void Generate_FunctionCall(MacroAssembler* masm);
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
new file mode 100644 (file)
index 0000000..da9ec6a
--- /dev/null
@@ -0,0 +1,137 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "code-stubs.h"
+#include "hydrogen.h"
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+
+Handle<Code> HydrogenCodeStub::CodeFromGraph(HGraph* graph) {
+  graph->OrderBlocks();
+  graph->AssignDominators();
+  graph->CollectPhis();
+  graph->InsertRepresentationChanges();
+  graph->EliminateRedundantBoundsChecks();
+  LChunk* chunk = LChunk::NewChunk(graph);
+  ASSERT(chunk != NULL);
+  Handle<Code> stub = chunk->Codegen(Code::COMPILED_STUB);
+  return stub;
+}
+
+
+class CodeStubGraphBuilderBase : public HGraphBuilder {
+ public:
+  CodeStubGraphBuilderBase(Isolate* isolate, HydrogenCodeStub* stub)
+      : HGraphBuilder(&info_), info_(stub, isolate) {}
+  virtual bool BuildGraph();
+
+ protected:
+  virtual void BuildCodeStub() = 0;
+  HParameter* GetParameter(int parameter) { return parameters_[parameter]; }
+  HydrogenCodeStub* stub() { return info_.code_stub(); }
+
+ private:
+  SmartArrayPointer<HParameter*> parameters_;
+  CompilationInfoWithZone info_;
+};
+
+
+bool CodeStubGraphBuilderBase::BuildGraph() {
+  if (FLAG_trace_hydrogen) {
+    PrintF("-----------------------------------------------------------\n");
+    PrintF("Compiling stub using hydrogen\n");
+    HTracer::Instance()->TraceCompilation(&info_);
+  }
+  HBasicBlock* next_block = graph()->CreateBasicBlock();
+  next_block->SetInitialEnvironment(graph()->start_environment());
+  HGoto* jump = new(zone()) HGoto(next_block);
+  graph()->entry_block()->Finish(jump);
+  set_current_block(next_block);
+
+  int major_key = stub()->MajorKey();
+  CodeStubInterfaceDescriptor** descriptors =
+      info_.isolate()->code_stub_interface_descriptors();
+  if (descriptors[major_key] == NULL) {
+    descriptors[major_key] = stub()->GetInterfaceDescriptor(info_.isolate());
+  }
+
+  CodeStubInterfaceDescriptor* descriptor = descriptors[major_key];
+  parameters_.Reset(new HParameter*[descriptor->number_of_register_params]);
+
+  HGraph* graph = this->graph();
+  Zone* zone = this->zone();
+  for (int i = 0; i < descriptor->number_of_register_params; ++i) {
+    HParameter* param = new(zone) HParameter(i);
+    AddInstruction(param);
+    graph->start_environment()->Push(param);
+    parameters_[i] = param;
+  }
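+  // Record the entry environment under the special stub-entry bailout id so
+  // a deopt from inside the stub has a well-defined state to restore.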
+  AddSimulate(BailoutId::StubEntry());
+
+  BuildCodeStub();
+
+  return true;
+}
+
+template <class Stub>
+class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
+ public:
+  explicit CodeStubGraphBuilder(Stub* stub)
+      : CodeStubGraphBuilderBase(Isolate::Current(), stub) {}
+
+ protected:
+  virtual void BuildCodeStub();
+  Stub* casted_stub() { return static_cast<Stub*>(stub()); }
+};
+
+
+template <>
+void CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
+  Zone* zone = this->zone();
+
+  HInstruction* load = BuildUncheckedMonomorphicElementAccess(
+      GetParameter(0), GetParameter(1), NULL, NULL,
+      casted_stub()->is_js_array(), casted_stub()->elements_kind(), false);
+  AddInstruction(load);
+
+  HReturn* ret = new(zone) HReturn(load);
+  current_block()->Finish(ret);
+}
+
+
+Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
+  CodeStubGraphBuilder<KeyedLoadFastElementStub> builder(this);
+  return CodeFromGraph(builder.CreateGraph());
+}
+
+
+} }  // namespace v8::internal
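
A usage sketch for orientation (the call site is hypothetical; the stub and
method names are from this patch): clients do not invoke the graph builder
directly, they call CodeStub::GetCode(), which dispatches to the Hydrogen
path for any HydrogenCodeStub subclass:

  KeyedLoadFastElementStub stub(false /* is_js_array */, FAST_ELEMENTS);
  Handle<Code> code = stub.GetCode();  // builds the HGraph, then Lithium

GetCode() itself is reworked in the code-stubs.cc hunks that follow.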
index 276c87e..c7d4c80 100644 (file)
@@ -48,20 +48,6 @@ bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) {
 }
 
 
-void CodeStub::GenerateCode(MacroAssembler* masm) {
-  // Update the static counter each time a new code stub is generated.
-  masm->isolate()->counters()->code_stubs()->Increment();
-
-  // Nested stubs are not allowed for leaves.
-  AllowStubCallsScope allow_scope(masm, false);
-
-  // Generate the code for the stub.
-  masm->set_generating_stub(true);
-  NoCurrentFrameScope scope(masm);
-  Generate(masm);
-}
-
-
 SmartArrayPointer<const char> CodeStub::GetName() {
   char buffer[100];
   NoAllocationStringAllocator allocator(buffer,
@@ -72,8 +58,7 @@ SmartArrayPointer<const char> CodeStub::GetName() {
 }
 
 
-void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
-  Isolate* isolate = masm->isolate();
+void CodeStub::RecordCodeGeneration(Code* code, Isolate* isolate) {
   SmartArrayPointer<const char> name = GetName();
   PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
   GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
@@ -87,6 +72,39 @@ int CodeStub::GetCodeKind() {
 }
 
 
+Handle<Code> PlatformCodeStub::GenerateCode() {
+  Isolate* isolate = Isolate::Current();
+  Factory* factory = isolate->factory();
+
+  // Generate the new code.
+  MacroAssembler masm(isolate, NULL, 256);
+
+  {
+    // Update the static counter each time a new code stub is generated.
+    isolate->counters()->code_stubs()->Increment();
+
+    // Nested stubs are not allowed for leaves.
+    AllowStubCallsScope allow_scope(&masm, false);
+
+    // Generate the code for the stub.
+    masm.set_generating_stub(true);
+    NoCurrentFrameScope scope(&masm);
+    Generate(&masm);
+  }
+
+  // Create the code object.
+  CodeDesc desc;
+  masm.GetCode(&desc);
+
+  // Copy the generated code into a heap object.
+  Code::Flags flags = Code::ComputeFlags(
+      static_cast<Code::Kind>(GetCodeKind()), GetICState());
+  Handle<Code> new_object = factory->NewCode(
+      desc, flags, masm.CodeObject(), NeedsImmovableCode());
+  return new_object;
+}
+
+
 Handle<Code> CodeStub::GetCode() {
   Isolate* isolate = Isolate::Current();
   Factory* factory = isolate->factory();
@@ -102,23 +120,10 @@ Handle<Code> CodeStub::GetCode() {
   {
     HandleScope scope(isolate);
 
-    // Generate the new code.
-    MacroAssembler masm(isolate, NULL, 256);
-    GenerateCode(&masm);
-
-    // Create the code object.
-    CodeDesc desc;
-    masm.GetCode(&desc);
-
-    // Copy the generated code into a heap object.
-    Code::Flags flags = Code::ComputeFlags(
-        static_cast<Code::Kind>(GetCodeKind()),
-        GetICState());
-    Handle<Code> new_object = factory->NewCode(
-        desc, flags, masm.CodeObject(), NeedsImmovableCode());
+    Handle<Code> new_object = GenerateCode();
     new_object->set_major_key(MajorKey());
     FinishCode(new_object);
-    RecordCodeGeneration(*new_object, &masm);
+    RecordCodeGeneration(*new_object, isolate);
 
 #ifdef ENABLE_DISASSEMBLER
     if (FLAG_print_code_stubs) {
@@ -416,36 +421,8 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
 }
 
 
-void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
-  switch (elements_kind_) {
-    case FAST_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_SMI_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-      KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
-      break;
-    case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
-      KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
-      break;
-    case EXTERNAL_BYTE_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-    case EXTERNAL_SHORT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS:
-    case EXTERNAL_DOUBLE_ELEMENTS:
-    case EXTERNAL_PIXEL_ELEMENTS:
-      KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_);
-      break;
-    case DICTIONARY_ELEMENTS:
-      KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
-      break;
-    case NON_STRICT_ARGUMENTS_ELEMENTS:
-      UNREACHABLE();
-      break;
-  }
+void KeyedLoadDictionaryElementStub::Generate(MacroAssembler* masm) {
+  KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
 }
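
Only the dictionary case keeps a hand-written platform stub. Roughly, a
hypothetical call site choosing between the two paths would read (a sketch,
not code from this patch):

  Handle<Code> code = (elements_kind == DICTIONARY_ELEMENTS)
      ? KeyedLoadDictionaryElementStub().GetCode()
      : KeyedLoadFastElementStub(is_js_array, elements_kind).GetCode();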
 
 
index ae113f5..c6e328c 100644 (file)
@@ -162,20 +162,29 @@ class CodeStub BASE_EMBEDDED {
   // Lookup the code in the (possibly custom) cache.
   bool FindCodeInCache(Code** code_out, Isolate* isolate);
 
+  // Returns information for computing the number key.
+  virtual Major MajorKey() = 0;
+  virtual int MinorKey() = 0;
+
  protected:
   static bool CanUseFPRegisters();
 
- private:
-  // Nonvirtual wrapper around the stub-specific Generate function.  Call
-  // this function to set up the macro assembler and generate the code.
-  void GenerateCode(MacroAssembler* masm);
-
   // Generates the assembler code for the stub.
-  virtual void Generate(MacroAssembler* masm) = 0;
+  virtual Handle<Code> GenerateCode() = 0;
 
+  // BinaryOpStub needs to override this.
+  virtual InlineCacheState GetICState() {
+    return UNINITIALIZED;
+  }
+
+  // Returns whether the code generated for this stub needs to be allocated as
+  // a fixed (non-moveable) code object.
+  virtual bool NeedsImmovableCode() { return false; }
+
+ private:
   // Perform bookkeeping required after code generation when stub code is
   // initially generated.
-  void RecordCodeGeneration(Code* code, MacroAssembler* masm);
+  void RecordCodeGeneration(Code* code, Isolate* isolate);
 
   // Finish the code object after it has been generated.
   virtual void FinishCode(Handle<Code> code) { }
@@ -184,18 +193,9 @@ class CodeStub BASE_EMBEDDED {
   // registering stub in the stub cache.
   virtual void Activate(Code* code) { }
 
-  // Returns information for computing the number key.
-  virtual Major MajorKey() = 0;
-  virtual int MinorKey() = 0;
-
   // BinaryOpStub needs to override this.
   virtual int GetCodeKind();
 
-  // BinaryOpStub needs to override this.
-  virtual InlineCacheState GetICState() {
-    return UNINITIALIZED;
-  }
-
   // Add the code to a specialized cache, specific to an individual
   // stub type. Please note, this method must add the code object to a
   // roots object, otherwise we will remove the code during GC.
@@ -213,10 +213,6 @@ class CodeStub BASE_EMBEDDED {
   SmartArrayPointer<const char> GetName();
   virtual void PrintName(StringStream* stream);
 
-  // Returns whether the code generated for this stub needs to be allocated as
-  // a fixed (non-moveable) code object.
-  virtual bool NeedsImmovableCode() { return false; }
-
   // Computes the key based on major and minor.
   uint32_t GetKey() {
     ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
@@ -232,6 +228,43 @@ class CodeStub BASE_EMBEDDED {
 };
 
 
+class PlatformCodeStub : public CodeStub {
+ public:
+  // Retrieve the code for the stub. Generate the code if needed.
+  virtual Handle<Code> GenerateCode();
+
+  virtual int GetCodeKind() { return Code::STUB; }
+
+ protected:
+  // Generates the assembler code for the stub.
+  virtual void Generate(MacroAssembler* masm) = 0;
+};
+
+
+struct CodeStubInterfaceDescriptor {
+  int number_of_register_params;
+  Register* register_params;
+  Handle<Code> deoptimization_handler;
+};
+
+
+class HGraph;
+struct Register;
+class HydrogenCodeStub : public CodeStub {
+ public:
+  // Retrieve the code for the stub. Generate the code if needed.
+  virtual Handle<Code> GenerateCode() = 0;
+
+  virtual int GetCodeKind() { return Code::COMPILED_STUB; }
+
+  virtual CodeStubInterfaceDescriptor* GetInterfaceDescriptor(
+      Isolate* isolate) = 0;
+
+ protected:
+  Handle<Code> CodeFromGraph(HGraph* graph);
+};
+
+
 // Helper interface to prepare to/restore after making runtime calls.
 class RuntimeCallHelper {
  public:
@@ -289,7 +322,7 @@ class NopRuntimeCallHelper : public RuntimeCallHelper {
 };
 
 
-class StackCheckStub : public CodeStub {
+class StackCheckStub : public PlatformCodeStub {
  public:
   StackCheckStub() { }
 
@@ -301,7 +334,7 @@ class StackCheckStub : public CodeStub {
 };
 
 
-class InterruptStub : public CodeStub {
+class InterruptStub : public PlatformCodeStub {
  public:
   InterruptStub() { }
 
@@ -313,7 +346,7 @@ class InterruptStub : public CodeStub {
 };
 
 
-class ToNumberStub: public CodeStub {
+class ToNumberStub: public PlatformCodeStub {
  public:
   ToNumberStub() { }
 
@@ -325,7 +358,7 @@ class ToNumberStub: public CodeStub {
 };
 
 
-class FastNewClosureStub : public CodeStub {
+class FastNewClosureStub : public PlatformCodeStub {
  public:
   explicit FastNewClosureStub(LanguageMode language_mode)
     : language_mode_(language_mode) { }
@@ -341,7 +374,7 @@ class FastNewClosureStub : public CodeStub {
 };
 
 
-class FastNewContextStub : public CodeStub {
+class FastNewContextStub : public PlatformCodeStub {
  public:
   static const int kMaximumSlots = 64;
 
@@ -359,7 +392,7 @@ class FastNewContextStub : public CodeStub {
 };
 
 
-class FastNewBlockContextStub : public CodeStub {
+class FastNewBlockContextStub : public PlatformCodeStub {
  public:
   static const int kMaximumSlots = 64;
 
@@ -377,7 +410,7 @@ class FastNewBlockContextStub : public CodeStub {
 };
 
 
-class FastCloneShallowArrayStub : public CodeStub {
+class FastCloneShallowArrayStub : public PlatformCodeStub {
  public:
   // Maximum length of copied elements array.
   static const int kMaximumClonedLength = 8;
@@ -410,7 +443,7 @@ class FastCloneShallowArrayStub : public CodeStub {
 };
 
 
-class FastCloneShallowObjectStub : public CodeStub {
+class FastCloneShallowObjectStub : public PlatformCodeStub {
  public:
   // Maximum number of properties in copied object.
   static const int kMaximumClonedProperties = 6;
@@ -430,7 +463,7 @@ class FastCloneShallowObjectStub : public CodeStub {
 };
 
 
-class InstanceofStub: public CodeStub {
+class InstanceofStub: public PlatformCodeStub {
  public:
   enum Flags {
     kNoFlags = 0,
@@ -468,7 +501,7 @@ class InstanceofStub: public CodeStub {
 };
 
 
-class MathPowStub: public CodeStub {
+class MathPowStub: public PlatformCodeStub {
  public:
   enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK };
 
@@ -484,7 +517,7 @@ class MathPowStub: public CodeStub {
 };
 
 
-class BinaryOpStub: public CodeStub {
+class BinaryOpStub: public PlatformCodeStub {
  public:
   BinaryOpStub(Token::Value op, OverwriteMode mode)
       : op_(op),
@@ -600,7 +633,7 @@ class BinaryOpStub: public CodeStub {
 };
 
 
-class ICCompareStub: public CodeStub {
+class ICCompareStub: public PlatformCodeStub {
  public:
   ICCompareStub(Token::Value op,
                 CompareIC::State left,
@@ -666,7 +699,7 @@ class ICCompareStub: public CodeStub {
 };
 
 
-class CEntryStub : public CodeStub {
+class CEntryStub : public PlatformCodeStub {
  public:
   explicit CEntryStub(int result_size,
                       SaveFPRegsMode save_doubles = kDontSaveFPRegs)
@@ -700,7 +733,7 @@ class CEntryStub : public CodeStub {
 };
 
 
-class JSEntryStub : public CodeStub {
+class JSEntryStub : public PlatformCodeStub {
  public:
   JSEntryStub() { }
 
@@ -734,7 +767,7 @@ class JSConstructEntryStub : public JSEntryStub {
 };
 
 
-class ArgumentsAccessStub: public CodeStub {
+class ArgumentsAccessStub: public PlatformCodeStub {
  public:
   enum Type {
     READ_ELEMENT,
@@ -761,7 +794,7 @@ class ArgumentsAccessStub: public CodeStub {
 };
 
 
-class RegExpExecStub: public CodeStub {
+class RegExpExecStub: public PlatformCodeStub {
  public:
   RegExpExecStub() { }
 
@@ -773,7 +806,7 @@ class RegExpExecStub: public CodeStub {
 };
 
 
-class RegExpConstructResultStub: public CodeStub {
+class RegExpConstructResultStub: public PlatformCodeStub {
  public:
   RegExpConstructResultStub() { }
 
@@ -785,7 +818,7 @@ class RegExpConstructResultStub: public CodeStub {
 };
 
 
-class CallFunctionStub: public CodeStub {
+class CallFunctionStub: public PlatformCodeStub {
  public:
   CallFunctionStub(int argc, CallFunctionFlags flags)
       : argc_(argc), flags_(flags) { }
@@ -826,7 +859,7 @@ class CallFunctionStub: public CodeStub {
 };
 
 
-class CallConstructStub: public CodeStub {
+class CallConstructStub: public PlatformCodeStub {
  public:
   explicit CallConstructStub(CallFunctionFlags flags) : flags_(flags) {}
 
@@ -1017,25 +1050,53 @@ class AllowStubCallsScope {
 };
 
 
-class KeyedLoadElementStub : public CodeStub {
+class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
  public:
-  explicit KeyedLoadElementStub(ElementsKind elements_kind)
-      : elements_kind_(elements_kind)
-  { }
+  KeyedLoadDictionaryElementStub() {}
 
   Major MajorKey() { return KeyedLoadElement; }
-  int MinorKey() { return elements_kind_; }
+  int MinorKey() { return DICTIONARY_ELEMENTS; }
 
   void Generate(MacroAssembler* masm);
 
  private:
-  ElementsKind elements_kind_;
+  DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub);
+};
+
+
+class KeyedLoadFastElementStub : public HydrogenCodeStub {
+ public:
+  KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {
+    bit_field_ = ElementsKindBits::encode(elements_kind) |
+        IsJSArrayBits::encode(is_js_array);
+  }
+
+  Major MajorKey() { return KeyedLoadElement; }
+  int MinorKey() { return bit_field_; }
+
+  bool is_js_array() const {
+    return IsJSArrayBits::decode(bit_field_);
+  }
+
+  ElementsKind elements_kind() const {
+    return ElementsKindBits::decode(bit_field_);
+  }
+
+  virtual Handle<Code> GenerateCode();
+
+  virtual CodeStubInterfaceDescriptor* GetInterfaceDescriptor(
+      Isolate* isolate);
+
+ private:
+  class IsJSArrayBits: public BitField<bool, 8, 1> {};
+  class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
+  uint32_t bit_field_;
 
-  DISALLOW_COPY_AND_ASSIGN(KeyedLoadElementStub);
+  DISALLOW_COPY_AND_ASSIGN(KeyedLoadFastElementStub);
 };
 
 
-class KeyedStoreElementStub : public CodeStub {
+class KeyedStoreElementStub : public PlatformCodeStub {
  public:
   KeyedStoreElementStub(bool is_js_array,
                         ElementsKind elements_kind,
@@ -1070,7 +1131,7 @@ class KeyedStoreElementStub : public CodeStub {
 };
 
 
-class ToBooleanStub: public CodeStub {
+class ToBooleanStub: public PlatformCodeStub {
  public:
   enum Type {
     UNDEFINED,
@@ -1140,7 +1201,7 @@ class ToBooleanStub: public CodeStub {
 };
 
 
-class ElementsTransitionAndStoreStub : public CodeStub {
+class ElementsTransitionAndStoreStub : public PlatformCodeStub {
  public:
   ElementsTransitionAndStoreStub(ElementsKind from,
                                  ElementsKind to,
@@ -1181,7 +1242,7 @@ class ElementsTransitionAndStoreStub : public CodeStub {
 };
 
 
-class StoreArrayLiteralElementStub : public CodeStub {
+class StoreArrayLiteralElementStub : public PlatformCodeStub {
  public:
   StoreArrayLiteralElementStub()
         : fp_registers_(CanUseFPRegisters()) { }
@@ -1200,7 +1261,7 @@ class StoreArrayLiteralElementStub : public CodeStub {
 };
 
 
-class ProfileEntryHookStub : public CodeStub {
+class ProfileEntryHookStub : public PlatformCodeStub {
  public:
   explicit ProfileEntryHookStub() {}
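
With the hierarchy split, a new Hydrogen-compiled stub only has to supply its
keys, GenerateCode(), and an interface descriptor. A minimal sketch (the class
name is hypothetical, and a real stub would define its own Major key):

  class MyFastPathStub : public HydrogenCodeStub {
   public:
    Major MajorKey() { return KeyedLoadElement; }  // for illustration only
    int MinorKey() { return 0; }
    virtual Handle<Code> GenerateCode();  // typically wraps CodeFromGraph()
    virtual CodeStubInterfaceDescriptor* GetInterfaceDescriptor(
        Isolate* isolate);
  };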
 
index 83ac854..c8bdf68 100644 (file)
@@ -121,19 +121,21 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
   if (print_code) {
     // Print the source code if available.
     FunctionLiteral* function = info->function();
-    Handle<Script> script = info->script();
-    if (!script->IsUndefined() && !script->source()->IsUndefined()) {
-      PrintF("--- Raw source ---\n");
-      StringInputBuffer stream(String::cast(script->source()));
-      stream.Seek(function->start_position());
-      // fun->end_position() points to the last character in the stream. We
-      // need to compensate by adding one to calculate the length.
-      int source_len =
-          function->end_position() - function->start_position() + 1;
-      for (int i = 0; i < source_len; i++) {
-        if (stream.has_more()) PrintF("%c", stream.GetNext());
+    if (code->kind() != Code::COMPILED_STUB) {
+      Handle<Script> script = info->script();
+      if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+        PrintF("--- Raw source ---\n");
+        StringInputBuffer stream(String::cast(script->source()));
+        stream.Seek(function->start_position());
+        // function->end_position() points to the last character in the
+        // stream. We need to compensate by adding one to calculate the
+        // length.
+        int source_len =
+            function->end_position() - function->start_position() + 1;
+        for (int i = 0; i < source_len; i++) {
+          if (stream.has_more()) PrintF("%c", stream.GetNext());
+        }
+        PrintF("\n\n");
       }
-      PrintF("\n\n");
     }
     if (info->IsOptimizing()) {
       if (FLAG_print_unopt_code) {
@@ -145,7 +147,12 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
     } else {
       PrintF("--- Code ---\n");
     }
-    code->Disassemble(*function->debug_name()->ToCString());
+    if (info->IsStub()) {
+      CodeStub::Major major_key = info->code_stub()->MajorKey();
+      code->Disassemble(CodeStub::MajorName(major_key, false));
+    } else {
+      code->Disassemble(*function->debug_name()->ToCString());
+    }
   }
 #endif  // ENABLE_DISASSEMBLER
 }
index ff6e05d..f79a66c 100644 (file)
@@ -55,7 +55,7 @@ CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
     : flags_(LanguageModeField::encode(CLASSIC_MODE)),
       script_(script),
       osr_ast_id_(BailoutId::None()) {
-  Initialize(zone);
+  Initialize(script->GetIsolate(), BASE, zone);
 }
 
 
@@ -65,7 +65,7 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
       shared_info_(shared_info),
       script_(Handle<Script>(Script::cast(shared_info->script()))),
       osr_ast_id_(BailoutId::None()) {
-  Initialize(zone);
+  Initialize(script_->GetIsolate(), BASE, zone);
 }
 
 
@@ -76,12 +76,22 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
       script_(Handle<Script>(Script::cast(shared_info_->script()))),
       context_(closure->context()),
       osr_ast_id_(BailoutId::None()) {
-  Initialize(zone);
+  Initialize(script_->GetIsolate(), BASE, zone);
 }
 
 
-void CompilationInfo::Initialize(Zone* zone) {
-  isolate_ = script_->GetIsolate();
+CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
+                                 Isolate* isolate, Zone* zone)
+    : flags_(LanguageModeField::encode(CLASSIC_MODE) |
+             IsLazy::encode(true)),
+      osr_ast_id_(BailoutId::None()) {
+  Initialize(isolate, STUB, zone);
+  code_stub_ = stub;
+}
+
+
+void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {
+  isolate_ = isolate;
   function_ = NULL;
   scope_ = NULL;
   global_scope_ = NULL;
@@ -89,8 +99,13 @@ void CompilationInfo::Initialize(Zone* zone) {
   pre_parse_data_ = NULL;
   zone_ = zone;
   deferred_handles_ = NULL;
+  code_stub_ = NULL;
   prologue_offset_ = kPrologueOffsetNotSet;
-  mode_ = V8::UseCrankshaft() ? BASE : NONOPT;
+  if (mode == STUB) {
+    mode_ = STUB;
+    return;
+  }
+  mode_ = V8::UseCrankshaft() ? mode : NONOPT;
   if (script_->type()->value() == Script::TYPE_NATIVE) {
     MarkAsNative();
   }
@@ -107,6 +122,33 @@ CompilationInfo::~CompilationInfo() {
 }
 
 
+int CompilationInfo::num_parameters() const {
+  if (IsStub()) {
+    return 0;
+  } else {
+    return scope()->num_parameters();
+  }
+}
+
+
+int CompilationInfo::num_heap_slots() const {
+  if (IsStub()) {
+    return 0;
+  } else {
+    return scope()->num_heap_slots();
+  }
+}
+
+
+Code::Flags CompilationInfo::flags() const {
+  if (IsStub()) {
+    return Code::ComputeFlags(Code::COMPILED_STUB);
+  } else {
+    return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
+  }
+}
+
+
 // Disable optimization for the rest of the compilation pipeline.
 void CompilationInfo::DisableOptimization() {
   bool is_optimizable_closure =
@@ -317,13 +359,13 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
   if (FLAG_trace_hydrogen) {
     PrintF("-----------------------------------------------------------\n");
     PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
-    HTracer::Instance()->TraceCompilation(info()->function());
+    HTracer::Instance()->TraceCompilation(info());
   }
   Handle<Context> native_context(
       info()->closure()->context()->native_context());
   oracle_ = new(info()->zone()) TypeFeedbackOracle(
       code, native_context, info()->isolate(), info()->zone());
-  graph_builder_ = new(info()->zone()) HGraphBuilder(info(), oracle_);
+  graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info(), oracle_);
 
   Timer t(this, &time_taken_to_create_graph_);
   graph_ = graph_builder_->CreateGraph();
@@ -376,7 +418,7 @@ OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
     Timer timer(this, &time_taken_to_codegen_);
     ASSERT(chunk_ != NULL);
     ASSERT(graph_ != NULL);
-    Handle<Code> optimized_code = chunk_->Codegen();
+    Handle<Code> optimized_code = chunk_->Codegen(Code::OPTIMIZED_FUNCTION);
     if (optimized_code.is_null()) {
       info()->set_bailout_reason("code generation failed");
       return AbortOptimization();
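
The STUB compilation mode is what ties a HydrogenCodeStub into this pipeline;
CodeStubGraphBuilderBase (see code-stubs-hydrogen.cc above) owns such an info
object by value. In isolation, the setup looks like:

  CompilationInfoWithZone info(stub, isolate);  // enters STUB mode
  ASSERT(info.IsStub());
  // num_parameters() and num_heap_slots() report 0, and flags() reports
  // Code::COMPILED_STUB instead of Code::OPTIMIZED_FUNCTION.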
index 653d5f1..6d374d9 100644 (file)
@@ -38,6 +38,7 @@ namespace internal {
 static const int kPrologueOffsetNotSet = -1;
 
 class ScriptDataImpl;
+class HydrogenCodeStub;
 
 // CompilationInfo encapsulates some information known at compile time.  It
 // is constructed based on the resources available at compile-time.
@@ -46,6 +47,7 @@ class CompilationInfo {
   CompilationInfo(Handle<Script> script, Zone* zone);
   CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
   CompilationInfo(Handle<JSFunction> closure, Zone* zone);
+  CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone);
 
   virtual ~CompilationInfo();
 
@@ -72,10 +74,14 @@ class CompilationInfo {
   Handle<JSFunction> closure() const { return closure_; }
   Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
   Handle<Script> script() const { return script_; }
+  HydrogenCodeStub* code_stub() { return code_stub_; }
   v8::Extension* extension() const { return extension_; }
   ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
   Handle<Context> context() const { return context_; }
   BailoutId osr_ast_id() const { return osr_ast_id_; }
+  int num_parameters() const;
+  int num_heap_slots() const;
+  Code::Flags flags() const;
 
   void MarkAsEval() {
     ASSERT(!is_lazy());
@@ -98,9 +104,31 @@ class CompilationInfo {
   void MarkAsNative() {
     flags_ |= IsNative::encode(true);
   }
+
   bool is_native() const {
     return IsNative::decode(flags_);
   }
+
+  bool is_calling() const {
+    return is_deferred_calling() || is_non_deferred_calling();
+  }
+
+  void MarkAsDeferredCalling() {
+    flags_ |= IsDeferredCalling::encode(true);
+  }
+
+  bool is_deferred_calling() const {
+    return IsDeferredCalling::decode(flags_);
+  }
+
+  void MarkAsNonDeferredCalling() {
+    flags_ |= IsNonDeferredCalling::encode(true);
+  }
+
+  bool is_non_deferred_calling() const {
+    return IsNonDeferredCalling::decode(flags_);
+  }
+
   void SetFunction(FunctionLiteral* literal) {
     ASSERT(function_ == NULL);
     function_ = literal;
@@ -151,6 +179,7 @@ class CompilationInfo {
   // Accessors for the different compilation modes.
   bool IsOptimizing() const { return mode_ == OPTIMIZE; }
   bool IsOptimizable() const { return mode_ == BASE; }
+  bool IsStub() const { return mode_ == STUB; }
   void SetOptimizing(BailoutId osr_ast_id) {
     SetMode(OPTIMIZE);
     osr_ast_id_ = osr_ast_id;
@@ -209,10 +238,11 @@ class CompilationInfo {
   enum Mode {
     BASE,
     OPTIMIZE,
-    NONOPT
+    NONOPT,
+    STUB
   };
 
-  void Initialize(Zone* zone);
+  void Initialize(Isolate* isolate, Mode mode, Zone* zone);
 
   void SetMode(Mode mode) {
     ASSERT(V8::UseCrankshaft());
@@ -238,6 +268,12 @@ class CompilationInfo {
   // If compiling for debugging produce just full code matching the
   // initial mode setting.
   class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
+  // If the compiled code contains calls that require building a frame.
+  class IsCalling: public BitField<bool, 9, 1> {};
+  // If the compiled code contains deferred calls that require building a
+  // frame.
+  class IsDeferredCalling: public BitField<bool, 10, 1> {};
+  // If the compiled code contains non-deferred calls that require building
+  // a frame.
+  class IsNonDeferredCalling: public BitField<bool, 11, 1> {};
 
 
   unsigned flags_;
@@ -250,6 +286,8 @@ class CompilationInfo {
   Scope* scope_;
   // The global scope provided as a convenience.
   Scope* global_scope_;
+  // For compiled stubs, the stub object
+  HydrogenCodeStub* code_stub_;
   // The compiled code.
   Handle<Code> code_;
 
@@ -310,6 +348,10 @@ class CompilationInfoWithZone: public CompilationInfo {
       : CompilationInfo(closure, &zone_),
         zone_(closure->GetIsolate()),
         zone_scope_(&zone_, DELETE_ON_EXIT) {}
+  explicit CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
+      : CompilationInfo(stub, isolate, &zone_),
+        zone_(isolate),
+        zone_scope_(&zone_, DELETE_ON_EXIT) {}
 
  private:
   Zone zone_;
@@ -335,7 +377,7 @@ class CompilationHandleScope BASE_EMBEDDED {
 
 
 class HGraph;
-class HGraphBuilder;
+class HOptimizedGraphBuilder;
 class LChunk;
 
 // A helper class that calls the three compilation phases in
@@ -377,7 +419,7 @@ class OptimizingCompiler: public ZoneObject {
  private:
   CompilationInfo* info_;
   TypeFeedbackOracle* oracle_;
-  HGraphBuilder* graph_builder_;
+  HOptimizedGraphBuilder* graph_builder_;
   HGraph* graph_;
   LChunk* chunk_;
   int64_t time_taken_to_create_graph_;
index 9d16211..f34c3e0 100644 (file)
@@ -410,17 +410,24 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
            reinterpret_cast<intptr_t>(from),
            fp_to_sp_delta - (2 * kPointerSize));
   }
-  function->shared()->increment_deopt_count();
+  // For COMPILED_STUBs called from builtins, the function pointer
+  // is a SMI indicating an internal frame.
+  if (function->IsSmi()) {
+    function = NULL;
+  }
+  if (function != NULL && function->IsOptimized()) {
+    function->shared()->increment_deopt_count();
+  }
   // Find the optimized code.
   if (type == EAGER) {
     ASSERT(from == NULL);
-    optimized_code_ = function_->code();
+    compiled_code_ = function_->code();
     if (FLAG_trace_deopt && FLAG_code_comments) {
       // Print instruction associated with this bailout.
       const char* last_comment = NULL;
       int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
           | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-      for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
+      for (RelocIterator it(compiled_code_, mask); !it.done(); it.next()) {
         RelocInfo* info = it.rinfo();
         if (info->rmode() == RelocInfo::COMMENT) {
           last_comment = reinterpret_cast<const char*>(info->data());
@@ -436,18 +443,22 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
       }
     }
   } else if (type == LAZY) {
-    optimized_code_ = FindDeoptimizingCodeFromAddress(from);
-    ASSERT(optimized_code_ != NULL);
+    compiled_code_ = FindDeoptimizingCodeFromAddress(from);
+    if (compiled_code_ == NULL) {
+      compiled_code_ =
+          static_cast<Code*>(isolate->heap()->FindCodeObject(from));
+    }
+    ASSERT(compiled_code_ != NULL);
   } else if (type == OSR) {
     // The function has already been optimized and we're transitioning
     // from the unoptimized shared version to the optimized one in the
     // function. The return address (from) points to unoptimized code.
-    optimized_code_ = function_->code();
-    ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
-    ASSERT(!optimized_code_->contains(from));
+    compiled_code_ = function_->code();
+    ASSERT(compiled_code_->kind() == Code::OPTIMIZED_FUNCTION);
+    ASSERT(!compiled_code_->contains(from));
   } else if (type == DEBUGGER) {
-    optimized_code_ = optimized_code;
-    ASSERT(optimized_code_->contains(from));
+    compiled_code_ = optimized_code;
+    ASSERT(compiled_code_->contains(from));
   }
   ASSERT(HEAP->allow_allocation(false));
   unsigned size = ComputeInputFrameSize();
@@ -573,7 +584,7 @@ void Deoptimizer::DoComputeOutputFrames() {
   // Determine basic deoptimization information.  The optimized frame is
   // described by the input data.
   DeoptimizationInputData* input_data =
-      DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
+      DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
   BailoutId node_id = input_data->AstId(bailout_id_);
   ByteArray* translations = input_data->TranslationByteArray();
   unsigned translation_index =
@@ -618,6 +629,9 @@ void Deoptimizer::DoComputeOutputFrames() {
       case Translation::SETTER_STUB_FRAME:
         DoComputeAccessorStubFrame(&iterator, i, true);
         break;
+      case Translation::COMPILED_STUB_FRAME:
+        DoCompiledStubFrame(&iterator, i);
+        break;
       case Translation::BEGIN:
       case Translation::REGISTER:
       case Translation::INT32_REGISTER:
@@ -630,6 +644,7 @@ void Deoptimizer::DoComputeOutputFrames() {
       case Translation::LITERAL:
       case Translation::ARGUMENTS_OBJECT:
       case Translation::DUPLICATE:
+      default:
         UNREACHABLE();
         break;
     }
@@ -809,6 +824,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
     case Translation::CONSTRUCT_STUB_FRAME:
     case Translation::GETTER_STUB_FRAME:
     case Translation::SETTER_STUB_FRAME:
+    case Translation::COMPILED_STUB_FRAME:
     case Translation::DUPLICATE:
       UNREACHABLE();
       return;
@@ -1117,6 +1133,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
     case Translation::CONSTRUCT_STUB_FRAME:
     case Translation::GETTER_STUB_FRAME:
     case Translation::SETTER_STUB_FRAME:
+    case Translation::COMPILED_STUB_FRAME:
     case Translation::DUPLICATE:
       UNREACHABLE();  // Malformed input.
        return false;
@@ -1337,8 +1354,9 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
     // environment at the OSR entry. The code for that is built into
     // the DoComputeOsrOutputFrame function for now.
   } else {
-    unsigned stack_slots = optimized_code_->stack_slots();
-    unsigned outgoing_size = ComputeOutgoingArgumentSize();
+    unsigned stack_slots = compiled_code_->stack_slots();
+    unsigned outgoing_size = compiled_code_->kind() == Code::COMPILED_STUB
+        ? 0 : ComputeOutgoingArgumentSize();
     ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
   }
 #endif
@@ -1357,6 +1375,10 @@ unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
 unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
   // The incoming arguments is the values for formal parameters and
   // the receiver. Every slot contains a pointer.
+  if (function->IsSmi()) {
+    ASSERT(Smi::cast(function) == Smi::FromInt(StackFrame::STUB));
+    return 0;
+  }
   unsigned arguments = function->shared()->formal_parameter_count() + 1;
   return arguments * kPointerSize;
 }
@@ -1364,7 +1386,7 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
 
 unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
   DeoptimizationInputData* data = DeoptimizationInputData::cast(
-      optimized_code_->deoptimization_data());
+      compiled_code_->deoptimization_data());
   unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
   return height * kPointerSize;
 }
@@ -1372,7 +1394,7 @@ unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
 
 Object* Deoptimizer::ComputeLiteral(int index) const {
   DeoptimizationInputData* data = DeoptimizationInputData::cast(
-      optimized_code_->deoptimization_data());
+      compiled_code_->deoptimization_data());
   FixedArray* literals = data->LiteralArray();
   return literals->get(index);
 }
@@ -1403,8 +1425,6 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(BailoutType type,
   // cause us to emit relocation information for the external
   // references. This is fine because the deoptimizer's code section
   // isn't meant to be serialized at all.
-  ASSERT(!Serializer::enabled());
-
   ASSERT(type == EAGER || type == LAZY);
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   int entry_count = (type == EAGER)
@@ -1419,7 +1439,6 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(BailoutType type,
   GenerateDeoptimizationEntries(&masm, entry_count, type);
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(desc.reloc_size == 0);
 
   VirtualMemory* memory = type == EAGER
       ? data->eager_deoptimization_entry_code_
@@ -1681,6 +1700,11 @@ void Translation::BeginJSFrame(BailoutId node_id,
 }
 
 
+void Translation::BeginCompiledStubFrame() {
+  buffer_->Add(COMPILED_STUB_FRAME, zone());
+}
+
+
 void Translation::StoreRegister(Register reg) {
   buffer_->Add(REGISTER, zone());
   buffer_->Add(reg.code(), zone());
@@ -1762,6 +1786,7 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
     case UINT32_STACK_SLOT:
     case DOUBLE_STACK_SLOT:
     case LITERAL:
+    case COMPILED_STUB_FRAME:
       return 1;
     case BEGIN:
     case ARGUMENTS_ADAPTOR_FRAME:
@@ -1792,6 +1817,8 @@ const char* Translation::StringFor(Opcode opcode) {
       return "GETTER_STUB_FRAME";
     case SETTER_STUB_FRAME:
       return "SETTER_STUB_FRAME";
+    case COMPILED_STUB_FRAME:
+      return "COMPILED_STUB_FRAME";
     case REGISTER:
       return "REGISTER";
     case INT32_REGISTER:
@@ -1899,6 +1926,10 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
       int literal_index = iterator->Next();
       return SlotRef(data->LiteralArray()->get(literal_index));
     }
+
+    case Translation::COMPILED_STUB_FRAME:
+      UNREACHABLE();
+      break;
   }
 
   UNREACHABLE();
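
The emitting side of the new opcode lives in the platform Lithium code
generators (their hunks are elsewhere in this change); the shape is roughly:

  // Sketch: a compiled stub frame carries no literal id and no height.
  Translation translation(&buffer, frame_count, jsframe_count, zone);
  translation.BeginCompiledStubFrame();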
index 89955b3..dbcdf61 100644 (file)
@@ -135,6 +135,8 @@ class Deoptimizer : public Malloced {
 
   int output_count() const { return output_count_; }
 
+  Code::Kind compiled_code_kind() const { return compiled_code_->kind(); }
+
   // Number of created JS frames. Not all created frames are necessarily JS.
   int jsframe_count() const { return jsframe_count_; }
 
@@ -297,6 +299,9 @@ class Deoptimizer : public Malloced {
 
   static size_t GetMaxDeoptTableSize();
 
+  static void EnsureCodeForDeoptimizationEntry(BailoutType type,
+                                               int max_entry_id);
+
  private:
   static const int kMinNumberOfEntries = 64;
   static const int kMaxNumberOfEntries = 16384;
@@ -320,6 +325,8 @@ class Deoptimizer : public Malloced {
   void DoComputeAccessorStubFrame(TranslationIterator* iterator,
                                   int frame_index,
                                   bool is_setter_stub_frame);
+  void DoCompiledStubFrame(TranslationIterator* iterator,
+                           int frame_index);
   void DoTranslateCommand(TranslationIterator* iterator,
                           int frame_index,
                           unsigned output_offset);
@@ -342,8 +349,6 @@ class Deoptimizer : public Malloced {
   void AddArgumentsObjectValue(intptr_t value);
   void AddDoubleValue(intptr_t slot_address, double value);
 
-  static void EnsureCodeForDeoptimizationEntry(BailoutType type,
-                                               int max_entry_id);
   static void GenerateDeoptimizationEntries(
       MacroAssembler* masm, int count, BailoutType type);
 
@@ -360,7 +365,7 @@ class Deoptimizer : public Malloced {
 
   Isolate* isolate_;
   JSFunction* function_;
-  Code* optimized_code_;
+  Code* compiled_code_;
   unsigned bailout_id_;
   BailoutType bailout_type_;
   Address from_;
@@ -530,7 +535,7 @@ class FrameDescription {
   uintptr_t frame_size_;  // Number of bytes.
   JSFunction* function_;
   intptr_t registers_[Register::kNumRegisters];
-  double double_registers_[DoubleRegister::kNumAllocatableRegisters];
+  double double_registers_[DoubleRegister::kMaxNumAllocatableRegisters];
   intptr_t top_;
   intptr_t pc_;
   intptr_t fp_;
@@ -600,6 +605,7 @@ class Translation BASE_EMBEDDED {
     GETTER_STUB_FRAME,
     SETTER_STUB_FRAME,
     ARGUMENTS_ADAPTOR_FRAME,
+    COMPILED_STUB_FRAME,
     REGISTER,
     INT32_REGISTER,
     UINT32_REGISTER,
@@ -630,6 +636,7 @@ class Translation BASE_EMBEDDED {
 
   // Commands.
   void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
+  void BeginCompiledStubFrame();
   void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
   void BeginConstructStubFrame(int literal_id, unsigned height);
   void BeginGetterStubFrame(int literal_id);
index 9f8b9a8..05d6b9b 100644 (file)
@@ -287,7 +287,12 @@ static int DecodeIt(FILE* f,
         Address addr = relocinfo.target_address();
         int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER);
         if (id == Deoptimizer::kNotDeoptimizationEntry) {
-          out.AddFormatted("    ;; %s", RelocInfo::RelocModeName(rmode));
+          id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::LAZY);
+          if (id == Deoptimizer::kNotDeoptimizationEntry) {
+            out.AddFormatted("    ;; %s", RelocInfo::RelocModeName(rmode));
+          } else {
+            out.AddFormatted("    ;; lazy deoptimization bailout %d", id);
+          }
         } else {
           out.AddFormatted("    ;; deoptimization bailout %d", id);
         }
@@ -322,7 +327,8 @@ int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
 
 // Called by Code::CodePrint.
 void Disassembler::Decode(FILE* f, Code* code) {
-  int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
+  int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION ||
+                     code->kind() == Code::COMPILED_STUB)
       ? static_cast<int>(code->safepoint_table_offset())
       : code->instruction_size();
   // If there might be a stack check table, stop before reaching it.
index 27a526c..4753932 100644 (file)
@@ -235,11 +235,21 @@ inline Object* JavaScriptFrame::function() const {
 }
 
 
-inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
+inline CompiledFrame::CompiledFrame(StackFrameIterator* iterator)
     : JavaScriptFrame(iterator) {
 }
 
 
+inline StubFrame::StubFrame(StackFrameIterator* iterator)
+    : CompiledFrame(iterator) {
+}
+
+
+inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
+    : CompiledFrame(iterator) {
+}
+
+
 inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
     StackFrameIterator* iterator) : JavaScriptFrame(iterator) {
 }
index 3b60fb5..cb9ffba 100644 (file)
@@ -617,7 +617,7 @@ bool StandardFrame::IsExpressionInsideHandler(int n) const {
 }
 
 
-void OptimizedFrame::Iterate(ObjectVisitor* v) const {
+void CompiledFrame::Iterate(ObjectVisitor* v) const {
 #ifdef DEBUG
   // Make sure that compiled frames do not contain any stack handlers.
   StackHandlerIterator it(this, top_handler());
@@ -649,7 +649,7 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
 
   // Skip saved double registers.
   if (safepoint_entry.has_doubles()) {
-    parameters_base += DoubleRegister::kNumAllocatableRegisters *
+    parameters_base += DoubleRegister::NumAllocatableRegisters() *
         kDoubleSize / kPointerSize;
   }
 
@@ -681,14 +681,24 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
     }
   }
 
+  // Visit the return address in the callee and incoming arguments.
+  IteratePc(v, pc_address(), code);
+}
+
+
+void StubFrame::Iterate(ObjectVisitor* v) const {
+  CompiledFrame::Iterate(v);
+}
+
+
+void OptimizedFrame::Iterate(ObjectVisitor* v) const {
+  CompiledFrame::Iterate(v);
+
   // Visit the context and the function.
   Object** fixed_base = &Memory::Object_at(
       fp() + JavaScriptFrameConstants::kFunctionOffset);
   Object** fixed_limit = &Memory::Object_at(fp());
   v->VisitPointers(fixed_base, fixed_limit);
-
-  // Visit the return address in the callee and incoming arguments.
-  IteratePc(v, pc_address(), code);
 }
 
 
index 30f7e1f..6a9570e 100644 (file)
@@ -136,6 +136,7 @@ class StackHandler BASE_EMBEDDED {
   V(EXIT,              ExitFrame)             \
   V(JAVA_SCRIPT,       JavaScriptFrame)       \
   V(OPTIMIZED,         OptimizedFrame)        \
+  V(STUB,              StubFrame)             \
   V(INTERNAL,          InternalFrame)         \
   V(CONSTRUCT,         ConstructFrame)        \
   V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
@@ -555,7 +556,33 @@ class JavaScriptFrame: public StandardFrame {
 };
 
 
-class OptimizedFrame : public JavaScriptFrame {
+class CompiledFrame : public JavaScriptFrame {
+ public:
+  virtual Type type() const = 0;
+
+  // GC support.
+  virtual void Iterate(ObjectVisitor* v) const;
+
+ protected:
+  inline explicit CompiledFrame(StackFrameIterator* iterator);
+};
+
+
+class StubFrame : public CompiledFrame {
+ public:
+  virtual Type type() const { return STUB; }
+
+  // GC support.
+  virtual void Iterate(ObjectVisitor* v) const;
+
+ protected:
+  inline explicit StubFrame(StackFrameIterator* iterator);
+
+  friend class StackFrameIterator;
+};
+
+
+class OptimizedFrame : public CompiledFrame {
  public:
   virtual Type type() const { return OPTIMIZED; }
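
For orientation, the frame hierarchy after this change (only the classes
touched here):

  JavaScriptFrame
    CompiledFrame        // shared Iterate(): stack slots, doubles, return pc
      OptimizedFrame     // additionally visits the context and function slots
      StubFrame          // new; a stub frame has no function or context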
 
index 01d9bd0..e136170 100644 (file)
@@ -398,6 +398,7 @@ void FullCodeGenerator::Initialize() {
                          !Snapshot::HaveASnapshotToStartFrom();
   masm_->set_emit_debug_code(generate_debug_code_);
   masm_->set_predictable_code_size(true);
+  InitializeAstVisitor();
 }
 
 
index cfa7da3..364f0c3 100644 (file)
@@ -48,7 +48,9 @@ class JumpPatchSite;
 // debugger to piggyback on.
 class BreakableStatementChecker: public AstVisitor {
  public:
-  BreakableStatementChecker() : is_breakable_(false) {}
+  BreakableStatementChecker() : is_breakable_(false) {
+    InitializeAstVisitor();
+  }
 
   void Check(Statement* stmt);
   void Check(Expression* stmt);
@@ -63,6 +65,7 @@ class BreakableStatementChecker: public AstVisitor {
 
   bool is_breakable_;
 
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
   DISALLOW_COPY_AND_ASSIGN(BreakableStatementChecker);
 };
 
@@ -824,6 +827,7 @@ class FullCodeGenerator: public AstVisitor {
 
   friend class NestedStatement;
 
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
   DISALLOW_COPY_AND_ASSIGN(FullCodeGenerator);
 };
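
Every concrete AstVisitor now opts in to the shared visitor state (the macro
and InitializeAstVisitor() come from the ast.h hunk, which is not shown in
this excerpt) using the same two-line pattern; on a hypothetical visitor:

  class MyChecker : public AstVisitor {
   public:
    MyChecker() { InitializeAstVisitor(); }

   private:
    DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
    DISALLOW_COPY_AND_ASSIGN(MyChecker);
  };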
 
index 1590ab3..4fd14e8 100644 (file)
@@ -138,6 +138,7 @@ HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
   ASSERT(HasEnvironment());
   HEnvironment* environment = last_environment();
   ASSERT(ast_id.IsNone() ||
+         ast_id == BailoutId::StubEntry() ||
          environment->closure()->shared()->VerifyBailoutId(ast_id));
 
   int push_count = environment->push_count();
@@ -621,33 +622,204 @@ HConstant* HGraph::GetConstantHole() {
 }
 
 
-HGraphBuilder::HGraphBuilder(CompilationInfo* info,
-                             TypeFeedbackOracle* oracle)
-    : function_state_(NULL),
+HGraph* HGraphBuilder::CreateGraph() {
+  graph_ = new(zone()) HGraph(info_);
+  if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info_);
+  HPhase phase("H_Block building");
+  set_current_block(graph()->entry_block());
+  if (!BuildGraph()) return NULL;
+  return graph_;
+}
+
+
+HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
+  ASSERT(current_block() != NULL);
+  current_block()->AddInstruction(instr);
+  return instr;
+}
+
+
+void HGraphBuilder::AddSimulate(BailoutId id,
+                                RemovableSimulate removable) {
+  ASSERT(current_block() != NULL);
+  current_block()->AddSimulate(id, removable);
+}
+
+
+HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
+    HValue* external_elements,
+    HValue* checked_key,
+    HValue* val,
+    HValue* dependency,
+    ElementsKind elements_kind,
+    bool is_store) {
+  Zone* zone = this->zone();
+  if (is_store) {
+    ASSERT(val != NULL);
+    switch (elements_kind) {
+      case EXTERNAL_PIXEL_ELEMENTS: {
+        val = AddInstruction(new(zone) HClampToUint8(val));
+        break;
+      }
+      case EXTERNAL_BYTE_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+        break;
+      }
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+        break;
+      case FAST_SMI_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+    return new(zone) HStoreKeyed(external_elements, checked_key,
+                                 val, elements_kind);
+  } else {
+    ASSERT(val == NULL);
+    HLoadKeyed* load =
+        new(zone) HLoadKeyed(
+            external_elements, checked_key, dependency, elements_kind);
+    if (FLAG_opt_safe_uint32_operations &&
+        elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+      graph()->RecordUint32Instruction(load);
+    }
+    return load;
+  }
+}
+
+
+HInstruction* HGraphBuilder::BuildFastElementAccess(
+    HValue* elements,
+    HValue* checked_key,
+    HValue* val,
+    HValue* load_dependency,
+    ElementsKind elements_kind,
+    bool is_store) {
+  Zone* zone = this->zone();
+  if (is_store) {
+    ASSERT(val != NULL);
+    switch (elements_kind) {
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+        // Smi-only arrays need a smi check.
+        AddInstruction(new(zone) HCheckSmi(val));
+        // Fall through.
+      case FAST_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+        return new(zone) HStoreKeyed(elements, checked_key, val, elements_kind);
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+  // It's an element load (!is_store).
+  return new(zone) HLoadKeyed(elements,
+                              checked_key,
+                              load_dependency,
+                              elements_kind);
+}
+
+
+HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
+    HValue* object,
+    HValue* key,
+    HValue* val,
+    HCheckMaps* mapcheck,
+    bool is_js_array,
+    ElementsKind elements_kind,
+    bool is_store) {
+  Zone* zone = this->zone();
+  // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
+  // on a HElementsTransition instruction. The flag can also be removed if the
+  // map to check has FAST_HOLEY_ELEMENTS, since there can be no further
+  // ElementsKind transitions. Finally, the dependency can be removed for
+  // FAST_ELEMENTS stores, since a transition to HOLEY elements won't change
+  // the generated store code.
+  if ((elements_kind == FAST_HOLEY_ELEMENTS) ||
+      (elements_kind == FAST_ELEMENTS && is_store)) {
+    if (mapcheck != NULL) {
+      mapcheck->ClearGVNFlag(kDependsOnElementsKind);
+    }
+  }
+  bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
+  bool fast_elements = IsFastObjectElementsKind(elements_kind);
+  HInstruction* elements =
+      AddInstruction(new(zone) HLoadElements(object, mapcheck));
+  if (is_store && (fast_elements || fast_smi_only_elements)) {
+    HCheckMaps* check_cow_map = new(zone) HCheckMaps(
+        elements, Isolate::Current()->factory()->fixed_array_map(), zone);
+    check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
+    AddInstruction(check_cow_map);
+  }
+  HInstruction* length = NULL;
+  HInstruction* checked_key = NULL;
+  if (IsExternalArrayElementsKind(elements_kind)) {
+    length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+    checked_key = AddInstruction(new(zone) HBoundsCheck(key, length,
+                                                        ALLOW_SMI_KEY));
+    HLoadExternalArrayPointer* external_elements =
+        new(zone) HLoadExternalArrayPointer(elements);
+    AddInstruction(external_elements);
+    return BuildExternalArrayElementAccess(
+        external_elements, checked_key, val, mapcheck,
+        elements_kind, is_store);
+  }
+  ASSERT(fast_smi_only_elements ||
+         fast_elements ||
+         IsFastDoubleElementsKind(elements_kind));
+  if (is_js_array) {
+    length = AddInstruction(new(zone) HJSArrayLength(object, mapcheck,
+                                                     HType::Smi()));
+  } else {
+    length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+  }
+  checked_key = AddInstruction(new(zone) HBoundsCheck(key, length,
+                                                      ALLOW_SMI_KEY));
+  return BuildFastElementAccess(elements, checked_key, val, mapcheck,
+                                elements_kind, is_store);
+}
+
+
+HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info,
+                                               TypeFeedbackOracle* oracle)
+    : HGraphBuilder(info),
+      function_state_(NULL),
       initial_function_state_(this, info, oracle, NORMAL_RETURN),
       ast_context_(NULL),
       break_scope_(NULL),
-      graph_(NULL),
-      current_block_(NULL),
       inlined_count_(0),
       globals_(10, info->zone()),
-      zone_(info->zone()),
       inline_bailout_(false) {
   // This is not initialized in the initializer list because the
   // constructor for the initial state relies on function_state_ == NULL
   // to know it's the initial state.
   function_state_ = &initial_function_state_;
+  InitializeAstVisitor();
 }
 
-HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first,
-                                       HBasicBlock* second,
-                                       BailoutId join_id) {
+
+HBasicBlock* HOptimizedGraphBuilder::CreateJoin(HBasicBlock* first,
+                                                HBasicBlock* second,
+                                                BailoutId join_id) {
   if (first == NULL) {
     return second;
   } else if (second == NULL) {
     return first;
   } else {
-    HBasicBlock* join_block = graph_->CreateBasicBlock();
+    HBasicBlock* join_block = graph()->CreateBasicBlock();
     first->Goto(join_block);
     second->Goto(join_block);
     join_block->SetJoinId(join_id);
@@ -656,9 +828,9 @@ HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first,
 }
 
 
-HBasicBlock* HGraphBuilder::JoinContinue(IterationStatement* statement,
-                                         HBasicBlock* exit_block,
-                                         HBasicBlock* continue_block) {
+HBasicBlock* HOptimizedGraphBuilder::JoinContinue(IterationStatement* statement,
+                                                  HBasicBlock* exit_block,
+                                                  HBasicBlock* continue_block) {
   if (continue_block != NULL) {
     if (exit_block != NULL) exit_block->Goto(continue_block);
     continue_block->SetJoinId(statement->ContinueId());
@@ -668,11 +840,11 @@ HBasicBlock* HGraphBuilder::JoinContinue(IterationStatement* statement,
 }
 
 
-HBasicBlock* HGraphBuilder::CreateLoop(IterationStatement* statement,
-                                       HBasicBlock* loop_entry,
-                                       HBasicBlock* body_exit,
-                                       HBasicBlock* loop_successor,
-                                       HBasicBlock* break_block) {
+HBasicBlock* HOptimizedGraphBuilder::CreateLoop(IterationStatement* statement,
+                                                HBasicBlock* loop_entry,
+                                                HBasicBlock* body_exit,
+                                                HBasicBlock* loop_successor,
+                                                HBasicBlock* break_block) {
   if (body_exit != NULL) body_exit->Goto(loop_entry);
   loop_entry->PostProcessLoopHeader(statement);
   if (break_block != NULL) {
@@ -703,8 +875,13 @@ HGraph::HGraph(CompilationInfo* info)
       is_recursive_(false),
       use_optimistic_licm_(false),
       type_change_checksum_(0) {
-  start_environment_ =
-      new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
+  if (info->IsStub()) {
+    start_environment_ =
+        new(zone_) HEnvironment(zone_);
+  } else {
+    start_environment_ =
+        new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
+  }
   start_environment_->set_ast_id(BailoutId::FunctionEntry());
   entry_block_ = CreateBasicBlock();
   entry_block_->SetInitialEnvironment(start_environment_);
@@ -2893,7 +3070,7 @@ void HGraph::ComputeMinusZeroChecks() {
 
 // Implementation of utility class to encapsulate the translation state for
 // a (possibly inlined) function.
-FunctionState::FunctionState(HGraphBuilder* owner,
+FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
                              CompilationInfo* info,
                              TypeFeedbackOracle* oracle,
                              InliningKind inlining_kind)
@@ -2942,7 +3119,7 @@ FunctionState::~FunctionState() {
 
 // Implementation of utility classes to represent an expression's context in
 // the AST.
-AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
+AstContext::AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind)
     : owner_(owner),
       kind_(kind),
       outer_(owner->ast_context()),
@@ -3053,7 +3230,7 @@ void ValueContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
 
 void TestContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
   ASSERT(!instr->IsControlInstruction());
-  HGraphBuilder* builder = owner();
+  HOptimizedGraphBuilder* builder = owner();
   builder->AddInstruction(instr);
   // We expect a simulate after every expression with side effects, though
   // this one isn't actually needed (and wouldn't work if it were targeted).
@@ -3084,7 +3261,7 @@ void TestContext::BuildBranch(HValue* value) {
   // connects a branch node to a join node.  We conservatively ensure that
   // property by always adding an empty block on the outgoing edges of this
   // branch.
-  HGraphBuilder* builder = owner();
+  HOptimizedGraphBuilder* builder = owner();
   if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
     builder->Bailout("arguments object value in a test context");
   }
@@ -3101,7 +3278,7 @@ void TestContext::BuildBranch(HValue* value) {
 }
 
 
-// HGraphBuilder infrastructure for bailing out and checking bailouts.
+// HOptimizedGraphBuilder infrastructure for bailing out and checking bailouts.
 #define CHECK_BAILOUT(call)                     \
   do {                                          \
     call;                                       \
@@ -3116,25 +3293,26 @@ void TestContext::BuildBranch(HValue* value) {
   } while (false)
 
 
-void HGraphBuilder::Bailout(const char* reason) {
+void HOptimizedGraphBuilder::Bailout(const char* reason) {
   info()->set_bailout_reason(reason);
   SetStackOverflow();
 }
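
Bailout() is deliberately cheap: it records a reason and raises the builder's sticky stack-overflow flag, and the CHECK_BAILOUT/CHECK_ALIVE macros above turn that flag into an early return at every visitor frame, unwinding the whole AST walk without exceptions. A self-contained sketch of the same protocol:

    #include <cstdio>

    static const char* bailout_reason = nullptr;

    static void Bailout(const char* reason) { bailout_reason = reason; }
    static bool HasBailedOut() { return bailout_reason != nullptr; }

    // Mirrors CHECK_BAILOUT: run the call, then stop if a bailout occurred.
    #define CHECK_BAILOUT(call)        \
      do {                             \
        call;                          \
        if (HasBailedOut()) return;    \
      } while (false)

    static void VisitInner() { Bailout("function calls eval"); }

    static void VisitOuter() {
      CHECK_BAILOUT(VisitInner());
      std::printf("not reached after a bailout\n");
    }

    int main() {
      VisitOuter();
      if (HasBailedOut()) std::printf("bailed out: %s\n", bailout_reason);
    }
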
 
 
-void HGraphBuilder::VisitForEffect(Expression* expr) {
+void HOptimizedGraphBuilder::VisitForEffect(Expression* expr) {
   EffectContext for_effect(this);
   Visit(expr);
 }
 
 
-void HGraphBuilder::VisitForValue(Expression* expr, ArgumentsAllowedFlag flag) {
+void HOptimizedGraphBuilder::VisitForValue(Expression* expr,
+                                           ArgumentsAllowedFlag flag) {
   ValueContext for_value(this, flag);
   Visit(expr);
 }
 
 
-void HGraphBuilder::VisitForTypeOf(Expression* expr) {
+void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
   ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
   for_value.set_for_typeof(true);
   Visit(expr);
@@ -3142,113 +3320,108 @@ void HGraphBuilder::VisitForTypeOf(Expression* expr) {
 
 
 
-void HGraphBuilder::VisitForControl(Expression* expr,
-                                    HBasicBlock* true_block,
-                                    HBasicBlock* false_block) {
+void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
+                                             HBasicBlock* true_block,
+                                             HBasicBlock* false_block) {
   TestContext for_test(this, expr, oracle(), true_block, false_block);
   Visit(expr);
 }
 
 
-void HGraphBuilder::VisitArgument(Expression* expr) {
+void HOptimizedGraphBuilder::VisitArgument(Expression* expr) {
   CHECK_ALIVE(VisitForValue(expr));
   Push(AddInstruction(new(zone()) HPushArgument(Pop())));
 }
 
 
-void HGraphBuilder::VisitArgumentList(ZoneList<Expression*>* arguments) {
+void HOptimizedGraphBuilder::VisitArgumentList(
+    ZoneList<Expression*>* arguments) {
   for (int i = 0; i < arguments->length(); i++) {
     CHECK_ALIVE(VisitArgument(arguments->at(i)));
   }
 }
 
 
-void HGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs) {
+void HOptimizedGraphBuilder::VisitExpressions(
+    ZoneList<Expression*>* exprs) {
   for (int i = 0; i < exprs->length(); ++i) {
     CHECK_ALIVE(VisitForValue(exprs->at(i)));
   }
 }
 
 
-HGraph* HGraphBuilder::CreateGraph() {
-  graph_ = new(zone()) HGraph(info());
-  if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info());
-
-  {
-    HPhase phase("H_Block building");
-    current_block_ = graph()->entry_block();
-
-    Scope* scope = info()->scope();
-    if (scope->HasIllegalRedeclaration()) {
-      Bailout("function with illegal redeclaration");
-      return NULL;
-    }
-    if (scope->calls_eval()) {
-      Bailout("function calls eval");
-      return NULL;
-    }
-    SetUpScope(scope);
-
-    // Add an edge to the body entry.  This is warty: the graph's start
-    // environment will be used by the Lithium translation as the initial
-    // environment on graph entry, but it has now been mutated by the
-    // Hydrogen translation of the instructions in the start block.  This
-    // environment uses values which have not been defined yet.  These
-    // Hydrogen instructions will then be replayed by the Lithium
-    // translation, so they cannot have an environment effect.  The edge to
-    // the body's entry block (along with some special logic for the start
-    // block in HInstruction::InsertAfter) seals the start block from
-    // getting unwanted instructions inserted.
-    //
-    // TODO(kmillikin): Fix this.  Stop mutating the initial environment.
-    // Make the Hydrogen instructions in the initial block into Hydrogen
-    // values (but not instructions), present in the initial environment and
-    // not replayed by the Lithium translation.
-    HEnvironment* initial_env = environment()->CopyWithoutHistory();
-    HBasicBlock* body_entry = CreateBasicBlock(initial_env);
-    current_block()->Goto(body_entry);
-    body_entry->SetJoinId(BailoutId::FunctionEntry());
-    set_current_block(body_entry);
-
-    // Handle implicit declaration of the function name in named function
-    // expressions before other declarations.
-    if (scope->is_function_scope() && scope->function() != NULL) {
-      VisitVariableDeclaration(scope->function());
-    }
-    VisitDeclarations(scope->declarations());
-    AddSimulate(BailoutId::Declarations());
-
-    HValue* context = environment()->LookupContext();
-    AddInstruction(
-        new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
+bool HOptimizedGraphBuilder::BuildGraph() {
+  Scope* scope = info()->scope();
+  if (scope->HasIllegalRedeclaration()) {
+    Bailout("function with illegal redeclaration");
+    return false;
+  }
+  if (scope->calls_eval()) {
+    Bailout("function calls eval");
+    return false;
+  }
+  SetUpScope(scope);
+
+  // Add an edge to the body entry.  This is warty: the graph's start
+  // environment will be used by the Lithium translation as the initial
+  // environment on graph entry, but it has now been mutated by the
+  // Hydrogen translation of the instructions in the start block.  This
+  // environment uses values which have not been defined yet.  These
+  // Hydrogen instructions will then be replayed by the Lithium
+  // translation, so they cannot have an environment effect.  The edge to
+  // the body's entry block (along with some special logic for the start
+  // block in HInstruction::InsertAfter) seals the start block from
+  // getting unwanted instructions inserted.
+  //
+  // TODO(kmillikin): Fix this.  Stop mutating the initial environment.
+  // Make the Hydrogen instructions in the initial block into Hydrogen
+  // values (but not instructions), present in the initial environment and
+  // not replayed by the Lithium translation.
+  HEnvironment* initial_env = environment()->CopyWithoutHistory();
+  HBasicBlock* body_entry = CreateBasicBlock(initial_env);
+  current_block()->Goto(body_entry);
+  body_entry->SetJoinId(BailoutId::FunctionEntry());
+  set_current_block(body_entry);
+
+  // Handle implicit declaration of the function name in named function
+  // expressions before other declarations.
+  if (scope->is_function_scope() && scope->function() != NULL) {
+    VisitVariableDeclaration(scope->function());
+  }
+  VisitDeclarations(scope->declarations());
+  AddSimulate(BailoutId::Declarations());
 
-    VisitStatements(info()->function()->body());
-    if (HasStackOverflow()) return NULL;
+  HValue* context = environment()->LookupContext();
+  AddInstruction(
+      new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
 
-    if (current_block() != NULL) {
-      HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined());
-      current_block()->FinishExit(instr);
-      set_current_block(NULL);
-    }
+  VisitStatements(info()->function()->body());
+  if (HasStackOverflow()) return false;
 
-    // If the checksum of the number of type info changes is the same as the
-    // last time this function was compiled, then this recompile is likely not
-    // due to missing/inadequate type feedback, but rather too aggressive
-    // optimization. Disable optimistic LICM in that case.
-    Handle<Code> unoptimized_code(info()->shared_info()->code());
-    ASSERT(unoptimized_code->kind() == Code::FUNCTION);
-    Handle<TypeFeedbackInfo> type_info(
-        TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
-    int checksum = type_info->own_type_change_checksum();
-    int composite_checksum = graph()->update_type_change_checksum(checksum);
-    graph()->set_use_optimistic_licm(
-        !type_info->matches_inlined_type_change_checksum(composite_checksum));
-    type_info->set_inlined_type_change_checksum(composite_checksum);
+  if (current_block() != NULL) {
+    HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined());
+    current_block()->FinishExit(instr);
+    set_current_block(NULL);
   }
 
-  return graph();
+  // If the checksum of type-info changes matches the checksum recorded the
+  // last time this function was compiled, this recompile is likely not due
+  // to missing or inadequate type feedback, but rather to over-aggressive
+  // optimization. Disable optimistic LICM in that case.
+  Handle<Code> unoptimized_code(info()->shared_info()->code());
+  ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+  Handle<TypeFeedbackInfo> type_info(
+      TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
+  int checksum = type_info->own_type_change_checksum();
+  int composite_checksum = graph()->update_type_change_checksum(checksum);
+  graph()->set_use_optimistic_licm(
+      !type_info->matches_inlined_type_change_checksum(composite_checksum));
+  type_info->set_inlined_type_change_checksum(composite_checksum);
+
+  return true;
 }
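
The checksum logic that now ends BuildGraph() deserves a closer look: if the type-feedback checksum at this compile matches the one recorded at the previous optimized compile, the recompile was probably triggered by a deopt from over-aggressive code rather than by new feedback, so optimistic LICM is switched off. A simplified sketch (the real composite checksum also folds in inlined functions):

    #include <cstdio>

    struct TypeFeedback {
      int own_checksum;      // changes whenever recorded type feedback changes
      int inlined_checksum;  // snapshot taken at the last optimized compile
    };

    bool UseOptimisticLicm(TypeFeedback* info) {
      int composite = info->own_checksum;
      bool unchanged = (composite == info->inlined_checksum);
      info->inlined_checksum = composite;  // remember for the next recompile
      return !unchanged;  // only be optimistic when feedback actually moved
    }

    int main() {
      TypeFeedback info = {42, 0};
      std::printf("%d\n", UseOptimisticLicm(&info));  // 1: feedback changed
      std::printf("%d\n", UseOptimisticLicm(&info));  // 0: same checksum again
    }
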
 
+
 bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
   *bailout_reason = SmartArrayPointer<char>();
   OrderBlocks();
@@ -3772,33 +3945,20 @@ void HGraph::DeadCodeElimination() {
 }
 
 
-HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
-  ASSERT(current_block() != NULL);
-  current_block()->AddInstruction(instr);
-  return instr;
-}
-
-
-void HGraphBuilder::AddSimulate(BailoutId ast_id, RemovableSimulate removable) {
-  ASSERT(current_block() != NULL);
-  current_block()->AddSimulate(ast_id, removable);
-}
-
-
-void HGraphBuilder::AddPhi(HPhi* instr) {
+void HOptimizedGraphBuilder::AddPhi(HPhi* instr) {
   ASSERT(current_block() != NULL);
   current_block()->AddPhi(instr);
 }
 
 
-void HGraphBuilder::PushAndAdd(HInstruction* instr) {
+void HOptimizedGraphBuilder::PushAndAdd(HInstruction* instr) {
   Push(instr);
   AddInstruction(instr);
 }
 
 
 template <class Instruction>
-HInstruction* HGraphBuilder::PreProcessCall(Instruction* call) {
+HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
   int count = call->argument_count();
   ZoneList<HValue*> arguments(count, zone());
   for (int i = 0; i < count; ++i) {
@@ -3812,11 +3972,11 @@ HInstruction* HGraphBuilder::PreProcessCall(Instruction* call) {
 }
 
 
-void HGraphBuilder::SetUpScope(Scope* scope) {
+void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
   HConstant* undefined_constant = new(zone()) HConstant(
       isolate()->factory()->undefined_value(), Representation::Tagged());
   AddInstruction(undefined_constant);
-  graph_->set_undefined_constant(undefined_constant);
+  graph()->set_undefined_constant(undefined_constant);
 
   HArgumentsObject* object = new(zone()) HArgumentsObject;
   AddInstruction(object);
@@ -3855,21 +4015,21 @@ void HGraphBuilder::SetUpScope(Scope* scope) {
 }
 
 
-void HGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
+void HOptimizedGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
   for (int i = 0; i < statements->length(); i++) {
     CHECK_ALIVE(Visit(statements->at(i)));
   }
 }
 
 
-HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
+HBasicBlock* HOptimizedGraphBuilder::CreateBasicBlock(HEnvironment* env) {
   HBasicBlock* b = graph()->CreateBasicBlock();
   b->SetInitialEnvironment(env);
   return b;
 }
 
 
-HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
+HBasicBlock* HOptimizedGraphBuilder::CreateLoopHeaderBlock() {
   HBasicBlock* header = graph()->CreateBasicBlock();
   HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
   header->SetInitialEnvironment(entry_env);
@@ -3878,7 +4038,7 @@ HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
 }
 
 
-void HGraphBuilder::VisitBlock(Block* stmt) {
+void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -3898,7 +4058,8 @@ void HGraphBuilder::VisitBlock(Block* stmt) {
 }
 
 
-void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+void HOptimizedGraphBuilder::VisitExpressionStatement(
+    ExpressionStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -3906,14 +4067,14 @@ void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
 }
 
 
-void HGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+void HOptimizedGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
 }
 
 
-void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -3952,7 +4113,7 @@ void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
 }
 
 
-HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get(
+HBasicBlock* HOptimizedGraphBuilder::BreakAndContinueScope::Get(
     BreakableStatement* stmt,
     BreakType type,
     int* drop_extra) {
@@ -3991,7 +4152,8 @@ HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get(
 }
 
 
-void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+void HOptimizedGraphBuilder::VisitContinueStatement(
+    ContinueStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4005,7 +4167,7 @@ void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
 }
 
 
-void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4019,7 +4181,7 @@ void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
 }
 
 
-void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4091,7 +4253,7 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
 }
 
 
-void HGraphBuilder::VisitWithStatement(WithStatement* stmt) {
+void HOptimizedGraphBuilder::VisitWithStatement(WithStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4099,7 +4261,7 @@ void HGraphBuilder::VisitWithStatement(WithStatement* stmt) {
 }
 
 
-void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4285,12 +4447,12 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
 }
 
 
-bool HGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
+bool HOptimizedGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
   return statement->OsrEntryId() == info()->osr_ast_id();
 }
 
 
-bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
+bool HOptimizedGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
   if (!HasOsrEntryAt(statement)) return false;
 
   HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
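
PreProcessOsrEntry (continued in the next hunk) forks the loop entry into a normal-entry block and an OSR-entry block; the OSR block stays off the normal path and is only entered when the runtime replaces a hot unoptimized frame at this loop. A toy sketch of the split, assuming the fork simply gives the current block two successors:

    #include <cstdio>

    struct Block {
      const char* name;
      Block* successors[2];
    };

    void SplitForOsr(Block* current, Block* non_osr_entry, Block* osr_entry) {
      current->successors[0] = non_osr_entry;  // ordinary loop entry
      current->successors[1] = osr_entry;      // reached only via OSR
    }

    int main() {
      Block entry = {"entry", {nullptr, nullptr}};
      Block normal = {"non_osr", {nullptr, nullptr}};
      Block osr = {"osr", {nullptr, nullptr}};
      SplitForOsr(&entry, &normal, &osr);
      std::printf("%s -> %s | %s\n", entry.name,
                  entry.successors[0]->name, entry.successors[1]->name);
    }
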
@@ -4340,9 +4502,9 @@ bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
 }
 
 
-void HGraphBuilder::VisitLoopBody(IterationStatement* stmt,
-                                  HBasicBlock* loop_entry,
-                                  BreakAndContinueInfo* break_info) {
+void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt,
+                                           HBasicBlock* loop_entry,
+                                           BreakAndContinueInfo* break_info) {
   BreakAndContinueScope push(break_info, this);
   AddSimulate(stmt->StackCheckId());
   HValue* context = environment()->LookupContext();
@@ -4355,7 +4517,7 @@ void HGraphBuilder::VisitLoopBody(IterationStatement* stmt,
 }
 
 
-void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4398,7 +4560,7 @@ void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
 }
 
 
-void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4442,7 +4604,7 @@ void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
 }
 
 
-void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
+void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4494,7 +4656,7 @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
 }
 
 
-void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
+void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4615,7 +4777,7 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
 }
 
 
-void HGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+void HOptimizedGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4623,7 +4785,8 @@ void HGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
 }
 
 
-void HGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+void HOptimizedGraphBuilder::VisitTryFinallyStatement(
+    TryFinallyStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4631,7 +4794,7 @@ void HGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
 }
 
 
-void HGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4659,7 +4822,7 @@ static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
 }
 
 
-void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4678,7 +4841,7 @@ void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
 }
 
 
-void HGraphBuilder::VisitSharedFunctionInfoLiteral(
+void HOptimizedGraphBuilder::VisitSharedFunctionInfoLiteral(
     SharedFunctionInfoLiteral* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
@@ -4687,7 +4850,7 @@ void HGraphBuilder::VisitSharedFunctionInfoLiteral(
 }
 
 
-void HGraphBuilder::VisitConditional(Conditional* expr) {
+void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4725,8 +4888,9 @@ void HGraphBuilder::VisitConditional(Conditional* expr) {
 }
 
 
-HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty(
-    Variable* var, LookupResult* lookup, bool is_store) {
+HOptimizedGraphBuilder::GlobalPropertyAccess
+    HOptimizedGraphBuilder::LookupGlobalProperty(
+        Variable* var, LookupResult* lookup, bool is_store) {
   if (var->is_this() || !info()->has_global_object()) {
     return kUseGeneric;
   }
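
kUseCell versus kUseGeneric decides whether a global variable access compiles to a direct load or store through the global object's property cell or falls back to the generic IC. A rough sketch of the classification, with illustrative lookup fields:

    #include <cstdio>

    enum GlobalPropertyAccess { kUseCell, kUseGeneric };

    struct Lookup {
      bool found;
      bool is_plain_data_property;
      bool read_only;
    };

    GlobalPropertyAccess Classify(const Lookup& lookup, bool is_store) {
      if (!lookup.found || !lookup.is_plain_data_property) return kUseGeneric;
      if (is_store && lookup.read_only) return kUseGeneric;
      return kUseCell;  // go straight through the property cell
    }

    int main() {
      Lookup plain = {true, true, false};
      Lookup frozen = {true, true, true};
      std::printf("%d %d\n", Classify(plain, true) == kUseCell,
                  Classify(frozen, true) == kUseGeneric);  // 1 1
    }
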
@@ -4742,7 +4906,7 @@ HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty(
 }
 
 
-HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
+HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
   ASSERT(var->IsContextSlot());
   HValue* context = environment()->LookupContext();
   int length = info()->scope()->ContextChainLength(var->scope());
@@ -4755,7 +4919,7 @@ HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
 }
 
 
-void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4828,7 +4992,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
 }
 
 
-void HGraphBuilder::VisitLiteral(Literal* expr) {
+void HOptimizedGraphBuilder::VisitLiteral(Literal* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4838,7 +5002,7 @@ void HGraphBuilder::VisitLiteral(Literal* expr) {
 }
 
 
-void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4997,7 +5161,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
 }
 
 
-void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -5102,7 +5266,7 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
 }
 
 
-void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -5236,18 +5400,19 @@ static int ComputeLoadStoreFieldIndex(Handle<Map> type,
 }
 
 
-void HGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
-                                                Handle<Map> map) {
+void HOptimizedGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
+                                                         Handle<Map> map) {
   AddInstruction(new(zone()) HCheckNonSmi(object));
   AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
 }
 
 
-HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
-                                                  Handle<String> name,
-                                                  HValue* value,
-                                                  Handle<Map> map,
-                                                  LookupResult* lookup) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
+    HValue* object,
+    Handle<String> name,
+    HValue* value,
+    Handle<Map> map,
+    LookupResult* lookup) {
   ASSERT(lookup->IsFound());
   // If the property does not exist yet, we have to check that it wasn't made
   // readonly or turned into a setter by intervening modifications on the
@@ -5299,9 +5464,10 @@ HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
 }
 
 
-HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
-                                                    Handle<String> name,
-                                                    HValue* value) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
+    HValue* object,
+    Handle<String> name,
+    HValue* value) {
   HValue* context = environment()->LookupContext();
   return new(zone()) HStoreNamedGeneric(
                          context,
@@ -5312,11 +5478,12 @@ HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
 }
 
 
-HInstruction* HGraphBuilder::BuildCallSetter(HValue* object,
-                                             HValue* value,
-                                             Handle<Map> map,
-                                             Handle<JSFunction> setter,
-                                             Handle<JSObject> holder) {
+HInstruction* HOptimizedGraphBuilder::BuildCallSetter(
+    HValue* object,
+    HValue* value,
+    Handle<Map> map,
+    Handle<JSFunction> setter,
+    Handle<JSObject> holder) {
   AddCheckConstantFunction(holder, object, map);
   AddInstruction(new(zone()) HPushArgument(object));
   AddInstruction(new(zone()) HPushArgument(value));
@@ -5324,10 +5491,11 @@ HInstruction* HGraphBuilder::BuildCallSetter(HValue* object,
 }
 
 
-HInstruction* HGraphBuilder::BuildStoreNamedMonomorphic(HValue* object,
-                                                        Handle<String> name,
-                                                        HValue* value,
-                                                        Handle<Map> map) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
+    HValue* object,
+    Handle<String> name,
+    HValue* value,
+    Handle<Map> map) {
   // Handle a store to a known field.
   LookupResult lookup(isolate());
   if (ComputeLoadStoreField(map, name, &lookup, true)) {
@@ -5340,10 +5508,11 @@ HInstruction* HGraphBuilder::BuildStoreNamedMonomorphic(HValue* object,
 }
 
 
-void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
-                                                    HValue* object,
-                                                    SmallMapList* types,
-                                                    Handle<String> name) {
+void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
+    Property* expr,
+    HValue* object,
+    SmallMapList* types,
+    Handle<String> name) {
   int count = 0;
   int previous_field_offset = 0;
   bool previous_field_is_in_object = false;
@@ -5395,11 +5564,12 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
 }
 
 
-void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
-                                                     HValue* object,
-                                                     HValue* value,
-                                                     SmallMapList* types,
-                                                     Handle<String> name) {
+void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
+    Assignment* expr,
+    HValue* object,
+    HValue* value,
+    SmallMapList* types,
+    Handle<String> name) {
   // TODO(ager): We should recognize when the prototype chains for different
   // maps are identical. In that case we can avoid repeatedly generating the
   // same prototype map checks.
@@ -5471,7 +5641,7 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
 }
 
 
-void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
+void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
   Property* prop = expr->target()->AsProperty();
   ASSERT(prop != NULL);
   expr->RecordTypeFeedback(oracle(), zone());
@@ -5554,10 +5724,11 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
 // Because not every expression has a position and there is no common
 // superclass of Assignment and CountOperation, we cannot just pass the
 // owning expression instead of position and ast_id separately.
-void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
-                                                   HValue* value,
-                                                   int position,
-                                                   BailoutId ast_id) {
+void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
+    Variable* var,
+    HValue* value,
+    int position,
+    BailoutId ast_id) {
   LookupResult lookup(isolate());
   GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
   if (type == kUseCell) {
@@ -5588,7 +5759,7 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
 }
 
 
-void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
+void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
   Expression* target = expr->target();
   VariableProxy* proxy = target->AsVariableProxy();
   Property* prop = target->AsProperty();
@@ -5785,7 +5956,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
 }
 
 
-void HGraphBuilder::VisitAssignment(Assignment* expr) {
+void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -5912,7 +6083,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
 }
 
 
-void HGraphBuilder::VisitThrow(Throw* expr) {
+void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -5933,9 +6104,10 @@ void HGraphBuilder::VisitThrow(Throw* expr) {
 }
 
 
-HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
-                                                    Handle<Map> map,
-                                                    LookupResult* lookup) {
+HLoadNamedField* HOptimizedGraphBuilder::BuildLoadNamedField(
+    HValue* object,
+    Handle<Map> map,
+    LookupResult* lookup) {
   int index = lookup->GetLocalFieldIndexFromMap(*map);
   if (index < 0) {
     // Negative property indices are in-object properties, indexed
@@ -5950,9 +6122,10 @@ HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
 }
 
 
-HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* object,
-                                                   Handle<String> name,
-                                                   Property* expr) {
+HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
+    HValue* object,
+    Handle<String> name,
+    Property* expr) {
   if (expr->IsUninitialized() && !FLAG_always_opt) {
     AddInstruction(new(zone()) HSoftDeoptimize);
     current_block()->MarkAsDeoptimizing();
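
The HSoftDeoptimize above is a standard Crankshaft move: an uninitialized IC means this load has never executed, so instead of compiling a generic load on zero evidence, the optimized code deoptimizes back to the unoptimized version to gather feedback first. In sketch form:

    #include <cstdio>

    enum Action { kSoftDeopt, kGenericLoad };

    Action LowerNamedLoad(bool ic_is_uninitialized, bool always_opt) {
      // No feedback yet: go back and collect some instead of guessing.
      if (ic_is_uninitialized && !always_opt) return kSoftDeopt;
      return kGenericLoad;
    }

    int main() {
      std::printf("%d\n", LowerNamedLoad(true, false) == kSoftDeopt);  // 1
    }
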
@@ -5962,20 +6135,22 @@ HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* object,
 }
 
 
-HInstruction* HGraphBuilder::BuildCallGetter(HValue* object,
-                                             Handle<Map> map,
-                                             Handle<JSFunction> getter,
-                                             Handle<JSObject> holder) {
+HInstruction* HOptimizedGraphBuilder::BuildCallGetter(
+    HValue* object,
+    Handle<Map> map,
+    Handle<JSFunction> getter,
+    Handle<JSObject> holder) {
   AddCheckConstantFunction(holder, object, map);
   AddInstruction(new(zone()) HPushArgument(object));
   return new(zone()) HCallConstantFunction(getter, 1);
 }
 
 
-HInstruction* HGraphBuilder::BuildLoadNamedMonomorphic(HValue* object,
-                                                       Handle<String> name,
-                                                       Property* expr,
-                                                       Handle<Map> map) {
+HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
+    HValue* object,
+    Handle<String> name,
+    Property* expr,
+    Handle<Map> map) {
   // Handle a load from a known field.
   ASSERT(!map->is_dictionary_map());
   LookupResult lookup(isolate());
@@ -6009,174 +6184,34 @@ HInstruction* HGraphBuilder::BuildLoadNamedMonomorphic(HValue* object,
 }
 
 
-HInstruction* HGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
-                                                   HValue* key) {
+HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
+                                                            HValue* key) {
   HValue* context = environment()->LookupContext();
   return new(zone()) HLoadKeyedGeneric(context, object, key);
 }
 
 
-HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
-    HValue* external_elements,
-    HValue* checked_key,
+HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
+    HValue* object,
+    HValue* key,
     HValue* val,
     HValue* dependency,
-    ElementsKind elements_kind,
+    Handle<Map> map,
     bool is_store) {
-  if (is_store) {
-    ASSERT(val != NULL);
-    switch (elements_kind) {
-      case EXTERNAL_PIXEL_ELEMENTS: {
-        val = AddInstruction(new(zone()) HClampToUint8(val));
-        break;
-      }
-      case EXTERNAL_BYTE_ELEMENTS:
-      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      case EXTERNAL_SHORT_ELEMENTS:
-      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      case EXTERNAL_INT_ELEMENTS:
-      case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
-        break;
-      }
-      case EXTERNAL_FLOAT_ELEMENTS:
-      case EXTERNAL_DOUBLE_ELEMENTS:
-        break;
-      case FAST_SMI_ELEMENTS:
-      case FAST_ELEMENTS:
-      case FAST_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
-      case DICTIONARY_ELEMENTS:
-      case NON_STRICT_ARGUMENTS_ELEMENTS:
-        UNREACHABLE();
-        break;
-    }
-    return new(zone()) HStoreKeyed(external_elements,
-                                   checked_key,
-                                   val,
-                                   elements_kind);
-  } else {
-    ASSERT(val == NULL);
-    HLoadKeyed* load =
-       new(zone()) HLoadKeyed(
-           external_elements, checked_key, dependency, elements_kind);
-    if (FLAG_opt_safe_uint32_operations &&
-        elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
-      graph()->RecordUint32Instruction(load);
-    }
-    return load;
-  }
-}
-
-
-HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
-                                                    HValue* checked_key,
-                                                    HValue* val,
-                                                    HValue* load_dependency,
-                                                    ElementsKind elements_kind,
-                                                    bool is_store) {
-  if (is_store) {
-    ASSERT(val != NULL);
-    switch (elements_kind) {
-      case FAST_SMI_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
-        // Smi-only arrays need a smi check.
-        AddInstruction(new(zone()) HCheckSmi(val));
-        // Fall through.
-      case FAST_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
-      case FAST_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
-        return new(zone()) HStoreKeyed(
-            elements, checked_key, val, elements_kind);
-      default:
-        UNREACHABLE();
-        return NULL;
-    }
-  }
-  // It's an element load (!is_store).
-  return new(zone()) HLoadKeyed(elements,
-                                checked_key,
-                                load_dependency,
-                                elements_kind);
-}
-
-
-HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
-                                                           HValue* key,
-                                                           HValue* val,
-                                                           HValue* dependency,
-                                                           Handle<Map> map,
-                                                           bool is_store) {
   HCheckMaps* mapcheck = new(zone()) HCheckMaps(object, map,
                                                 zone(), dependency);
   AddInstruction(mapcheck);
   if (dependency) {
     mapcheck->ClearGVNFlag(kDependsOnElementsKind);
   }
-  return BuildUncheckedMonomorphicElementAccess(object, key, val,
-                                                mapcheck, map, is_store);
-}
-
-
-HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
-    HValue* object,
-    HValue* key,
-    HValue* val,
-    HCheckMaps* mapcheck,
-    Handle<Map> map,
-    bool is_store) {
-  // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
-  // on a HElementsTransition instruction. The flag can also be removed if the
-  // map to check has FAST_HOLEY_ELEMENTS, since there can be no further
-  // ElementsKind transitions. Finally, the dependency can be removed for stores
-  // for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
-  // generated store code.
-  if ((map->elements_kind() == FAST_HOLEY_ELEMENTS) ||
-      (map->elements_kind() == FAST_ELEMENTS && is_store)) {
-    mapcheck->ClearGVNFlag(kDependsOnElementsKind);
-  }
-  bool fast_smi_only_elements = map->has_fast_smi_elements();
-  bool fast_elements = map->has_fast_object_elements();
-  HInstruction* elements =
-      AddInstruction(new(zone()) HLoadElements(object, mapcheck));
-  if (is_store && (fast_elements || fast_smi_only_elements)) {
-    HCheckMaps* check_cow_map = new(zone()) HCheckMaps(
-        elements, isolate()->factory()->fixed_array_map(), zone());
-    check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
-    AddInstruction(check_cow_map);
-  }
-  HInstruction* length = NULL;
-  HInstruction* checked_key = NULL;
-  if (map->has_external_array_elements()) {
-    length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
-    checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
-                                                          ALLOW_SMI_KEY));
-    HLoadExternalArrayPointer* external_elements =
-        new(zone()) HLoadExternalArrayPointer(elements);
-    AddInstruction(external_elements);
-    return BuildExternalArrayElementAccess(
-        external_elements, checked_key, val, mapcheck,
-        map->elements_kind(), is_store);
-  }
-  ASSERT(fast_smi_only_elements ||
-         fast_elements ||
-         map->has_fast_double_elements());
-  if (map->instance_type() == JS_ARRAY_TYPE) {
-    length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck,
-                                                       HType::Smi()));
-  } else {
-    length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
-  }
-  checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
-                                                        ALLOW_SMI_KEY));
-  return BuildFastElementAccess(elements, checked_key, val, mapcheck,
-                                map->elements_kind(), is_store);
+  return BuildUncheckedMonomorphicElementAccess(
+      object, key, val,
+      mapcheck, map->instance_type() == JS_ARRAY_TYPE,
+      map->elements_kind(), is_store);
 }
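
Note the signature change: BuildUncheckedMonomorphicElementAccess now takes the two facts the lowering actually needs, whether the receiver is a JSArray and which ElementsKind it has, rather than a Handle<Map>. Presumably the bodies deleted above reappear on the shared builder base so that stub compilation, which has no concrete map handle at compile time, can reuse the same lowering. A sketch of why the decoupled signature helps (names are illustrative):

    #include <cstdio>

    enum ElementsKind { FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };

    struct AccessPlan {
      bool bounds_check_against_array_length;  // JSArray checks length
      ElementsKind kind;
    };

    AccessPlan PlanElementAccess(bool is_js_array, ElementsKind kind) {
      // Nothing here needs a Map object -- only two facts about it.
      return AccessPlan{is_js_array, kind};
    }

    int main() {
      AccessPlan plan = PlanElementAccess(true, FAST_ELEMENTS);
      std::printf("%d\n", plan.bounds_check_against_array_length);  // 1
    }
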
 
 
-HInstruction* HGraphBuilder::TryBuildConsolidatedElementLoad(
+HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
     HValue* object,
     HValue* key,
     HValue* val,
@@ -6224,19 +6259,23 @@ HInstruction* HGraphBuilder::TryBuildConsolidatedElementLoad(
   HCheckMaps* check_maps = new(zone()) HCheckMaps(object, maps, zone());
   AddInstruction(check_maps);
   HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
-      object, key, val, check_maps, most_general_consolidated_map, false);
+      object, key, val, check_maps,
+      most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
+      most_general_consolidated_map->elements_kind(),
+      false);
   return instr;
 }
 
 
-HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
-                                                      HValue* key,
-                                                      HValue* val,
-                                                      Expression* prop,
-                                                      BailoutId ast_id,
-                                                      int position,
-                                                      bool is_store,
-                                                      bool* has_side_effects) {
+HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
+    HValue* object,
+    HValue* key,
+    HValue* val,
+    Expression* prop,
+    BailoutId ast_id,
+    int position,
+    bool is_store,
+    bool* has_side_effects) {
   *has_side_effects = false;
   AddInstruction(new(zone()) HCheckNonSmi(object));
   SmallMapList* maps = prop->GetReceiverTypes();
@@ -6421,8 +6460,8 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
         }
       } else {  // External array elements.
         access = AddInstruction(BuildExternalArrayElementAccess(
-            external_elements, checked_key, val, elements_kind_branch,
-            elements_kind, is_store));
+            external_elements, checked_key, val,
+            elements_kind_branch, elements_kind, is_store));
       }
       *has_side_effects |= access->HasObservableSideEffects();
       if (position != RelocInfo::kNoPosition) access->set_position(position);
@@ -6442,14 +6481,15 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
 }
 
 
-HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
-                                                HValue* key,
-                                                HValue* val,
-                                                Expression* expr,
-                                                BailoutId ast_id,
-                                                int position,
-                                                bool is_store,
-                                                bool* has_side_effects) {
+HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
+    HValue* obj,
+    HValue* key,
+    HValue* val,
+    Expression* expr,
+    BailoutId ast_id,
+    int position,
+    bool is_store,
+    bool* has_side_effects) {
   ASSERT(!expr->IsPropertyName());
   HInstruction* instr = NULL;
   if (expr->IsMonomorphic()) {
@@ -6479,9 +6519,10 @@ HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
 }
 
 
-HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
-                                                    HValue* key,
-                                                    HValue* value) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreKeyedGeneric(
+    HValue* object,
+    HValue* key,
+    HValue* value) {
   HValue* context = environment()->LookupContext();
   return new(zone()) HStoreKeyedGeneric(
                          context,
@@ -6492,7 +6533,7 @@ HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
 }
 
 
-void HGraphBuilder::EnsureArgumentsArePushedForAccess() {
+void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
   // Outermost function already has arguments on the stack.
   if (function_state()->outer() == NULL) return;
 
@@ -6520,7 +6561,7 @@ void HGraphBuilder::EnsureArgumentsArePushedForAccess() {
 }
 
 
-bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
+bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
   VariableProxy* proxy = expr->obj()->AsVariableProxy();
   if (proxy == NULL) return false;
   if (!proxy->var()->IsStackAllocated()) return false;
@@ -6579,7 +6620,7 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
 }
 
 
-void HGraphBuilder::VisitProperty(Property* expr) {
+void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -6670,8 +6711,8 @@ void HGraphBuilder::VisitProperty(Property* expr) {
 }
 
 
-void HGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
-                                          Handle<Map> receiver_map) {
+void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
+                                                   Handle<Map> receiver_map) {
   if (!holder.is_null()) {
     AddInstruction(new(zone()) HCheckPrototypeMaps(
         Handle<JSObject>(JSObject::cast(receiver_map->prototype())), holder));
@@ -6679,9 +6720,10 @@ void HGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
 }
 
 
-void HGraphBuilder::AddCheckConstantFunction(Handle<JSObject> holder,
-                                             HValue* receiver,
-                                             Handle<Map> receiver_map) {
+void HOptimizedGraphBuilder::AddCheckConstantFunction(
+    Handle<JSObject> holder,
+    HValue* receiver,
+    Handle<Map> receiver_map) {
   // Constant functions have the nice property that the map will change if they
   // are overwritten.  Therefore it is enough to check the map of the holder and
   // its prototypes.
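
The comment above states the invariant that makes this check sound: overwriting a constant-function property changes the holder's map, so a map-identity guard on the holder and its prototypes guarantees the function is still the same one, and the property never has to be reloaded. A tiny sketch:

    #include <cassert>

    struct Map { int id; };
    struct Object { const Map* map; };

    bool ConstantFunctionStillValid(const Object& holder, const Map* expected) {
      return holder.map == expected;  // no need to reload the property
    }

    int main() {
      Map before = {1};
      Map after = {2};  // overwriting the function installs a fresh map
      Object holder = {&before};
      assert(ConstantFunctionStillValid(holder, &before));
      holder.map = &after;  // the constant function was overwritten
      assert(!ConstantFunctionStillValid(holder, &before));
    }
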
@@ -6723,10 +6765,11 @@ static int CompareHotness(void const* a, void const* b) {
 }
 
 
-void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
-                                               HValue* receiver,
-                                               SmallMapList* types,
-                                               Handle<String> name) {
+void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
+    Call* expr,
+    HValue* receiver,
+    SmallMapList* types,
+    Handle<String> name) {
   // TODO(ager): We should recognize when the prototype chains for different
   // maps are identical. In that case we can avoid repeatedly generating the
   // same prototype map checks.
@@ -6828,9 +6871,9 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
 }
 
 
-void HGraphBuilder::TraceInline(Handle<JSFunction> target,
-                                Handle<JSFunction> caller,
-                                const char* reason) {
+void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
+                                         Handle<JSFunction> caller,
+                                         const char* reason) {
   if (FLAG_trace_inlining) {
     SmartArrayPointer<char> target_name =
         target->shared()->DebugName()->ToCString();
@@ -6849,7 +6892,7 @@ void HGraphBuilder::TraceInline(Handle<JSFunction> target,
 static const int kNotInlinable = 1000000000;
 
 
-int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
+int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
   if (!FLAG_use_inlining) return kNotInlinable;
 
   // Precondition: call is monomorphic and we have found a target with the
@@ -6880,13 +6923,13 @@ int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
 }
 
 
-bool HGraphBuilder::TryInline(CallKind call_kind,
-                              Handle<JSFunction> target,
-                              int arguments_count,
-                              HValue* implicit_return_value,
-                              BailoutId ast_id,
-                              BailoutId return_id,
-                              InliningKind inlining_kind) {
+bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
+                                       Handle<JSFunction> target,
+                                       int arguments_count,
+                                       HValue* implicit_return_value,
+                                       BailoutId ast_id,
+                                       BailoutId return_id,
+                                       InliningKind inlining_kind) {
   int nodes_added = InliningAstSize(target);
   if (nodes_added == kNotInlinable) return false;
 
@@ -7195,7 +7238,7 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
 }
 
 
-bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
+bool HOptimizedGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
   // The function call we are inlining is a method call if the call
   // is a property call.
   CallKind call_kind = (expr->expression()->AsProperty() == NULL)
@@ -7212,8 +7255,8 @@ bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
 }
 
 
-bool HGraphBuilder::TryInlineConstruct(CallNew* expr,
-                                       HValue* implicit_return_value) {
+bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
+                                                HValue* implicit_return_value) {
   return TryInline(CALL_AS_FUNCTION,
                    expr->target(),
                    expr->arguments()->length(),
@@ -7224,8 +7267,8 @@ bool HGraphBuilder::TryInlineConstruct(CallNew* expr,
 }
 
 
-bool HGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
-                                    Property* prop) {
+bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
+                                             Property* prop) {
   return TryInline(CALL_AS_METHOD,
                    getter,
                    0,
@@ -7236,9 +7279,9 @@ bool HGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
 }
 
 
-bool HGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
-                                    Assignment* assignment,
-                                    HValue* implicit_return_value) {
+bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
+                                             Assignment* assignment,
+                                             HValue* implicit_return_value) {
   return TryInline(CALL_AS_METHOD,
                    setter,
                    1,
@@ -7249,7 +7292,8 @@ bool HGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
 }
 
 
-bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
+bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
+                                                          bool drop_extra) {
   if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
   BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
   switch (id) {
@@ -7283,10 +7327,11 @@ bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
 }
 
 
-bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
-                                               HValue* receiver,
-                                               Handle<Map> receiver_map,
-                                               CheckType check_type) {
+bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
+    Call* expr,
+    HValue* receiver,
+    Handle<Map> receiver_map,
+    CheckType check_type) {
   ASSERT(check_type != RECEIVER_MAP_CHECK || !receiver_map.is_null());
   // Try to inline calls like Math.* as operations in the calling function.
   if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
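
TryInlineBuiltinMethodCall (continued in the next hunk) pattern-matches calls such as Math.floor or Math.abs by their builtin-function id and, when the receiver map check permits, replaces the call with a single arithmetic instruction. The same idea in sketch form, with illustrative ids:

    #include <cmath>
    #include <cstdio>

    enum BuiltinId { kMathFloor, kMathAbs, kNotABuiltin };

    bool TryInlineBuiltin(BuiltinId id, double arg, double* result) {
      switch (id) {
        case kMathFloor: *result = std::floor(arg); return true;
        case kMathAbs:   *result = std::fabs(arg);  return true;
        default:         return false;  // keep the real call
      }
    }

    int main() {
      double result;
      if (TryInlineBuiltin(kMathFloor, -2.5, &result)) {
        std::printf("%g\n", result);  // -3
      }
    }
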
@@ -7416,7 +7461,7 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
 }
 
 
-bool HGraphBuilder::TryCallApply(Call* expr) {
+bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
   Expression* callee = expr->expression();
   Property* prop = callee->AsProperty();
   ASSERT(prop != NULL);
@@ -7544,7 +7589,7 @@ static Map* CheckSameElementsFamily(SmallMapList* types) {
 }
 
 
-void HGraphBuilder::VisitCall(Call* expr) {
+void HOptimizedGraphBuilder::VisitCall(Call* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -7774,7 +7819,7 @@ static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
 }
 
 
-void HGraphBuilder::VisitCallNew(CallNew* expr) {
+void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -7838,20 +7883,21 @@ void HGraphBuilder::VisitCallNew(CallNew* expr) {
 
 // Support for generating inlined runtime functions.
 
-// Lookup table for generators for runtime calls that are  generated inline.
-// Elements of the table are member pointers to functions of HGraphBuilder.
+// Lookup table for generators for runtime calls that are generated inline.
+// Elements of the table are member pointers to functions of
+// HOptimizedGraphBuilder.
 #define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize)  \
-    &HGraphBuilder::Generate##Name,
+    &HOptimizedGraphBuilder::Generate##Name,
 
-const HGraphBuilder::InlineFunctionGenerator
-    HGraphBuilder::kInlineFunctionGenerators[] = {
+const HOptimizedGraphBuilder::InlineFunctionGenerator
+    HOptimizedGraphBuilder::kInlineFunctionGenerators[] = {
         INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
         INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
 };
 #undef INLINE_FUNCTION_GENERATOR_ADDRESS
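
The table above is the classic X-macro pattern: INLINE_FUNCTION_LIST expands once in the header to declare the Generate* members and once more here to build an array of member-function pointers, so VisitCallRuntime can dispatch on a runtime-call id as a plain table index. A self-contained miniature:

    #include <cstdio>

    #define INLINE_LIST(F) F(IsSmi) F(StringCharCodeAt)

    class Builder {
     public:
    #define DECLARE_GENERATOR(Name) \
      void Generate##Name() { std::printf("Generate" #Name "\n"); }
      INLINE_LIST(DECLARE_GENERATOR)
    #undef DECLARE_GENERATOR

      typedef void (Builder::*Generator)();
      static const Generator kGenerators[];
    };

    #define GENERATOR_ADDRESS(Name) &Builder::Generate##Name,
    const Builder::Generator Builder::kGenerators[] = {
      INLINE_LIST(GENERATOR_ADDRESS)
    };
    #undef GENERATOR_ADDRESS

    int main() {
      Builder builder;
      (builder.*Builder::kGenerators[1])();  // GenerateStringCharCodeAt
    }
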
 
 
-void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -7889,7 +7935,7 @@ void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
 }
 
 
-void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -7905,7 +7951,7 @@ void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
   }
 }
 
-void HGraphBuilder::VisitDelete(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
   Property* prop = expr->expression()->AsProperty();
   VariableProxy* proxy = expr->expression()->AsVariableProxy();
   if (prop != NULL) {
@@ -7940,13 +7986,13 @@ void HGraphBuilder::VisitDelete(UnaryOperation* expr) {
 }
 
 
-void HGraphBuilder::VisitVoid(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitVoid(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForEffect(expr->expression()));
   return ast_context()->ReturnValue(graph()->GetConstantUndefined());
 }
 
 
-void HGraphBuilder::VisitTypeof(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForTypeOf(expr->expression()));
   HValue* value = Pop();
   HValue* context = environment()->LookupContext();
@@ -7955,22 +8001,22 @@ void HGraphBuilder::VisitTypeof(UnaryOperation* expr) {
 }
 
 
-void HGraphBuilder::VisitAdd(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitAdd(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForValue(expr->expression()));
   HValue* value = Pop();
   HValue* context = environment()->LookupContext();
   HInstruction* instr =
-      new(zone()) HMul(context, value, graph_->GetConstant1());
+      new(zone()) HMul(context, value, graph()->GetConstant1());
   return ast_context()->ReturnInstruction(instr, expr->id());
 }
 
 
-void HGraphBuilder::VisitSub(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForValue(expr->expression()));
   HValue* value = Pop();
   HValue* context = environment()->LookupContext();
   HInstruction* instr =
-      new(zone()) HMul(context, value, graph_->GetConstantMinus1());
+      new(zone()) HMul(context, value, graph()->GetConstantMinus1());
   TypeInfo info = oracle()->UnaryType(expr);
   Representation rep = ToRepresentation(info);
   if (info.IsUninitialized()) {
@@ -7983,7 +8029,7 @@ void HGraphBuilder::VisitSub(UnaryOperation* expr) {
 }
 
 
-void HGraphBuilder::VisitBitNot(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitBitNot(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForValue(expr->expression()));
   HValue* value = Pop();
   TypeInfo info = oracle()->UnaryType(expr);
@@ -7996,7 +8042,7 @@ void HGraphBuilder::VisitBitNot(UnaryOperation* expr) {
 }
 
 
-void HGraphBuilder::VisitNot(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitNot(UnaryOperation* expr) {
   if (ast_context()->IsTest()) {
     TestContext* context = TestContext::cast(ast_context());
     VisitForControl(expr->expression(),
@@ -8040,8 +8086,9 @@ void HGraphBuilder::VisitNot(UnaryOperation* expr) {
 }
 
 
-HInstruction* HGraphBuilder::BuildIncrement(bool returns_original_input,
-                                            CountOperation* expr) {
+HInstruction* HOptimizedGraphBuilder::BuildIncrement(
+    bool returns_original_input,
+    CountOperation* expr) {
   // The input to the count operation is on top of the expression stack.
   TypeInfo info = oracle()->IncrementType(expr);
   Representation rep = ToRepresentation(info);
@@ -8063,8 +8110,8 @@ HInstruction* HGraphBuilder::BuildIncrement(bool returns_original_input,
   // to simulate the expression stack after this instruction.
   // Any later failures deopt to the load of the input or earlier.
   HConstant* delta = (expr->op() == Token::INC)
-      ? graph_->GetConstant1()
-      : graph_->GetConstantMinus1();
+      ? graph()->GetConstant1()
+      : graph()->GetConstantMinus1();
   HValue* context = environment()->LookupContext();
   HInstruction* instr = new(zone()) HAdd(context, Top(), delta);
   // We can't insert a simulate here, because it would break deoptimization,
@@ -8077,7 +8124,7 @@ HInstruction* HGraphBuilder::BuildIncrement(bool returns_original_input,
 }
 
 
-void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
+void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -8161,7 +8208,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
 
     if (prop->key()->IsPropertyName()) {
       // Named property.
-      if (returns_original_input) Push(graph_->GetConstantUndefined());
+      if (returns_original_input) Push(graph()->GetConstantUndefined());
 
       CHECK_ALIVE(VisitForValue(prop->obj()));
       HValue* object = Top();
@@ -8222,7 +8269,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
 
     } else {
       // Keyed property.
-      if (returns_original_input) Push(graph_->GetConstantUndefined());
+      if (returns_original_input) Push(graph()->GetConstantUndefined());
 
       CHECK_ALIVE(VisitForValue(prop->obj()));
       CHECK_ALIVE(VisitForValue(prop->key()));
@@ -8262,9 +8309,10 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
 }
 
 
-HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* context,
-                                                        HValue* string,
-                                                        HValue* index) {
+HStringCharCodeAt* HOptimizedGraphBuilder::BuildStringCharCodeAt(
+    HValue* context,
+    HValue* string,
+    HValue* index) {
   AddInstruction(new(zone()) HCheckNonSmi(string));
   AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
   HStringLength* length = new(zone()) HStringLength(string);
@@ -8292,10 +8340,10 @@ static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
 // directions that can be replaced by one rotate right instruction or not.
 // Returns the operand and the shift amount for the rotate instruction in the
 // former case.
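 // For example, the JavaScript expression (x << 3) | (x >>> 29) on a 32-bit
 // value is equivalent to a rotate right by 29, which is the pattern matched
 // here.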
-bool HGraphBuilder::MatchRotateRight(HValue* left,
-                                     HValue* right,
-                                     HValue** operand,
-                                     HValue** shift_amount) {
+bool HOptimizedGraphBuilder::MatchRotateRight(HValue* left,
+                                              HValue* right,
+                                              HValue** operand,
+                                              HValue** shift_amount) {
   HShl* shl;
   HShr* shr;
   if (left->IsShl() && right->IsShr()) {
@@ -8330,9 +8378,10 @@ bool CanBeZero(HValue *right) {
 }
 
 
-HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
-                                                  HValue* left,
-                                                  HValue* right) {
+HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
+    BinaryOperation* expr,
+    HValue* left,
+    HValue* right) {
   HValue* context = environment()->LookupContext();
   TypeInfo left_info, right_info, result_info, combined_info;
   oracle()->BinaryType(expr, &left_info, &right_info, &result_info);
@@ -8425,7 +8474,7 @@ static bool IsClassOfTest(CompareOperation* expr) {
 }
 
 
-void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -8441,7 +8490,7 @@ void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
 }
 
 
-void HGraphBuilder::VisitComma(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitComma(BinaryOperation* expr) {
   CHECK_ALIVE(VisitForEffect(expr->left()));
   // Visit the right subexpression in the same AST context as the entire
   // expression.
@@ -8449,7 +8498,7 @@ void HGraphBuilder::VisitComma(BinaryOperation* expr) {
 }
 
 
-void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
   bool is_logical_and = expr->op() == Token::AND;
   if (ast_context()->IsTest()) {
     TestContext* context = TestContext::cast(ast_context());
@@ -8539,7 +8588,7 @@ void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
 }
 
 
-void HGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
   CHECK_ALIVE(VisitForValue(expr->left()));
   CHECK_ALIVE(VisitForValue(expr->right()));
   HValue* right = Pop();
@@ -8550,7 +8599,7 @@ void HGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
 }
 
 
-Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
+Representation HOptimizedGraphBuilder::ToRepresentation(TypeInfo info) {
   if (info.IsUninitialized()) return Representation::None();
   if (info.IsSmi()) return Representation::Integer32();
   if (info.IsInteger32()) return Representation::Integer32();
@@ -8560,9 +8609,9 @@ Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
 }
 
 
-void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
-                                               HTypeof* typeof_expr,
-                                               Handle<String> check) {
+void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
+                                                        HTypeof* typeof_expr,
+                                                        Handle<String> check) {
   // Note: The HTypeof itself is removed during canonicalization, if possible.
   HValue* value = typeof_expr->value();
   HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
@@ -8632,7 +8681,7 @@ static bool IsLiteralCompareBool(HValue* left,
 }
 
 
-void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -8785,9 +8834,9 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
 }
 
 
-void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
-                                            HValue* value,
-                                            NilValue nil) {
+void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
+                                                     HValue* value,
+                                                     NilValue nil) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -8799,7 +8848,7 @@ void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
 }
 
 
-HInstruction* HGraphBuilder::BuildThisFunction() {
+HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
   // If we share optimized code between different closures, the
   // this-function is not a constant, except inside an inlined body.
   if (function_state()->outer() != NULL) {
@@ -8812,7 +8861,7 @@ HInstruction* HGraphBuilder::BuildThisFunction() {
 }
 
 
-void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
+void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -8821,7 +8870,8 @@ void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
 }
 
 
-void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
+void HOptimizedGraphBuilder::VisitDeclarations(
+    ZoneList<Declaration*>* declarations) {
   ASSERT(globals_.is_empty());
   AstVisitor::VisitDeclarations(declarations);
   if (!globals_.is_empty()) {
@@ -8839,7 +8889,8 @@ void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
 }
 
 
-void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitVariableDeclaration(
+    VariableDeclaration* declaration) {
   VariableProxy* proxy = declaration->proxy();
   VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
@@ -8876,7 +8927,8 @@ void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) {
 }
 
 
-void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitFunctionDeclaration(
+    FunctionDeclaration* declaration) {
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
@@ -8914,49 +8966,52 @@ void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
 }
 
 
-void HGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitModuleDeclaration(
+    ModuleDeclaration* declaration) {
   UNREACHABLE();
 }
 
 
-void HGraphBuilder::VisitImportDeclaration(ImportDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitImportDeclaration(
+    ImportDeclaration* declaration) {
   UNREACHABLE();
 }
 
 
-void HGraphBuilder::VisitExportDeclaration(ExportDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitExportDeclaration(
+    ExportDeclaration* declaration) {
   UNREACHABLE();
 }
 
 
-void HGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
+void HOptimizedGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
   UNREACHABLE();
 }
 
 
-void HGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
+void HOptimizedGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
   UNREACHABLE();
 }
 
 
-void HGraphBuilder::VisitModulePath(ModulePath* module) {
+void HOptimizedGraphBuilder::VisitModulePath(ModulePath* module) {
   UNREACHABLE();
 }
 
 
-void HGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
+void HOptimizedGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
   UNREACHABLE();
 }
 
 
-void HGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
+void HOptimizedGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
   UNREACHABLE();
 }
 
 
 // Generators for inline runtime functions.
 // Support for types.
-void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -8965,7 +9020,7 @@ void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
 }
 
 
-void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -8977,7 +9032,7 @@ void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
 }
 
 
-void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -8987,7 +9042,7 @@ void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
 }
 
 
-void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -8997,7 +9052,7 @@ void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
 }
 
 
-void HGraphBuilder::GenerateIsArray(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9007,7 +9062,7 @@ void HGraphBuilder::GenerateIsArray(CallRuntime* call) {
 }
 
 
-void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9017,7 +9072,7 @@ void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
 }
 
 
-void HGraphBuilder::GenerateIsObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9026,12 +9081,12 @@ void HGraphBuilder::GenerateIsObject(CallRuntime* call) {
 }
 
 
-void HGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
   return Bailout("inlined runtime function: IsNonNegativeSmi");
 }
 
 
-void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9041,7 +9096,7 @@ void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
 }
 
 
-void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
+void HOptimizedGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
     CallRuntime* call) {
   return Bailout(
       "inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
@@ -9049,7 +9104,7 @@ void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
 
 
 // Support for construct call checks.
-void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 0);
   if (function_state()->outer() != NULL) {
     // We are generating graph for inlined function.
@@ -9065,7 +9120,7 @@ void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
 
 
 // Support for arguments.length and arguments[?].
-void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
   // Our implementation of arguments (based on this stack frame or an
   // adapter below it) does not work for inlined functions.  This runtime
   // function is blacklisted by AstNode::IsInlineable.
@@ -9078,7 +9133,7 @@ void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
 }
 
 
-void HGraphBuilder::GenerateArguments(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
   // Our implementation of arguments (based on this stack frame or an
   // adapter below it) does not work for inlined functions.  This runtime
   // function is blacklisted by AstNode::IsInlineable.
@@ -9098,14 +9153,14 @@ void HGraphBuilder::GenerateArguments(CallRuntime* call) {
 
 
 // Support for accessing the class and value fields of an object.
-void HGraphBuilder::GenerateClassOf(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateClassOf(CallRuntime* call) {
   // The special form detected by IsClassOfTest is handled before we get here
   // and does not cause a bailout.

   return Bailout("inlined runtime function: ClassOf");
 }
 
 
-void HGraphBuilder::GenerateValueOf(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9114,7 +9169,7 @@ void HGraphBuilder::GenerateValueOf(CallRuntime* call) {
 }
 
 
-void HGraphBuilder::GenerateDateField(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
   ASSERT_NE(NULL, call->arguments()->at(1)->AsLiteral());
   Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->handle()));
@@ -9125,7 +9180,7 @@ void HGraphBuilder::GenerateDateField(CallRuntime* call) {
 }
 
 
-void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9168,7 +9223,7 @@ void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
 
 
 // Fast support for charCodeAt(n).
-void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9181,7 +9236,7 @@ void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
 
 
 // Fast support for string.charAt(n) and string[n].
-void HGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* char_code = Pop();
@@ -9193,7 +9248,7 @@ void HGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
 
 
 // Fast support for string.charAt(n) and string[n].
-void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9209,7 +9264,7 @@ void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
 
 
 // Fast support for object equality testing.
-void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9221,14 +9276,14 @@ void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
 }
 
 
-void HGraphBuilder::GenerateLog(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateLog(CallRuntime* call) {
   // %_Log is ignored in optimized code.
   return ast_context()->ReturnValue(graph()->GetConstantUndefined());
 }
 
 
 // Fast support for Math.random().
-void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
   HValue* context = environment()->LookupContext();
   HGlobalObject* global_object = new(zone()) HGlobalObject(context);
   AddInstruction(global_object);
@@ -9238,7 +9293,7 @@ void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
 
 
 // Fast support for StringAdd.
-void HGraphBuilder::GenerateStringAdd(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9249,7 +9304,7 @@ void HGraphBuilder::GenerateStringAdd(CallRuntime* call) {
 
 
 // Fast support for SubString.
-void HGraphBuilder::GenerateSubString(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
   ASSERT_EQ(3, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9260,7 +9315,7 @@ void HGraphBuilder::GenerateSubString(CallRuntime* call) {
 
 
 // Fast support for StringCompare.
-void HGraphBuilder::GenerateStringCompare(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9272,7 +9327,7 @@ void HGraphBuilder::GenerateStringCompare(CallRuntime* call) {
 
 
 // Support for direct calls from JavaScript to native RegExp code.
-void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
   ASSERT_EQ(4, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9283,7 +9338,7 @@ void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
 
 
 // Construct a RegExp exec result with two in-object properties.
-void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
   ASSERT_EQ(3, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9295,13 +9350,13 @@ void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
 
 
 // Support for fast native caches.
-void HGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
   return Bailout("inlined runtime function: GetFromCache");
 }
 
 
 // Fast support for number to string.
-void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9313,7 +9368,7 @@ void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
 
 
 // Fast call for custom callbacks.
-void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
   // 1 ~ The function to call is not itself an argument to the call.
   int arg_count = call->arguments()->length() - 1;
   ASSERT(arg_count >= 1);  // There's always at least a receiver.
@@ -9357,7 +9412,7 @@ void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
 
 
 // Fast call to math functions.
-void HGraphBuilder::GenerateMathPow(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9368,7 +9423,7 @@ void HGraphBuilder::GenerateMathPow(CallRuntime* call) {
 }
 
 
-void HGraphBuilder::GenerateMathSin(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathSin(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9380,7 +9435,7 @@ void HGraphBuilder::GenerateMathSin(CallRuntime* call) {
 }
 
 
-void HGraphBuilder::GenerateMathCos(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathCos(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9392,7 +9447,7 @@ void HGraphBuilder::GenerateMathCos(CallRuntime* call) {
 }
 
 
-void HGraphBuilder::GenerateMathTan(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathTan(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9404,7 +9459,7 @@ void HGraphBuilder::GenerateMathTan(CallRuntime* call) {
 }
 
 
-void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9416,18 +9471,18 @@ void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
 }
 
 
-void HGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
   return Bailout("inlined runtime function: MathSqrt");
 }
 
 
 // Check whether two RegExps are equivalent
-void HGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
   return Bailout("inlined runtime function: IsRegExpEquivalent");
 }
 
 
-void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9436,7 +9491,7 @@ void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
 }
 
 
-void HGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
   return Bailout("inlined runtime function: FastAsciiArrayJoin");
 }
 
@@ -9466,6 +9521,23 @@ HEnvironment::HEnvironment(HEnvironment* outer,
 }
 
 
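+// A minimal environment for Hydrogen-generated code stubs: no parameters or
+// locals, and a STUB frame type so the deoptimizer can tell these frames
+// apart from JavaScript frames.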
+HEnvironment::HEnvironment(Zone* zone)
+    : values_(0, zone),
+      assigned_variables_(0, zone),
+      frame_type_(STUB),
+      parameter_count_(0),
+      specials_count_(0),
+      local_count_(0),
+      outer_(NULL),
+      entry_(NULL),
+      pop_count_(0),
+      push_count_(0),
+      ast_id_(BailoutId::None()),
+      zone_(zone) {
+  Initialize(0, 0, 0);
+}
+
+
 HEnvironment::HEnvironment(const HEnvironment* other, Zone* zone)
     : values_(0, zone),
       assigned_variables_(0, zone),
@@ -9733,11 +9805,17 @@ void HEnvironment::PrintToStd() {
 }
 
 
-void HTracer::TraceCompilation(FunctionLiteral* function) {
+void HTracer::TraceCompilation(CompilationInfo* info) {
   Tag tag(this, "compilation");
-  Handle<String> name = function->debug_name();
-  PrintStringProperty("name", *name->ToCString());
-  PrintStringProperty("method", *name->ToCString());
+  if (info->IsOptimizing()) {
+    Handle<String> name = info->function()->debug_name();
+    PrintStringProperty("name", *name->ToCString());
+    PrintStringProperty("method", *name->ToCString());
+  } else {
+    CodeStub::Major major_key = info->code_stub()->MajorKey();
+    PrintStringProperty("name", CodeStub::MajorName(major_key, false));
+    PrintStringProperty("method", "stub");
+  }
   PrintLongProperty("date", static_cast<int64_t>(OS::TimeCurrentMillis()));
 }
 
index 98b05d1..0837bf9 100644 (file)
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -429,7 +429,8 @@ enum FrameType {
   JS_CONSTRUCT,
   JS_GETTER,
   JS_SETTER,
-  ARGUMENTS_ADAPTOR
+  ARGUMENTS_ADAPTOR,
+  STUB
 };
 
 
@@ -440,6 +441,8 @@ class HEnvironment: public ZoneObject {
                Handle<JSFunction> closure,
                Zone* zone);
 
+  explicit HEnvironment(Zone* zone);
+
   HEnvironment* arguments_environment() {
     return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this;
   }
@@ -636,7 +639,7 @@ class HInferRepresentation BASE_EMBEDDED {
 };
 
 
-class HGraphBuilder;
+class HOptimizedGraphBuilder;
 
 enum ArgumentsAllowedFlag {
   ARGUMENTS_NOT_ALLOWED,
@@ -672,10 +675,10 @@ class AstContext {
   bool is_for_typeof() { return for_typeof_; }
 
  protected:
-  AstContext(HGraphBuilder* owner, Expression::Context kind);
+  AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind);
   virtual ~AstContext();
 
-  HGraphBuilder* owner() const { return owner_; }
+  HOptimizedGraphBuilder* owner() const { return owner_; }
 
   inline Zone* zone() const;
 
@@ -686,7 +689,7 @@ class AstContext {
 #endif
 
  private:
-  HGraphBuilder* owner_;
+  HOptimizedGraphBuilder* owner_;
   Expression::Context kind_;
   AstContext* outer_;
   bool for_typeof_;
@@ -695,7 +698,7 @@ class AstContext {
 
 class EffectContext: public AstContext {
  public:
-  explicit EffectContext(HGraphBuilder* owner)
+  explicit EffectContext(HOptimizedGraphBuilder* owner)
       : AstContext(owner, Expression::kEffect) {
   }
   virtual ~EffectContext();
@@ -708,7 +711,7 @@ class EffectContext: public AstContext {
 
 class ValueContext: public AstContext {
  public:
-  explicit ValueContext(HGraphBuilder* owner, ArgumentsAllowedFlag flag)
+  ValueContext(HOptimizedGraphBuilder* owner, ArgumentsAllowedFlag flag)
       : AstContext(owner, Expression::kValue), flag_(flag) {
   }
   virtual ~ValueContext();
@@ -726,7 +729,7 @@ class ValueContext: public AstContext {
 
 class TestContext: public AstContext {
  public:
-  TestContext(HGraphBuilder* owner,
+  TestContext(HOptimizedGraphBuilder* owner,
               Expression* condition,
               TypeFeedbackOracle* oracle,
               HBasicBlock* if_true,
@@ -766,7 +769,7 @@ class TestContext: public AstContext {
 
 class FunctionState {
  public:
-  FunctionState(HGraphBuilder* owner,
+  FunctionState(HOptimizedGraphBuilder* owner,
                 CompilationInfo* info,
                 TypeFeedbackOracle* oracle,
                 InliningKind inlining_kind);
@@ -796,7 +799,7 @@ class FunctionState {
   bool arguments_pushed() { return arguments_elements() != NULL; }
 
  private:
-  HGraphBuilder* owner_;
+  HOptimizedGraphBuilder* owner_;
 
   CompilationInfo* compilation_info_;
   TypeFeedbackOracle* oracle_;
@@ -828,7 +831,65 @@ class FunctionState {
 };
 
 
-class HGraphBuilder: public AstVisitor {
+class HGraphBuilder {
+ public:
+  explicit HGraphBuilder(CompilationInfo* info)
+      : info_(info), graph_(NULL), current_block_(NULL) {}
+  virtual ~HGraphBuilder() {}
+
+  HBasicBlock* current_block() const { return current_block_; }
+  void set_current_block(HBasicBlock* block) { current_block_ = block; }
+  HEnvironment* environment() const {
+    return current_block()->last_environment();
+  }
+  Zone* zone() const { return info_->zone(); }
+  HGraph* graph() { return graph_; }
+
+  HGraph* CreateGraph();
+
+  // Adding instructions.
+  HInstruction* AddInstruction(HInstruction* instr);
+  void AddSimulate(BailoutId id,
+                   RemovableSimulate removable = FIXED_SIMULATE);
+
+ protected:
+  virtual bool BuildGraph() = 0;
+
+  // Building common constructs
+  HInstruction* BuildExternalArrayElementAccess(
+      HValue* external_elements,
+      HValue* checked_key,
+      HValue* val,
+      HValue* dependency,
+      ElementsKind elements_kind,
+      bool is_store);
+
+  HInstruction* BuildFastElementAccess(
+      HValue* elements,
+      HValue* checked_key,
+      HValue* val,
+      HValue* dependency,
+      ElementsKind elements_kind,
+      bool is_store);
+
+  HInstruction* BuildUncheckedMonomorphicElementAccess(
+      HValue* object,
+      HValue* key,
+      HValue* val,
+      HCheckMaps* mapcheck,
+      bool is_js_array,
+      ElementsKind elements_kind,
+      bool is_store);
+
+ private:
+  HGraphBuilder();
+  CompilationInfo* info_;
+  HGraph* graph_;
+  HBasicBlock* current_block_;
+};
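+// A minimal sketch of how a stub-specific builder is meant to plug into this
+// split; the class name and body below are hypothetical (the real stub
+// builders live in the new code-stubs-hydrogen.cc), but they use only the
+// interface declared above:
+//
+//   class HypotheticalStubGraphBuilder : public HGraphBuilder {
+//    public:
+//     explicit HypotheticalStubGraphBuilder(CompilationInfo* info)
+//         : HGraphBuilder(info) {}
+//    protected:
+//     virtual bool BuildGraph() {
+//       // Emit the stub body with AddInstruction(), e.g. via
+//       // BuildUncheckedMonomorphicElementAccess() for a keyed load,
+//       // then let CreateGraph() return the finished graph.
+//       return true;
+//     }
+//   };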
+
+
+class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
  public:
   enum BreakType { BREAK, CONTINUE };
   enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH };
@@ -864,7 +925,8 @@ class HGraphBuilder: public AstVisitor {
   // structures mirroring BreakableStatement nesting.
   class BreakAndContinueScope BASE_EMBEDDED {
    public:
-    BreakAndContinueScope(BreakAndContinueInfo* info, HGraphBuilder* owner)
+    BreakAndContinueScope(BreakAndContinueInfo* info,
+                          HOptimizedGraphBuilder* owner)
         : info_(info), owner_(owner), next_(owner->break_scope()) {
       owner->set_break_scope(this);
     }
@@ -872,7 +934,7 @@ class HGraphBuilder: public AstVisitor {
     ~BreakAndContinueScope() { owner_->set_break_scope(next_); }
 
     BreakAndContinueInfo* info() { return info_; }
-    HGraphBuilder* owner() { return owner_; }
+    HOptimizedGraphBuilder* owner() { return owner_; }
     BreakAndContinueScope* next() { return next_; }
 
     // Search the break stack for a break or continue target.
@@ -880,32 +942,20 @@ class HGraphBuilder: public AstVisitor {
 
    private:
     BreakAndContinueInfo* info_;
-    HGraphBuilder* owner_;
+    HOptimizedGraphBuilder* owner_;
     BreakAndContinueScope* next_;
   };
 
-  HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
+  HOptimizedGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
 
-  HGraph* CreateGraph();
+  virtual bool BuildGraph();
 
   // Simple accessors.
-  HGraph* graph() const { return graph_; }
   BreakAndContinueScope* break_scope() const { return break_scope_; }
   void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; }
 
-  HBasicBlock* current_block() const { return current_block_; }
-  void set_current_block(HBasicBlock* block) { current_block_ = block; }
-  HEnvironment* environment() const {
-    return current_block()->last_environment();
-  }
-
   bool inline_bailout() { return inline_bailout_; }
 
-  // Adding instructions.
-  HInstruction* AddInstruction(HInstruction* instr);
-  void AddSimulate(BailoutId ast_id,
-                   RemovableSimulate removable = FIXED_SIMULATE);
-
   // Bailout environment manipulation.
   void Push(HValue* value) { environment()->Push(value); }
   HValue* Pop() { return environment()->Pop(); }
@@ -928,9 +978,12 @@ class HGraphBuilder: public AstVisitor {
   void operator delete(void* pointer, Zone* zone) { }
   void operator delete(void* pointer) { }
 
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
  private:
   // Type of a member function that generates inline code for a native function.
-  typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
+  typedef void (HOptimizedGraphBuilder::*InlineFunctionGenerator)
+      (CallRuntime* call);
 
   // Forward declarations for inner scope classes.
   class SubgraphScope;
@@ -1139,25 +1192,14 @@ class HGraphBuilder: public AstVisitor {
                                      HValue* right);
   HInstruction* BuildIncrement(bool returns_original_input,
                                CountOperation* expr);
-  HInstruction* BuildFastElementAccess(HValue* elements,
-                                       HValue* checked_key,
-                                       HValue* val,
-                                       HValue* dependency,
-                                       ElementsKind elements_kind,
-                                       bool is_store);
+  HInstruction* BuildLoadKeyedGeneric(HValue* object,
+                                      HValue* key);
 
   HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
                                                 HValue* key,
                                                 HValue* val,
                                                 SmallMapList* maps);
 
-  HInstruction* BuildUncheckedMonomorphicElementAccess(HValue* object,
-                                                       HValue* key,
-                                                       HValue* val,
-                                                       HCheckMaps* mapcheck,
-                                                       Handle<Map> map,
-                                                       bool is_store);
-
   HInstruction* BuildMonomorphicElementAccess(HValue* object,
                                               HValue* key,
                                               HValue* val,
@@ -1197,14 +1239,6 @@ class HGraphBuilder: public AstVisitor {
                                           Handle<String> name,
                                           Property* expr,
                                           Handle<Map> map);
-  HInstruction* BuildLoadKeyedGeneric(HValue* object, HValue* key);
-  HInstruction* BuildExternalArrayElementAccess(
-      HValue* external_elements,
-      HValue* checked_key,
-      HValue* val,
-      HValue* dependency,
-      ElementsKind elements_kind,
-      bool is_store);
 
   void AddCheckMapsWithTransitions(HValue* object,
                                    Handle<Map> map);
@@ -1246,8 +1280,6 @@ class HGraphBuilder: public AstVisitor {
                         HValue** operand,
                         HValue** shift_amount);
 
-  Zone* zone() const { return zone_; }
-
   // The translation state of the currently-being-translated function.
   FunctionState* function_state_;
 
@@ -1261,20 +1293,16 @@ class HGraphBuilder: public AstVisitor {
   // A stack of breakable statements entered.
   BreakAndContinueScope* break_scope_;
 
-  HGraph* graph_;
-  HBasicBlock* current_block_;
-
   int inlined_count_;
   ZoneList<Handle<Object> > globals_;
 
-  Zone* zone_;
-
   bool inline_bailout_;
 
   friend class FunctionState;  // Pushes and pops the state stack.
   friend class AstContext;  // Pushes and pops the AST context stack.
+  friend class KeyedLoadFastElementStub;
 
-  DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
+  DISALLOW_COPY_AND_ASSIGN(HOptimizedGraphBuilder);
 };
 
 
@@ -1447,7 +1475,7 @@ class HPhase BASE_EMBEDDED {
 
 class HTracer: public Malloced {
  public:
-  void TraceCompilation(FunctionLiteral* function);
+  void TraceCompilation(CompilationInfo* info);
   void TraceHydrogen(const char* name, HGraph* graph);
   void TraceLithium(const char* name, LChunk* chunk);
   void TraceLiveRanges(const char* name, LAllocator* allocator);
index 8cccaa5..c915e05 100644 (file)
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -55,6 +55,33 @@ uint64_t CpuFeatures::supported_ = 0;
 uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
 
 
+int IntelDoubleRegister::NumAllocatableRegisters() {
+  if (CpuFeatures::IsSupported(SSE2)) {
+    return XMMRegister::kNumAllocatableRegisters;
+  } else {
+    return X87TopOfStackRegister::kNumAllocatableRegisters;
+  }
+}
+
+
+int IntelDoubleRegister::NumRegisters() {
+  if (CpuFeatures::IsSupported(SSE2)) {
+    return XMMRegister::kNumRegisters;
+  } else {
+    return X87TopOfStackRegister::kNumRegisters;
+  }
+}
+
+
+const char* IntelDoubleRegister::AllocationIndexToString(int index) {
+  if (CpuFeatures::IsSupported(SSE2)) {
+    return XMMRegister::AllocationIndexToString(index);
+  } else {
+    return X87TopOfStackRegister::AllocationIndexToString(index);
+  }
+}
+
+
 // The Probe method needs executable memory, so it uses Heap::CreateCode.
 // Allocation failure is silent and leads to safe default.
 void CpuFeatures::Probe() {
@@ -2199,7 +2226,8 @@ void Assembler::prefetch(const Operand& src, int level) {
   EnsureSpace ensure_space(this);
   EMIT(0x0F);
   EMIT(0x18);
-  XMMRegister code = { level };  // Emit hint number in Reg position of RegR/M.
+  // Emit the hint number in the Reg field of the ModR/M byte.
+  XMMRegister code = XMMRegister(level);
   emit_sse_operand(code, src);
 }
 
index b1f421e..609f949 100644 (file)
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -65,7 +65,10 @@ namespace internal {
 // and best performance in optimized code.
 //
 struct Register {
-  static const int kNumAllocatableRegisters = 6;
+  static const int kMaxNumAllocatableRegisters = 6;
+  static int NumAllocatableRegisters() {
+    return kMaxNumAllocatableRegisters;
+  }
   static const int kNumRegisters = 8;
 
   static inline const char* AllocationIndexToString(int index);
@@ -119,7 +122,7 @@ const Register no_reg = { kRegister_no_reg_Code };
 
 
 inline const char* Register::AllocationIndexToString(int index) {
-  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
   // This is the mapping of allocation indices to registers.
   const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
   return kNames[index];
@@ -133,22 +136,58 @@ inline int Register::ToAllocationIndex(Register reg) {
 
 
 inline Register Register::FromAllocationIndex(int index)  {
-  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
   return (index >= 4) ? from_code(index + 2) : from_code(index);
 }
 
 
-struct XMMRegister {
-  static const int kNumAllocatableRegisters = 7;
-  static const int kNumRegisters = 8;
+struct IntelDoubleRegister {
+  static const int kMaxNumAllocatableRegisters = 7;
+  explicit IntelDoubleRegister(int code) { code_ = code; }
+  static int NumAllocatableRegisters();
+  static int NumRegisters();
+  static const char* AllocationIndexToString(int index);
 
-  static int ToAllocationIndex(XMMRegister reg) {
+  static int ToAllocationIndex(IntelDoubleRegister reg) {
     ASSERT(reg.code() != 0);
     return reg.code() - 1;
   }
 
+  static IntelDoubleRegister FromAllocationIndex(int index) {
+    ASSERT(index >= 0 && index < NumAllocatableRegisters());
+    return from_code(index + 1);
+  }
+
+  static IntelDoubleRegister from_code(int code) {
+    return IntelDoubleRegister(code);
+  }
+
+  bool is_valid() const {
+    return 0 <= code_ && code_ < NumRegisters();
+  }
+  int code() const {
+    ASSERT(is_valid());
+    return code_;
+  }
+
+  int code_;
+};
+
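+// XMMRegister and X87TopOfStackRegister both derive from IntelDoubleRegister,
+// so double-register code can be written against the base type and dispatch
+// on SSE2 support at runtime.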
+struct XMMRegister : IntelDoubleRegister {
+  static const int kNumAllocatableRegisters = 7;
+  static const int kNumRegisters = 8;
+
+  explicit XMMRegister(int code) : IntelDoubleRegister(code) {}
+
+  static XMMRegister from_code(int code) {
+    XMMRegister r = XMMRegister(code);
+    return r;
+  }
+
+  bool is(XMMRegister reg) const { return code_ == reg.code_; }
+
   static XMMRegister FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < NumAllocatableRegisters());
     return from_code(index + 1);
   }
 
@@ -165,34 +204,46 @@ struct XMMRegister {
     };
     return names[index];
   }
+};
 
-  static XMMRegister from_code(int code) {
-    XMMRegister r = { code };
-    return r;
+
+const XMMRegister xmm0 = XMMRegister(0);
+const XMMRegister xmm1 = XMMRegister(1);
+const XMMRegister xmm2 = XMMRegister(2);
+const XMMRegister xmm3 = XMMRegister(3);
+const XMMRegister xmm4 = XMMRegister(4);
+const XMMRegister xmm5 = XMMRegister(5);
+const XMMRegister xmm6 = XMMRegister(6);
+const XMMRegister xmm7 = XMMRegister(7);
+
+struct X87TopOfStackRegister : IntelDoubleRegister {
+  static const int kNumAllocatableRegisters = 1;
+  static const int kNumRegisters = 1;
+
+  explicit X87TopOfStackRegister(int code)
+      : IntelDoubleRegister(code) {}
+
+  bool is(X87TopOfStackRegister reg) const {
+    return code_ == reg.code_;
   }
 
-  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
-  bool is(XMMRegister reg) const { return code_ == reg.code_; }
-  int code() const {
-    ASSERT(is_valid());
-    return code_;
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "st0",
+    };
+    return names[index];
   }
 
-  int code_;
+  static int ToAllocationIndex(X87TopOfStackRegister reg) {
+    ASSERT(reg.code() == 0);
+    return 0;
+  }
 };
 
+const X87TopOfStackRegister x87tos = X87TopOfStackRegister(0);
 
-const XMMRegister xmm0 = { 0 };
-const XMMRegister xmm1 = { 1 };
-const XMMRegister xmm2 = { 2 };
-const XMMRegister xmm3 = { 3 };
-const XMMRegister xmm4 = { 4 };
-const XMMRegister xmm5 = { 5 };
-const XMMRegister xmm6 = { 6 };
-const XMMRegister xmm7 = { 7 };
-
-
-typedef XMMRegister DoubleRegister;
+typedef IntelDoubleRegister DoubleRegister;
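+// DoubleRegister, the name used by platform-independent code, now covers both
+// the SSE2 and the x87 register files.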
 
 
 enum Condition {
index 01785bb..cadff49 100644 (file)
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -574,6 +574,25 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
 
 
+void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across the notification; this is important for
+    // compiled stubs that tail-call the runtime on deopts, passing their
+    // parameters in registers.
+    __ pushad();
+    __ CallRuntime(Runtime::kNotifyICMiss, 0);
+    __ popad();
+    // Tear down internal frame.
+  }
+
+  __ pop(MemOperand(esp, 0));  // Ignore state offset
+  __ ret(0);  // Return to IC Miss stub, continuation still on stack.
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {
index da8e2ae..8ddc934 100644 (file)
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
 namespace v8 {
 namespace internal {
 
+
+CodeStubInterfaceDescriptor*
+    KeyedLoadFastElementStub::GetInterfaceDescriptor(Isolate* isolate) {
+  static CodeStubInterfaceDescriptor* result = NULL;
+  if (result == NULL) {
+    Handle<Code> miss = isolate->builtins()->KeyedLoadIC_Miss();
+    static Register registers[] = { edx, ecx };
+    static CodeStubInterfaceDescriptor info = {
+      2,
+      registers,
+      miss
+    };
+    result = &info;
+  }
+  return result;
+}
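+// The descriptor records the stub's register parameter count, the registers
+// that carry those parameters, and the miss handler that deoptimization falls
+// back to (see Deoptimizer::DoCompiledStubFrame).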
+
+
 #define __ ACCESS_MASM(masm)
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
@@ -2426,6 +2444,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
 
     __ bind(&loaded);
   } else {  // UNTAGGED.
+    CpuFeatures::Scope scope(SSE2);
     if (CpuFeatures::IsSupported(SSE4_1)) {
       CpuFeatures::Scope sse4_scope(SSE4_1);
       __ pextrd(edx, xmm1, 0x1);  // copy xmm1[63..32] to edx.
@@ -2498,6 +2517,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
     __ fstp(0);
     __ ret(kPointerSize);
   } else {  // UNTAGGED.
+    CpuFeatures::Scope scope(SSE2);
     __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ Ret();
   }
@@ -2510,6 +2530,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   if (tagged) {
     __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
   } else {  // UNTAGGED.
+    CpuFeatures::Scope scope(SSE2);
     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
     __ sub(esp, Immediate(kDoubleSize));
     __ movdbl(Operand(esp, 0), xmm1);
@@ -2524,6 +2545,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   if (tagged) {
     __ ret(kPointerSize);
   } else {  // UNTAGGED.
+    CpuFeatures::Scope scope(SSE2);
     __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ Ret();
 
@@ -2556,6 +2578,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
         ExternalReference(RuntimeFunction(), masm->isolate());
     __ TailCallExternalReference(runtime, 1, 1);
   } else {  // UNTAGGED.
+    CpuFeatures::Scope scope(SSE2);
     __ bind(&runtime_call_clear_stack);
     __ bind(&runtime_call);
     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
@@ -4808,10 +4831,17 @@ void CodeStub::GenerateStubsAheadOfTime() {
 
 
 void CodeStub::GenerateFPStubs() {
-  CEntryStub save_doubles(1, kSaveFPRegs);
-  Handle<Code> code = save_doubles.GetCode();
-  code->set_is_pregenerated(true);
-  code->GetIsolate()->set_fp_stubs_generated(true);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CEntryStub save_doubles(1, kSaveFPRegs);
+    // Stubs might already be in the snapshot; detect that and don't
+    // regenerate, since regenerating would leave the code stub
+    // initialization state inconsistent.
+    Code* save_doubles_code;
+    if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
+      save_doubles_code = *(save_doubles.GetCode());
+    }
+    save_doubles_code->set_is_pregenerated(true);
+    save_doubles_code->GetIsolate()->set_fp_stubs_generated(true);
+  }
 }
 
 
index 29c16e1..4f8c81f 100644 (file)
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -38,7 +38,7 @@ namespace internal {
 
 // Compute a transcendental math function natively, or call the
 // TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
  public:
   enum ArgumentType {
     TAGGED = 0,
@@ -61,7 +61,7 @@ class TranscendentalCacheStub: public CodeStub {
 };
 
 
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
  public:
   explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
       : save_doubles_(save_fp) { }
@@ -80,7 +80,7 @@ class StoreBufferOverflowStub: public CodeStub {
 };
 
 
-class UnaryOpStub: public CodeStub {
+class UnaryOpStub: public PlatformCodeStub {
  public:
   UnaryOpStub(Token::Value op,
               UnaryOverwriteMode mode,
@@ -225,7 +225,7 @@ enum StringAddFlags {
 };
 
 
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
  public:
   explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
 
@@ -247,7 +247,7 @@ class StringAddStub: public CodeStub {
 };
 
 
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
  public:
   SubStringStub() {}
 
@@ -259,7 +259,7 @@ class SubStringStub: public CodeStub {
 };
 
 
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
  public:
   StringCompareStub() { }
 
@@ -295,7 +295,7 @@ class StringCompareStub: public CodeStub {
 };
 
 
-class NumberToStringStub: public CodeStub {
+class NumberToStringStub: public PlatformCodeStub {
  public:
   NumberToStringStub() { }
 
@@ -320,7 +320,7 @@ class NumberToStringStub: public CodeStub {
 };
 
 
-class StringDictionaryLookupStub: public CodeStub {
+class StringDictionaryLookupStub: public PlatformCodeStub {
  public:
   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
 
@@ -382,7 +382,7 @@ class StringDictionaryLookupStub: public CodeStub {
 };
 
 
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
  public:
   RecordWriteStub(Register object,
                   Register value,
@@ -585,7 +585,7 @@ class RecordWriteStub: public CodeStub {
     Register GetRegThatIsNotEcxOr(Register r1,
                                   Register r2,
                                   Register r3) {
-      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+      for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
         Register candidate = Register::FromAllocationIndex(i);
         if (candidate.is(ecx)) continue;
         if (candidate.is(r1)) continue;
index 99ad522..bdbaec1 100644 (file)
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -307,7 +307,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
 
 void Deoptimizer::DoComputeOsrOutputFrame() {
   DeoptimizationInputData* data = DeoptimizationInputData::cast(
-      optimized_code_->deoptimization_data());
+      compiled_code_->deoptimization_data());
   unsigned ast_id = data->OsrAstId()->value();
   // TODO(kasperl): This should not be the bailout_id_. It should be
   // the ast id. Confusing.
@@ -344,7 +344,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
   unsigned input_frame_size = input_->GetFrameSize();
   ASSERT(fixed_size + height_in_bytes == input_frame_size);
 
-  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+  unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
   unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
   unsigned outgoing_size = outgoing_height * kPointerSize;
   unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -455,7 +455,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
 
     unsigned pc_offset = data->OsrPcOffset()->value();
     uint32_t pc = reinterpret_cast<uint32_t>(
-        optimized_code_->entry() + pc_offset);
+        compiled_code_->entry() + pc_offset);
     output_[0]->SetPc(pc);
   }
   Code* continuation =
@@ -569,6 +569,70 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
 }
 
 
+void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
+                                      int frame_index) {
+  //
+  //               FROM                                  TO             <-ebp
+  //    |          ....           |          |          ....           |
+  //    +-------------------------+          +-------------------------+
+  //    | JSFunction continuation |          | JSFunction continuation |
+  //    +-------------------------+          +-------------------------+<-esp
+  // |  |   saved frame (ebp)     |
+  // |  +=========================+<-ebp
+  // |  |   JSFunction context    |
+  // v  +-------------------------+
+  //    |   COMPILED_STUB marker  |          ebp = saved frame
+  //    +-------------------------+          esi = JSFunction context
+  //    |                         |
+  //    | ...                     |
+  //    |                         |
+  //    +-------------------------+<-esp
+  //
+  //
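+  // The output frame needs only one slot: the continuation the stub
+  // eventually returns to. Everything else travels in registers.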
+  int output_frame_size = 1 * kPointerSize;
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, 0);
+  Code* notify_miss =
+      isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
+  output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+  output_frame->SetContinuation(
+      reinterpret_cast<uint32_t>(notify_miss->entry()));
+
+  ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
+  int major_key = compiled_code_->major_key();
+  CodeStubInterfaceDescriptor* descriptor =
+      isolate_->code_stub_interface_descriptors()[major_key];
+  Handle<Code> miss_ic(descriptor->deoptimization_handler);
+  output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
+  unsigned input_frame_size = input_->GetFrameSize();
+  intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
+  output_frame->SetFrameSlot(0, value);
+  value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
+  output_frame->SetRegister(ebp.code(), value);
+  output_frame->SetFp(value);
+  value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
+  output_frame->SetRegister(esi.code(), value);
+
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  ASSERT(opcode == Translation::REGISTER);
+  USE(opcode);
+  int input_reg = iterator->Next();
+  intptr_t input_value = input_->GetRegister(input_reg);
+  output_frame->SetRegister(edx.code(), input_value);
+
+  int32_t next = iterator->Next();
+  opcode = static_cast<Translation::Opcode>(next);
+  ASSERT(opcode == Translation::REGISTER);
+  input_reg = iterator->Next();
+  input_value = input_->GetRegister(input_reg);
+  output_frame->SetRegister(ecx.code(), input_value);
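+  // The two REGISTER translation entries above are the stub's register
+  // parameters (the receiver and key for the keyed load stub); they are
+  // forwarded to the miss handler in edx and ecx.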
+
+  ASSERT(frame_index == 0);
+  output_[frame_index] = output_frame;
+}
+
+
 void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
                                               int frame_index) {
   Builtins* builtins = isolate_->builtins();
@@ -997,7 +1061,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
   }
   input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
   input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
     input_->SetDoubleRegister(i, 0.0);
   }
 
@@ -1012,7 +1076,6 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
 
 void Deoptimizer::EntryGenerator::Generate() {
   GeneratePrologue();
-  CpuFeatures::Scope scope(SSE2);
 
   Isolate* isolate = masm()->isolate();
 
@@ -1022,10 +1085,13 @@ void Deoptimizer::EntryGenerator::Generate() {
   const int kDoubleRegsSize = kDoubleSize *
                               XMMRegister::kNumAllocatableRegisters;
   __ sub(esp, Immediate(kDoubleRegsSize));
-  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
-    XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
-    int offset = i * kDoubleSize;
-    __ movdbl(Operand(esp, offset), xmm_reg);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope scope(SSE2);
+    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+      XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+      int offset = i * kDoubleSize;
+      __ movdbl(Operand(esp, offset), xmm_reg);
+    }
   }
 
   __ pushad();
@@ -1073,14 +1139,18 @@ void Deoptimizer::EntryGenerator::Generate() {
     __ pop(Operand(ebx, offset));
   }
 
-  // Fill in the double input registers.
   int double_regs_offset = FrameDescription::double_registers_offset();
-  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
-    int dst_offset = i * kDoubleSize + double_regs_offset;
-    int src_offset = i * kDoubleSize;
-    __ movdbl(xmm0, Operand(esp, src_offset));
-    __ movdbl(Operand(ebx, dst_offset), xmm0);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope scope(SSE2);
+    // Fill in the double input registers.
+    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+      int dst_offset = i * kDoubleSize + double_regs_offset;
+      int src_offset = i * kDoubleSize;
+      __ movdbl(xmm0, Operand(esp, src_offset));
+      __ movdbl(Operand(ebx, dst_offset), xmm0);
+    }
   }
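+  // Reinitialize the x87 FPU so execution resumes with a clean FPU stack;
+  // when SSE2 is unavailable, the deoptimized code may have left values on
+  // the x87 stack.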
+  __ fninit();
 
   // Remove the bailout id and the double registers from the stack.
   if (type() == EAGER) {
@@ -1098,10 +1168,13 @@ void Deoptimizer::EntryGenerator::Generate() {
   // limit and copy the contents of the activation frame to the input
   // frame description.
   __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
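+  // Enter the loop at its test so a frame with nothing left to copy is
+  // handled correctly.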
+  Label pop_loop_header;
+  __ jmp(&pop_loop_header);
   Label pop_loop;
   __ bind(&pop_loop);
   __ pop(Operand(edx, 0));
   __ add(edx, Immediate(sizeof(uint32_t)));
+  __ bind(&pop_loop_header);
   __ cmp(ecx, esp);
   __ j(not_equal, &pop_loop);
 
@@ -1139,31 +1212,39 @@ void Deoptimizer::EntryGenerator::Generate() {
   }
 
   // Replace the current frame with the output frames.
-  Label outer_push_loop, inner_push_loop;
+  Label outer_push_loop, inner_push_loop,
+      outer_loop_header, inner_loop_header;
   // Outer loop state: eax = current FrameDescription**, edx = one past the
   // last FrameDescription**.
   __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
   __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
   __ lea(edx, Operand(eax, edx, times_4, 0));
+  __ jmp(&outer_loop_header);
   __ bind(&outer_push_loop);
   // Inner loop state: ebx = current FrameDescription*, ecx = loop index.
   __ mov(ebx, Operand(eax, 0));
   __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
+  __ jmp(&inner_loop_header);
   __ bind(&inner_push_loop);
   __ sub(ecx, Immediate(sizeof(uint32_t)));
   __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
+  __ bind(&inner_loop_header);
   __ test(ecx, ecx);
   __ j(not_zero, &inner_push_loop);
   __ add(eax, Immediate(kPointerSize));
+  __ bind(&outer_loop_header);
   __ cmp(eax, edx);
   __ j(below, &outer_push_loop);
 
   // In case of OSR, we have to restore the XMM registers.
   if (type() == OSR) {
-    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
-      XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
-      int src_offset = i * kDoubleSize + double_regs_offset;
-      __ movdbl(xmm_reg, Operand(ebx, src_offset));
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope scope(SSE2);
+      for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+        XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+        int src_offset = i * kDoubleSize + double_regs_offset;
+        __ movdbl(xmm_reg, Operand(ebx, src_offset));
+      }
     }
   }
 
index de60451..0e00033 100644 (file)
@@ -30,6 +30,7 @@
 #if defined(V8_TARGET_ARCH_IA32)
 
 #include "ia32/lithium-codegen-ia32.h"
+#include "ic.h"
 #include "code-stubs.h"
 #include "deoptimizer.h"
 #include "stub-cache.h"
@@ -70,7 +71,6 @@ bool LCodeGen::GenerateCode() {
   HPhase phase("Z_Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
-  CpuFeatures::Scope scope(SSE2);
 
   CodeStub::GenerateFPStubs();
 
@@ -79,13 +79,15 @@ bool LCodeGen::GenerateCode() {
   // the frame (that is done in GeneratePrologue).
   FrameScope frame_scope(masm_, StackFrame::MANUAL);
 
-  dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 &&
-                              !chunk()->graph()->is_recursive()) ||
-                             !info()->osr_ast_id().IsNone();
+  dynamic_frame_alignment_ = info()->IsOptimizing() &&
+      ((chunk()->num_double_slots() > 2 &&
+        !chunk()->graph()->is_recursive()) ||
+       !info()->osr_ast_id().IsNone());
 
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
+      GenerateJumpTable() &&
       GenerateSafepointTable();
 }
 
@@ -95,7 +97,9 @@ void LCodeGen::FinishCode(Handle<Code> code) {
   code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
-  Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
+  if (!info()->IsStub()) {
+    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
+  }
 }
 
 
@@ -126,113 +130,126 @@ void LCodeGen::Comment(const char* format, ...) {
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
-  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
 #ifdef DEBUG
-  if (strlen(FLAG_stop_at) > 0 &&
-      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
-    __ int3();
-  }
+    if (strlen(FLAG_stop_at) > 0 &&
+        info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+      __ int3();
+    }
 #endif
 
-  // Strict mode functions and builtins need to replace the receiver
-  // with undefined when called as functions (without an explicit
-  // receiver object). ecx is zero for method calls and non-zero for
-  // function calls.
-  if (!info_->is_classic_mode() || info_->is_native()) {
-    Label ok;
-    __ test(ecx, Operand(ecx));
-    __ j(zero, &ok, Label::kNear);
-    // +1 for return address.
-    int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
-    __ mov(Operand(esp, receiver_offset),
-           Immediate(isolate()->factory()->undefined_value()));
-    __ bind(&ok);
-  }
-
-
-  if (dynamic_frame_alignment_) {
-    // Move state of dynamic frame alignment into edx.
-    __ mov(edx, Immediate(kNoAlignmentPadding));
+    // Strict mode functions and builtins need to replace the receiver
+    // with undefined when called as functions (without an explicit
+    // receiver object). ecx is zero for method calls and non-zero for
+    // function calls.
+    if (!info_->is_classic_mode() || info_->is_native()) {
+      Label ok;
+      __ test(ecx, Operand(ecx));
+      __ j(zero, &ok, Label::kNear);
+      // +1 for return address.
+      int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+      __ mov(Operand(esp, receiver_offset),
+             Immediate(isolate()->factory()->undefined_value()));
+      __ bind(&ok);
+    }
 
-    Label do_not_pad, align_loop;
-    STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
-    // Align esp + 4 to a multiple of 2 * kPointerSize.
-    __ test(esp, Immediate(kPointerSize));
-    __ j(not_zero, &do_not_pad, Label::kNear);
-    __ push(Immediate(0));
-    __ mov(ebx, esp);
-    __ mov(edx, Immediate(kAlignmentPaddingPushed));
-    // Copy arguments, receiver, and return address.
-    __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
-    __ bind(&align_loop);
-    __ mov(eax, Operand(ebx, 1 * kPointerSize));
-    __ mov(Operand(ebx, 0), eax);
-    __ add(Operand(ebx), Immediate(kPointerSize));
-    __ dec(ecx);
-    __ j(not_zero, &align_loop, Label::kNear);
-    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
-    __ bind(&do_not_pad);
+    if (dynamic_frame_alignment_) {
+      // Move state of dynamic frame alignment into edx.
+      __ mov(edx, Immediate(kNoAlignmentPadding));
+
+      Label do_not_pad, align_loop;
+      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+      // Align esp + 4 to a multiple of 2 * kPointerSize.
+      __ test(esp, Immediate(kPointerSize));
+      __ j(not_zero, &do_not_pad, Label::kNear);
+      __ push(Immediate(0));
+      __ mov(ebx, esp);
+      __ mov(edx, Immediate(kAlignmentPaddingPushed));
+      // Copy arguments, receiver, and return address.
+      __ mov(ecx, Immediate(scope()->num_parameters() + 2));
+
+      __ bind(&align_loop);
+      __ mov(eax, Operand(ebx, 1 * kPointerSize));
+      __ mov(Operand(ebx, 0), eax);
+      __ add(Operand(ebx), Immediate(kPointerSize));
+      __ dec(ecx);
+      __ j(not_zero, &align_loop, Label::kNear);
+      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+      __ bind(&do_not_pad);
+    }
   }
 
   info()->set_prologue_offset(masm_->pc_offset());
-  __ push(ebp);  // Caller's frame pointer.
-  __ mov(ebp, esp);
-  __ push(esi);  // Callee's context.
-  __ push(edi);  // Callee's JS function.
+  if (NeedsEagerFrame()) {
+    ASSERT(!frame_is_built_);
+    frame_is_built_ = true;
+    __ push(ebp);  // Caller's frame pointer.
+    __ mov(ebp, esp);
+    __ push(esi);  // Callee's context.
+    if (info()->IsStub()) {
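+      // Stubs have no JS function to install in the frame; push a marker.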
+      __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+    } else {
+      __ push(edi);  // Callee's JS function.
+    }
+  }
 
-  if (dynamic_frame_alignment_ && FLAG_debug_code) {
+  if (info()->IsOptimizing() &&
+      dynamic_frame_alignment_ &&
+      FLAG_debug_code) {
     __ test(esp, Immediate(kPointerSize));
     __ Assert(zero, "frame is expected to be aligned");
   }
 
   // Reserve space for the stack slots needed by the code.
   int slots = GetStackSlotCount();
-  ASSERT_GE(slots, 1);
-  if (slots == 1) {
-    if (dynamic_frame_alignment_) {
-      __ push(edx);
-    } else {
-      __ push(Immediate(kNoAlignmentPadding));
-    }
-  } else {
-    if (FLAG_debug_code) {
-      __ mov(Operand(eax), Immediate(slots));
-      Label loop;
-      __ bind(&loop);
-      __ push(Immediate(kSlotsZapValue));
-      __ dec(eax);
-      __ j(not_zero, &loop);
+  ASSERT(slots != 0 || !info()->IsOptimizing());
+  if (slots > 0) {
+    if (slots == 1) {
+      if (dynamic_frame_alignment_) {
+        __ push(edx);
+      } else {
+        __ push(Immediate(kNoAlignmentPadding));
+      }
     } else {
-      __ sub(Operand(esp), Immediate(slots * kPointerSize));
-  #ifdef _MSC_VER
-      // On windows, you may not access the stack more than one page below
-      // the most recently mapped page. To make the allocated area randomly
-      // accessible, we write to each page in turn (the value is irrelevant).
-      const int kPageSize = 4 * KB;
-      for (int offset = slots * kPointerSize - kPageSize;
-           offset > 0;
-           offset -= kPageSize) {
-        __ mov(Operand(esp, offset), eax);
+      if (FLAG_debug_code) {
+        __ mov(Operand(eax), Immediate(slots));
+        Label loop;
+        __ bind(&loop);
+        __ push(Immediate(kSlotsZapValue));
+        __ dec(eax);
+        __ j(not_zero, &loop);
+      } else {
+        __ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+        // On Windows, you may not access the stack more than one page below
+        // the most recently mapped page. To make the allocated area randomly
+        // accessible, we write to each page in turn (the value is irrelevant).
+        const int kPageSize = 4 * KB;
+        for (int offset = slots * kPointerSize - kPageSize;
+             offset > 0;
+             offset -= kPageSize) {
+          __ mov(Operand(esp, offset), eax);
+        }
+#endif
       }
-  #endif
-    }
 
-    // Store dynamic frame alignment state in the first local.
-    if (dynamic_frame_alignment_) {
-      __ mov(Operand(ebp,
-                     JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
-             edx);
-    } else {
-      __ mov(Operand(ebp,
-                     JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
-             Immediate(kNoAlignmentPadding));
+      // Store dynamic frame alignment state in the first local.
+      if (dynamic_frame_alignment_) {
+        __ mov(Operand(ebp,
+                       JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
+               edx);
+      } else {
+        __ mov(Operand(ebp,
+                       JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
+               Immediate(kNoAlignmentPadding));
+      }
     }
   }
 
   // Possibly allocate a local context.
-  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is still in edi.
@@ -272,7 +289,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   // Trace the call.
-  if (FLAG_trace) {
+  if (FLAG_trace && info()->IsOptimizing()) {
     // We have not executed any compiled code yet, so esi still holds the
     // incoming context.
     __ CallRuntime(Runtime::kTraceEnter, 0);
@@ -326,16 +343,102 @@ bool LCodeGen::GenerateBody() {
 }
 
 
+bool LCodeGen::GenerateJumpTable() {
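+  // Trampolines shared by all entries that must build a stub frame before
+  // deopting: one for the lazy-deopt (call) case and one for the eager case.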
+  Label needs_frame_not_call;
+  Label needs_frame_is_call;
+  for (int i = 0; i < jump_table_.length(); i++) {
+    __ bind(&jump_table_[i].label);
+    Address entry = jump_table_[i].address;
+    if (jump_table_[i].needs_frame) {
+      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
+      if (jump_table_[i].is_lazy_deopt) {
+        if (needs_frame_is_call.is_bound()) {
+          __ jmp(&needs_frame_is_call);
+        } else {
+          __ bind(&needs_frame_is_call);
+          __ push(esi);
+          // This variant of deopt can only be used with stubs. Since we don't
+          // have a function pointer to install in the stack frame that we're
+          // building, install a special marker there instead.
+          ASSERT(info()->IsStub());
+          __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+          // Push a PC inside the function so that the deopt code can find where
+          // the deopt comes from. It doesn't have to be the precise return
+          // address of a "calling" LAZY deopt; it only has to be somewhere
+          // inside the code body.
+          Label push_approx_pc;
+          __ call(&push_approx_pc);
+          __ bind(&push_approx_pc);
+          // Push the continuation which was stashed where the ebp should
+          // be. Replace it with the saved ebp.
+          __ push(MemOperand(esp, 3 * kPointerSize));
+          __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
+          __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+          __ ret(0);  // Call the continuation without clobbering registers.
+        }
+      } else {
+        if (needs_frame_not_call.is_bound()) {
+          __ jmp(&needs_frame_not_call);
+        } else {
+          __ bind(&needs_frame_not_call);
+          __ push(esi);
+          // This variant of deopt can only be used with stubs. Since we don't
+          // have a function pointer to install in the stack frame that we're
+          // building, install a special marker there instead.
+          ASSERT(info()->IsStub());
+          __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+          // Push the continuation which was stashed where the ebp should
+          // be. Replace it with the saved ebp.
+          __ push(MemOperand(esp, 2 * kPointerSize));
+          __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
+          __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
+          __ ret(0);  // Call the continuation without clobbering registers.
+        }
+      }
+    } else {
+      if (jump_table_[i].is_lazy_deopt) {
+        __ call(entry, RelocInfo::RUNTIME_ENTRY);
+      } else {
+        __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+      }
+    }
+  }
+  return !is_aborted();
+}
+
+
 bool LCodeGen::GenerateDeferredCode() {
   ASSERT(is_generating());
   if (deferred_.length() > 0) {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Deferred build frame",
+                code->instruction_index(),
+                code->instr()->Mnemonic());
+        ASSERT(!frame_is_built_);
+        ASSERT(info()->IsStub());
+        frame_is_built_ = true;
+        // Build the frame in such a way that esi isn't trashed.
+        __ push(ebp);  // Caller's frame pointer.
+        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
+        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+        __ lea(ebp, Operand(esp, 2 * kPointerSize));
+      }
       Comment(";;; Deferred code @%d: %s.",
               code->instruction_index(),
               code->instr()->Mnemonic());
       code->Generate();
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Deferred destroy frame",
+                code->instruction_index(),
+                code->instr()->Mnemonic());
+        ASSERT(frame_is_built_);
+        frame_is_built_ = false;
+        __ mov(esp, ebp);
+        __ pop(ebp);
+      }
       __ jmp(code->exit());
     }
   }
@@ -349,6 +452,15 @@ bool LCodeGen::GenerateDeferredCode() {
 
 bool LCodeGen::GenerateSafepointTable() {
   ASSERT(is_done());
+  if (!info()->IsStub()) {
+    // For lazy deoptimization we need space to patch a call after every call.
+    // Ensure there is always space for such patching, even if the code ends
+    // in a call.
+    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
+    while (masm()->pc_offset() < target_offset) {
+      masm()->nop();
+    }
+  }
   safepoints_.Emit(masm(), GetStackSlotCount());
   return !is_aborted();
 }
@@ -364,6 +476,11 @@ XMMRegister LCodeGen::ToDoubleRegister(int index) const {
 }
 
 
+bool LCodeGen::IsX87TopOfStack(LOperand* op) const {
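+  // Without SSE2 the only double "register" is the x87 top of stack.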
+  return op->IsDoubleRegister();
+}
+
+
 Register LCodeGen::ToRegister(LOperand* op) const {
   ASSERT(op->IsRegister());
   return ToRegister(op->index());
@@ -449,7 +566,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
                    translation,
                    arguments_index,
                    arguments_count);
-  int closure_id = *info()->closure() != *environment->closure()
+  bool has_closure_id = !info()->closure().is_null() &&
+      *info()->closure() != *environment->closure();
+  int closure_id = has_closure_id
       ? DefineDeoptimizationLiteral(environment->closure())
       : Translation::kSelfLiteralId;
   switch (environment->frame_type()) {
@@ -472,6 +591,11 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
     case ARGUMENTS_ADAPTOR:
       translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
       break;
+    case STUB:
+      translation->BeginCompiledStubFrame();
+      break;
+    default:
+      UNREACHABLE();
   }
 
   // Inlined frames which push their arguments cause the index to be
@@ -606,6 +730,8 @@ void LCodeGen::CallRuntime(const Runtime::Function* fun,
   __ CallRuntime(fun, argc);
 
   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+
+  ASSERT(info()->is_calling());
 }
 
 
@@ -630,6 +756,8 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
   __ CallRuntimeSaveDoubles(id);
   RecordSafepointWithRegisters(
       instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+
+  ASSERT(info()->is_calling());
 }
 
 
@@ -675,7 +803,11 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
-  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+  ASSERT(info()->IsOptimizing() || info()->IsStub());
+  Deoptimizer::BailoutType bailout_type = frame_is_built_
+      ? Deoptimizer::EAGER
+      : Deoptimizer::LAZY;
+  Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
   if (entry == NULL) {
     Abort("bailout was not prepared");
     return;
@@ -709,19 +841,44 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
     __ popfd();
   }
 
+  ASSERT(info()->IsStub() || frame_is_built_);
+  bool lazy_deopt_needed = info()->IsStub();
   if (cc == no_condition) {
     if (FLAG_trap_on_deopt) __ int3();
-    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+    if (lazy_deopt_needed) {
+      __ call(entry, RelocInfo::RUNTIME_ENTRY);
+    } else {
+      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+    }
   } else {
+    Label done;
     if (FLAG_trap_on_deopt) {
-      Label done;
       __ j(NegateCondition(cc), &done, Label::kNear);
       __ int3();
-      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
-      __ bind(&done);
+    }
+    if (!lazy_deopt_needed && frame_is_built_) {
+      if (FLAG_trap_on_deopt) {
+        __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+      } else {
+        __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
+      }
     } else {
-      __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
+      // We often have several deopts to the same entry, reuse the last
+      // jump entry if this is the case.
+      if (jump_table_.is_empty() ||
+          jump_table_.last().address != entry ||
+          jump_table_.last().needs_frame != !frame_is_built_ ||
+          jump_table_.last().is_lazy_deopt != lazy_deopt_needed) {
+        JumpTableEntry table_entry(entry, !frame_is_built_, lazy_deopt_needed);
+        jump_table_.Add(table_entry, zone());
+      }
+      if (FLAG_trap_on_deopt) {
+        __ jmp(&jump_table_.last().label);
+      } else {
+        __ j(cc, &jump_table_.last().label);
+      }
     }
+    __ bind(&done);
   }
 }
 
@@ -1422,7 +1579,8 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
     int32_t lower = static_cast<int32_t>(int_val);
     int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
     if (CpuFeatures::IsSupported(SSE4_1)) {
-      CpuFeatures::Scope scope(SSE4_1);
+      CpuFeatures::Scope scope1(SSE2);
+      CpuFeatures::Scope scope2(SSE4_1);
       if (lower != 0) {
         __ Set(temp, Immediate(lower));
         __ movd(res, Operand(temp));
@@ -1434,6 +1592,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
         __ pinsrd(res, Operand(temp), 1);
       }
     } else {
+      CpuFeatures::Scope scope(SSE2);
       __ Set(temp, Immediate(upper));
       __ movd(res, Operand(temp));
       __ psllq(res, 32);
@@ -1587,6 +1746,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
 
 
 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+  CpuFeatures::Scope scope(SSE2);
   LOperand* left = instr->left();
   LOperand* right = instr->right();
   ASSERT(left->Equals(instr->result()));
@@ -1648,6 +1808,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
 
 
 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  CpuFeatures::Scope scope(SSE2);
   XMMRegister left = ToDoubleRegister(instr->left());
   XMMRegister right = ToDoubleRegister(instr->right());
   XMMRegister result = ToDoubleRegister(instr->result());
@@ -1658,8 +1819,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
       __ addsd(left, right);
       break;
     case Token::SUB:
-       __ subsd(left, right);
-       break;
+      __ subsd(left, right);
+      break;
     case Token::MUL:
       __ mulsd(left, right);
       break;
@@ -1732,6 +1893,7 @@ void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
 void LCodeGen::DoBranch(LBranch* instr) {
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
+  CpuFeatures::Scope scope(SSE2);
 
   Representation r = instr->hydrogen()->value()->representation();
   if (r.IsInteger32()) {
@@ -1891,6 +2053,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
   int false_block = chunk_->LookupDestination(instr->false_block_id());
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   Condition cc = TokenToCondition(instr->op(), instr->is_double());
+  CpuFeatures::Scope scope(SSE2);
 
   if (left->IsConstantOperand() && right->IsConstantOperand()) {
     // We can statically evaluate the comparison.
@@ -2400,7 +2563,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
 
 
 void LCodeGen::DoReturn(LReturn* instr) {
-  if (FLAG_trace) {
+  if (FLAG_trace && info()->IsOptimizing()) {
     // Preserve the return value on the stack and rely on the runtime call
     // to return the value in the same register.  We're leaving the code
     // managed by the register allocator and tearing down the frame, it's
@@ -2414,8 +2577,10 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ mov(edx, Operand(ebp,
       JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
   }
-  __ mov(esp, ebp);
-  __ pop(ebp);
+  if (NeedsEagerFrame()) {
+    __ mov(esp, ebp);
+    __ pop(ebp);
+  }
   if (dynamic_frame_alignment_) {
     Label no_padding;
     __ cmp(edx, Immediate(kNoAlignmentPadding));
@@ -2428,7 +2593,12 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
     __ bind(&no_padding);
   }
-  __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
+  if (info()->IsStub()) {
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ Ret();
+  } else {
+    __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
+  }
 }
 
 
@@ -2804,11 +2974,23 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
       0,
       instr->additional_index()));
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-    XMMRegister result(ToDoubleRegister(instr->result()));
-    __ movss(result, operand);
-    __ cvtss2sd(result, result);
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope scope(SSE2);
+      XMMRegister result(ToDoubleRegister(instr->result()));
+      __ movss(result, operand);
+      __ cvtss2sd(result, result);
+    } else {
+      __ fld_s(operand);
+      HandleX87FPReturnValue(instr);
+    }
   } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-    __ movdbl(ToDoubleRegister(instr->result()), operand);
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope scope(SSE2);
+      __ movdbl(ToDoubleRegister(instr->result()), operand);
+    } else {
+      __ fld_d(operand);
+      HandleX87FPReturnValue(instr);
+    }
   } else {
     Register result(ToRegister(instr->result()));
     switch (elements_kind) {
@@ -2852,9 +3034,30 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
 }
 
 
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
-  XMMRegister result = ToDoubleRegister(instr->result());
+void LCodeGen::HandleX87FPReturnValue(LInstruction* instr) {
+  if (IsX87TopOfStack(instr->result())) {
+    // Return value is already on stack. If the value has no uses, then
+    // pop it off the FP stack. Otherwise, make sure that there are enough
+    // copies of the value on the stack to feed all of the usages, e.g.
+    // when the following instruction uses the return value in multiple
+    // inputs.
+    int count = instr->hydrogen_value()->UseCount();
+    if (count == 0) {
+      __ fstp(0);
+    } else {
+      count--;
+      ASSERT(count <= 7);
+      while (count-- > 0) {
+        __ fld(0);
+      }
+    }
+  } else {
+    __ fstp_d(ToOperand(instr->result()));
+  }
+}
+
 
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
   if (instr->hydrogen()->RequiresHoleCheck()) {
     int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
         sizeof(kHoleNanLower32);
@@ -2875,7 +3078,14 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
       FAST_DOUBLE_ELEMENTS,
       FixedDoubleArray::kHeaderSize - kHeapObjectTag,
       instr->additional_index());
-  __ movdbl(result, double_load_operand);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope scope(SSE2);
+    XMMRegister result = ToDoubleRegister(instr->result());
+    __ movdbl(result, double_load_operand);
+  } else {
+    __ fld_d(double_load_operand);
+    HandleX87FPReturnValue(instr);
+  }
 }
 
 
@@ -3291,6 +3501,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
   ASSERT(instr->value()->Equals(instr->result()));
   Representation r = instr->hydrogen()->value()->representation();
 
+  CpuFeatures::Scope scope(SSE2);
   if (r.IsDouble()) {
     XMMRegister  scratch = xmm0;
     XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3312,6 +3523,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+  CpuFeatures::Scope scope(SSE2);
   XMMRegister xmm_scratch = xmm0;
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3376,6 +3588,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
 }
 
 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+  CpuFeatures::Scope scope(SSE2);
   XMMRegister xmm_scratch = xmm0;
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3421,6 +3634,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+  CpuFeatures::Scope scope(SSE2);
   XMMRegister input_reg = ToDoubleRegister(instr->value());
   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
   __ sqrtsd(input_reg, input_reg);
@@ -3428,6 +3642,7 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+  CpuFeatures::Scope scope(SSE2);
   XMMRegister xmm_scratch = xmm0;
   XMMRegister input_reg = ToDoubleRegister(instr->value());
   Register scratch = ToRegister(instr->temp());
@@ -3504,6 +3719,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
 
   DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
 
+  CpuFeatures::Scope scope(SSE2);
   // Having marked this instruction as a call we can use any
   // registers.
   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
@@ -3571,6 +3787,7 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
 
 
 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+  CpuFeatures::Scope scope(SSE2);
   ASSERT(instr->value()->Equals(instr->result()));
   XMMRegister input_reg = ToDoubleRegister(instr->value());
   Label positive, done, zero;
@@ -3602,6 +3819,7 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoMathExp(LMathExp* instr) {
+  CpuFeatures::Scope scope(SSE2);
   XMMRegister input = ToDoubleRegister(instr->value());
   XMMRegister result = ToDoubleRegister(instr->result());
   Register temp1 = ToRegister(instr->temp1());
@@ -3870,6 +4088,11 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
     }
     DeoptimizeIf(below_equal, instr->environment());
   } else {
+    if (instr->hydrogen()->index()->representation().IsTagged() &&
+        !instr->hydrogen()->index()->type().IsSmi()) {
+      __ test(ToRegister(instr->index()), Immediate(kSmiTagMask));
+      DeoptimizeIf(not_zero, instr->environment());
+    }
     __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
     DeoptimizeIf(above_equal, instr->environment());
   }
@@ -3892,9 +4115,11 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
       0,
       instr->additional_index()));
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+    CpuFeatures::Scope scope(SSE2);
     __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
     __ movss(operand, xmm0);
   } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    CpuFeatures::Scope scope(SSE2);
     __ movdbl(operand, ToDoubleRegister(instr->value()));
   } else {
     Register value = ToRegister(instr->value());
@@ -3930,6 +4155,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
 
 
 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+  CpuFeatures::Scope scope(SSE2);
   XMMRegister value = ToDoubleRegister(instr->value());
 
   if (instr->NeedsCanonicalization()) {
@@ -4180,15 +4406,21 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
 
 
 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
-  LOperand* input = instr->value();
-  ASSERT(input->IsRegister() || input->IsStackSlot());
-  LOperand* output = instr->result();
-  ASSERT(output->IsDoubleRegister());
-  __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope scope(SSE2);
+    LOperand* input = instr->value();
+    ASSERT(input->IsRegister() || input->IsStackSlot());
+    LOperand* output = instr->result();
+    ASSERT(output->IsDoubleRegister());
+    __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+  } else {
+    UNREACHABLE();
+  }
 }
 
 
 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+  CpuFeatures::Scope scope(SSE2);
   LOperand* input = instr->value();
   LOperand* output = instr->result();
   LOperand* temp = instr->temp();
@@ -4266,9 +4498,21 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
     // the value in there. If that fails, call the runtime system.
     __ SmiUntag(reg);
     __ xor_(reg, 0x80000000);
-    __ cvtsi2sd(xmm0, Operand(reg));
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope feature_scope(SSE2);
+      __ cvtsi2sd(xmm0, Operand(reg));
+    } else {
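+      // No SSE2: convert the untagged integer via the x87 FPU, going
+      // through memory because fild takes a memory operand.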
+      __ push(reg);
+      __ fild_s(Operand(esp, 0));
+      __ pop(reg);
+    }
   } else {
-    __ LoadUint32(xmm0, reg, xmm1);
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope feature_scope(SSE2);
+      __ LoadUint32(xmm0, reg, xmm1);
+    } else {
+      UNREACHABLE();
+    }
   }
 
   if (FLAG_inline_new) {
@@ -4297,7 +4541,12 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
   // Done. Put the value in xmm0 into the value of the allocated heap
   // number.
   __ bind(&done);
-  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope feature_scope(SSE2);
+    __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+  } else {
+    __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+  }
   __ StoreToSafepointRegisterSlot(reg, reg);
 }
 
@@ -4313,7 +4562,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
     LNumberTagD* instr_;
   };
 
-  XMMRegister input_reg = ToDoubleRegister(instr->value());
   Register reg = ToRegister(instr->result());
   Register tmp = ToRegister(instr->temp());
 
@@ -4324,7 +4572,16 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
     __ jmp(deferred->entry());
   }
   __ bind(deferred->exit());
-  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope scope(SSE2);
+    XMMRegister input_reg = ToDoubleRegister(instr->value());
+    __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+  } else {
+    if (!IsX87TopOfStack(instr->value())) {
+      __ fld_d(ToOperand(instr->value()));
+    }
+    __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+  }
 }
 
 
@@ -4481,7 +4738,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
       DeoptimizeIf(not_equal, instr->environment());
       DeoptimizeIf(parity_even, instr->environment());  // NaN.
     }
-  } else {
+  } else if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope scope(SSE2);
     // Deoptimize if we don't have a heap number.
     __ RecordComment("Deferred TaggedToI: not a heap number");
     DeoptimizeIf(not_equal, instr->environment());
@@ -4503,6 +4761,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
       __ RecordComment("Deferred TaggedToI: minus zero");
       DeoptimizeIf(not_zero, instr->environment());
     }
+  } else {
+    UNREACHABLE();
   }
   __ bind(&done);
 }
@@ -4545,19 +4805,24 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   LOperand* result = instr->result();
   ASSERT(result->IsDoubleRegister());
 
-  Register input_reg = ToRegister(input);
-  XMMRegister result_reg = ToDoubleRegister(result);
-
-  bool deoptimize_on_minus_zero =
-      instr->hydrogen()->deoptimize_on_minus_zero();
-  Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
-
-  EmitNumberUntagD(input_reg,
-                   temp_reg,
-                   result_reg,
-                   instr->hydrogen()->deoptimize_on_undefined(),
-                   deoptimize_on_minus_zero,
-                   instr->environment());
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope scope(SSE2);
+    Register input_reg = ToRegister(input);
+    XMMRegister result_reg = ToDoubleRegister(result);
+
+    bool deoptimize_on_minus_zero =
+        instr->hydrogen()->deoptimize_on_minus_zero();
+    Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
+
+    EmitNumberUntagD(input_reg,
+                     temp_reg,
+                     result_reg,
+                     instr->hydrogen()->deoptimize_on_undefined(),
+                     deoptimize_on_minus_zero,
+                     instr->environment());
+  } else {
+    UNIMPLEMENTED();
+  }
 }
 
 
@@ -4566,6 +4831,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   ASSERT(input->IsDoubleRegister());
   LOperand* result = instr->result();
   ASSERT(result->IsRegister());
+  CpuFeatures::Scope scope(SSE2);
 
   XMMRegister input_reg = ToDoubleRegister(input);
   Register result_reg = ToRegister(result);
@@ -4755,10 +5021,10 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
 void LCodeGen::DoCheckMapCommon(Register reg,
                                 Handle<Map> map,
                                 CompareMapMode mode,
-                                LEnvironment* env) {
+                                LInstruction* instr) {
   Label success;
   __ CompareMap(reg, map, &success, mode);
-  DeoptimizeIf(not_equal, env);
+  DeoptimizeIf(not_equal, instr->environment());
   __ bind(&success);
 }
 
@@ -4776,12 +5042,13 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
     __ j(equal, &success);
   }
   Handle<Map> map = map_set->last();
-  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
+  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
   __ bind(&success);
 }
 
 
 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  CpuFeatures::Scope scope(SSE2);
   XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
   Register result_reg = ToRegister(instr->result());
   __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
@@ -4796,6 +5063,8 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
 
 
 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  CpuFeatures::Scope scope(SSE2);
+
   ASSERT(instr->unclamped()->Equals(instr->result()));
   Register input_reg = ToRegister(instr->unclamped());
   Label is_smi, done, heap_number;
@@ -4842,7 +5111,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
     DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
-                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+                     ALLOW_ELEMENT_TRANSITION_MAPS, instr);
 
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
@@ -4852,7 +5121,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
 
   // Check the holder map.
   DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
-                   ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+                   ALLOW_ELEMENT_TRANSITION_MAPS, instr);
 }
 
 
@@ -5389,13 +5658,15 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
 
 
 void LCodeGen::EnsureSpaceForLazyDeopt() {
-  // Ensure that we have enough space after the previous lazy-bailout
-  // instruction for patching the code here.
-  int current_pc = masm()->pc_offset();
-  int patch_size = Deoptimizer::patch_size();
-  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
-    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
-    __ Nop(padding_size);
+  if (!info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    int patch_size = Deoptimizer::patch_size();
+    if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+      int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+      __ Nop(padding_size);
+    }
   }
   last_lazy_deopt_pc_ = masm()->pc_offset();
 }
index 44ddaff..63d15f4 100644 (file)
@@ -55,6 +55,7 @@ class LCodeGen BASE_EMBEDDED {
         current_instruction_(-1),
         instructions_(chunk->instructions()),
         deoptimizations_(4, info->zone()),
+        jump_table_(4, info->zone()),
         deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
@@ -64,6 +65,7 @@ class LCodeGen BASE_EMBEDDED {
         dynamic_frame_alignment_(false),
         osr_pc_offset_(-1),
         last_lazy_deopt_pc_(0),
+        frame_is_built_(false),
         safepoints_(info->zone()),
         resolver_(this),
         expected_safepoint_kind_(Safepoint::kSimple) {
@@ -78,10 +80,20 @@ class LCodeGen BASE_EMBEDDED {
   Heap* heap() const { return isolate()->heap(); }
   Zone* zone() const { return zone_; }
 
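+  // An eager frame is built in the prologue. A stub that only calls out
+  // from deferred code can instead build its frame around that code.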
+  bool NeedsEagerFrame() const {
+    return GetStackSlotCount() > 0 ||
+        info()->is_non_deferred_calling() ||
+        !info()->IsStub();
+  }
+  bool NeedsDeferredFrame() const {
+    return !NeedsEagerFrame() && info()->is_deferred_calling();
+  }
+
   // Support for converting LOperands to assembler types.
   Operand ToOperand(LOperand* op) const;
   Register ToRegister(LOperand* op) const;
   XMMRegister ToDoubleRegister(LOperand* op) const;
+  bool IsX87TopOfStack(LOperand* op) const;
 
   bool IsInteger32(LConstantOperand* op) const;
   Immediate ToInteger32Immediate(LOperand* op) const {
@@ -90,6 +102,9 @@ class LCodeGen BASE_EMBEDDED {
 
   Handle<Object> ToHandle(LConstantOperand* op) const;
 
+  // A utility for instructions that return floating point values on X87.
+  void HandleX87FPReturnValue(LInstruction* instr);
+
   // The operand denoting the second word (the one with a higher address) of
   // a double stack slot.
   Operand HighOperand(LOperand* op);
@@ -122,7 +137,7 @@ class LCodeGen BASE_EMBEDDED {
                                        Label* map_check);
 
   void DoCheckMapCommon(Register reg, Handle<Map> map,
-                        CompareMapMode mode, LEnvironment* env);
+                        CompareMapMode mode, LInstruction* instr);
 
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
@@ -172,7 +187,7 @@ class LCodeGen BASE_EMBEDDED {
                        Register temporary2);
 
   int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
-  int GetParameterCount() const { return scope()->num_parameters(); }
+  int GetParameterCount() const { return info()->num_parameters(); }
 
   void Abort(const char* reason);
   void Comment(const char* format, ...);
@@ -184,9 +199,7 @@ class LCodeGen BASE_EMBEDDED {
   bool GeneratePrologue();
   bool GenerateBody();
   bool GenerateDeferredCode();
-  // Pad the reloc info to ensure that we have enough space to patch during
-  // deoptimization.
-  bool GenerateRelocPadding();
+  bool GenerateJumpTable();
   bool GenerateSafepointTable();
 
   enum SafepointMode {
@@ -356,10 +369,23 @@ class LCodeGen BASE_EMBEDDED {
   MacroAssembler* const masm_;
   CompilationInfo* const info_;
 
+  struct JumpTableEntry {
+    inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
+        : label(),
+          address(entry),
+          needs_frame(frame),
+          is_lazy_deopt(is_lazy) { }
+    Label label;
+    Address address;
+    bool needs_frame;
+    bool is_lazy_deopt;
+  };
+
   int current_block_;
   int current_instruction_;
   const ZoneList<LInstruction*>* instructions_;
   ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<JumpTableEntry> jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;
@@ -369,6 +395,7 @@ class LCodeGen BASE_EMBEDDED {
   bool dynamic_frame_alignment_;
   int osr_pc_offset_;
   int last_lazy_deopt_pc_;
+  bool frame_is_built_;
 
   // Builder that keeps track of safepoints in the code. The table
   // itself is emitted at the end of the generated code.
@@ -386,6 +413,7 @@ class LCodeGen BASE_EMBEDDED {
       ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
       codegen_->masm_->PushSafepointRegisters();
       codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+      ASSERT(codegen_->info()->is_calling());
     }
 
     ~PushSafepointRegistersScope() {
index 6428916..7cb15e6 100644 (file)
@@ -191,7 +191,7 @@ int LGapResolver::CountSourceUses(LOperand* operand) {
 
 Register LGapResolver::GetFreeRegisterNot(Register reg) {
   int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
-  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
     if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
       return Register::FromAllocationIndex(i);
     }
@@ -204,7 +204,7 @@ bool LGapResolver::HasBeenReset() {
   if (!moves_.is_empty()) return false;
   if (spilled_register_ >= 0) return false;
 
-  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
     if (source_uses_[i] != 0) return false;
     if (destination_uses_[i] != 0) return false;
   }
@@ -256,7 +256,7 @@ Register LGapResolver::EnsureTempRegister() {
 
   // 3. Prefer to spill a register that is not used in any remaining move
   // because it will not need to be restored until the end.
-  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
     if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
       Register scratch = Register::FromAllocationIndex(i);
       __ push(scratch);
@@ -324,29 +324,38 @@ void LGapResolver::EmitMove(int index) {
     }
 
   } else if (source->IsDoubleRegister()) {
-    XMMRegister src = cgen_->ToDoubleRegister(source);
-    if (destination->IsDoubleRegister()) {
-      XMMRegister dst = cgen_->ToDoubleRegister(destination);
-      __ movaps(dst, src);
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope scope(SSE2);
+      XMMRegister src = cgen_->ToDoubleRegister(source);
+      if (destination->IsDoubleRegister()) {
+        XMMRegister dst = cgen_->ToDoubleRegister(destination);
+        __ movaps(dst, src);
+      } else {
+        ASSERT(destination->IsDoubleStackSlot());
+        Operand dst = cgen_->ToOperand(destination);
+        __ movdbl(dst, src);
+      }
     } else {
-      ASSERT(destination->IsDoubleStackSlot());
-      Operand dst = cgen_->ToOperand(destination);
-      __ movdbl(dst, src);
+      UNREACHABLE();
     }
   } else if (source->IsDoubleStackSlot()) {
-    ASSERT(destination->IsDoubleRegister() ||
-           destination->IsDoubleStackSlot());
-    Operand src = cgen_->ToOperand(source);
-    if (destination->IsDoubleRegister()) {
-      XMMRegister dst = cgen_->ToDoubleRegister(destination);
-      __ movdbl(dst, src);
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope scope(SSE2);
+      ASSERT(destination->IsDoubleRegister() ||
+             destination->IsDoubleStackSlot());
+      Operand src = cgen_->ToOperand(source);
+      if (destination->IsDoubleRegister()) {
+        XMMRegister dst = cgen_->ToDoubleRegister(destination);
+        __ movdbl(dst, src);
+      } else {
+        // We rely on having xmm0 available as a fixed scratch register.
+        Operand dst = cgen_->ToOperand(destination);
+        __ movdbl(xmm0, src);
+        __ movdbl(dst, xmm0);
+      }
     } else {
-      // We rely on having xmm0 available as a fixed scratch register.
-      Operand dst = cgen_->ToOperand(destination);
-      __ movdbl(xmm0, src);
-      __ movdbl(dst, xmm0);
+      UNREACHABLE();
     }
-
   } else {
     UNREACHABLE();
   }
@@ -410,6 +419,7 @@ void LGapResolver::EmitSwap(int index) {
       __ mov(src, tmp0);
     }
   } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+    CpuFeatures::Scope scope(SSE2);
     // XMM register-register swap. We rely on having xmm0
     // available as a fixed scratch register.
     XMMRegister src = cgen_->ToDoubleRegister(source);
index 0c81d72..3a58f58 100644 (file)
@@ -97,8 +97,8 @@ class LGapResolver BASE_EMBEDDED {
   ZoneList<LMoveOperands> moves_;
 
   // Source and destination use counts for the general purpose registers.
-  int source_uses_[Register::kNumAllocatableRegisters];
-  int destination_uses_[Register::kNumAllocatableRegisters];
+  int source_uses_[Register::kMaxNumAllocatableRegisters];
+  int destination_uses_[Register::kMaxNumAllocatableRegisters];
 
   // If we had to spill on demand, the currently spilled register's
   // allocation index.
index 65a300e..098e6ff 100644 (file)
@@ -44,10 +44,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
 #undef DEFINE_COMPILE
 
 LOsrEntry::LOsrEntry() {
-  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
     register_spills_[i] = NULL;
   }
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
     double_register_spills_[i] = NULL;
   }
 }
@@ -460,9 +460,11 @@ LPlatformChunk* LChunkBuilder::Build() {
   status_ = BUILDING;
 
   // Reserve the first spill slot for the state of dynamic alignment.
-  int alignment_state_index = chunk_->GetNextSpillIndex(false);
-  ASSERT_EQ(alignment_state_index, 0);
-  USE(alignment_state_index);
+  if (info()->IsOptimizing()) {
+    int alignment_state_index = chunk_->GetNextSpillIndex(false);
+    ASSERT_EQ(alignment_state_index, 0);
+    USE(alignment_state_index);
+  }
 
   const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
   for (int i = 0; i < blocks->length(); i++) {
@@ -494,6 +496,12 @@ LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
 }
 
 
+LUnallocated* LChunkBuilder::ToUnallocated(X87TopOfStackRegister reg) {
+  return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+      X87TopOfStackRegister::ToAllocationIndex(reg));
+}
+
+
 LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
   return Use(value, ToUnallocated(fixed_register));
 }
@@ -626,6 +634,13 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
 }
 
 
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineX87TOS(
+    LTemplateInstruction<1, I, T>* instr) {
+  return Define(instr, ToUnallocated(x87tos));
+}
+
+
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
   int argument_index_accumulator = 0;
@@ -638,6 +653,8 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
+  info()->MarkAsNonDeferredCalling();
+
 #ifdef DEBUG
   instr->VerifyCall();
 #endif
@@ -1680,8 +1697,12 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
 LInstruction* LChunkBuilder::DoChange(HChange* instr) {
   Representation from = instr->from();
   Representation to = instr->to();
+  // Only mark conversions that might need to allocate as calling rather than
+  // all changes, so that simple, non-allocating conversions do not force
+  // building a stack frame.
   if (from.IsTagged()) {
     if (to.IsDouble()) {
+      info()->MarkAsDeferredCalling();
       LOperand* value = UseRegister(instr->value());
       // Temp register only necessary for minus zero check.
       LOperand* temp = instr->deoptimize_on_minus_zero()
@@ -1706,7 +1727,10 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
     }
   } else if (from.IsDouble()) {
     if (to.IsTagged()) {
-      LOperand* value = UseRegister(instr->value());
+      info()->MarkAsDeferredCalling();
+      LOperand* value = CpuFeatures::IsSupported(SSE2)
+          ? UseRegisterAtStart(instr->value())
+          : UseAtStart(instr->value());
       LOperand* temp = TempRegister();
 
       // Make sure that temp and result_temp are different registers.
@@ -1724,6 +1748,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
           DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
     }
   } else if (from.IsInteger32()) {
+    info()->MarkAsDeferredCalling();
     if (to.IsTagged()) {
       HValue* val = instr->value();
       LOperand* value = UseRegister(val);
@@ -2240,8 +2265,17 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
 
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
-  int spill_index = chunk()->GetParameterStackSlot(instr->index());
-  return DefineAsSpilled(new(zone()) LParameter, spill_index);
+  LParameter* result = new(zone()) LParameter;
+  if (info()->IsOptimizing()) {
+    int spill_index = chunk()->GetParameterStackSlot(instr->index());
+    return DefineAsSpilled(result, spill_index);
+  } else {
+    ASSERT(info()->IsStub());
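+    // Stub parameters arrive in the fixed registers named by the stub's
+    // interface descriptor, not in stack slots.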
+    CodeStubInterfaceDescriptor* descriptor =
+        info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+    Register reg = descriptor->register_params[instr->index()];
+    return DefineFixed(result, reg);
+  }
 }
 
 
@@ -2342,6 +2376,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
 
 
 LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  info()->MarkAsDeferredCalling();
   if (instr->is_function_entry()) {
     LOperand* context = UseFixed(instr->context(), esi);
     return MarkAsCall(new(zone()) LStackCheck(context), instr);
index 2067c62..bec2f81 100644 (file)
@@ -249,7 +249,11 @@ class LInstruction: public ZoneObject {
   void MarkAsCall() { is_call_ = true; }
 
   // Interface to the register allocator and iterators.
-  bool IsMarkedAsCall() const { return is_call_; }
+  bool ClobbersTemps() const { return is_call_; }
+  bool ClobbersRegisters() const { return is_call_; }
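+  // Without SSE2, double values live on the x87 stack and do not survive
+  // arbitrary instructions, so assume every instruction clobbers them.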
+  virtual bool ClobbersDoubleRegisters() const {
+    return is_call_ || !CpuFeatures::IsSupported(SSE2);
+  }
 
   virtual bool HasResult() const = 0;
   virtual LOperand* result() = 0;
@@ -355,6 +359,7 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
 class LInstructionGap: public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+  virtual bool ClobbersDoubleRegisters() const { return false; }
 
   DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
 };
@@ -1413,7 +1418,6 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
     inputs_[0] = elements;
     inputs_[1] = key;
   }
-
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   ElementsKind elements_kind() const {
@@ -1423,11 +1427,18 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
     return hydrogen()->is_external();
   }
 
+  virtual bool ClobbersDoubleRegisters() const {
+    return !IsDoubleOrFloatElementsKind(hydrogen()->elements_kind());
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
 
   virtual void PrintDataTo(StringStream* stream);
   uint32_t additional_index() const { return hydrogen()->index_offset(); }
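+  // A tagged key is known to be a smi when the keyed load is emitted.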
+  bool key_is_smi() {
+    return hydrogen()->key()->representation().IsTagged();
+  }
 };
 
 
@@ -2408,8 +2419,9 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
   // slot, i.e., that must also be restored to the spill slot on OSR entry.
   // NULL if the register has no assigned spill slot.  Indexed by allocation
   // index.
-  LOperand* register_spills_[Register::kNumAllocatableRegisters];
-  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+  LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
+  LOperand* double_register_spills_[
+      DoubleRegister::kMaxNumAllocatableRegisters];
 };
 
 
@@ -2573,6 +2585,7 @@ class LChunkBuilder BASE_EMBEDDED {
   // Methods for getting operands for Use / Define / Temp.
   LUnallocated* ToUnallocated(Register reg);
   LUnallocated* ToUnallocated(XMMRegister reg);
+  LUnallocated* ToUnallocated(X87TopOfStackRegister reg);
 
   // Methods for setting up define-use relationships.
   MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
@@ -2633,6 +2646,8 @@ class LChunkBuilder BASE_EMBEDDED {
   template<int I, int T>
       LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
                                       XMMRegister reg);
+  template<int I, int T>
+      LInstruction* DefineX87TOS(LTemplateInstruction<1, I, T>* instr);
   // Assigns an environment to an instruction.  An instruction which can
   // deoptimize must have an environment.
   LInstruction* AssignEnvironment(LInstruction* instr);
index 14fb8ca..e9ce797 100644 (file)
@@ -1801,7 +1801,8 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
   const Runtime::Function* function = Runtime::FunctionForId(id);
   Set(eax, Immediate(function->nargs));
   mov(ebx, Immediate(ExternalReference(function, isolate())));
-  CEntryStub ces(1, kSaveFPRegs);
+  CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? kSaveFPRegs
+                                                   : kDontSaveFPRegs);
   CallStub(&ces);
 }
 
index 7abb29b..79960ec 100644 (file)
@@ -924,9 +924,9 @@ class MacroAssembler: public Assembler {
   Operand SafepointRegisterSlot(Register reg);
   static int SafepointRegisterStackIndex(int reg_code);
 
-  // Needs access to SafepointRegisterStackIndex for optimized frame
+  // Needs access to SafepointRegisterStackIndex for compiled frame
   // traversal.
-  friend class OptimizedFrame;
+  friend class CompiledFrame;
 };
 
 
index c8695c5..7834627 100644 (file)
@@ -3398,9 +3398,17 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
   // -----------------------------------
 
   ElementsKind elements_kind = receiver_map->elements_kind();
-  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
-  __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
+  if (receiver_map->has_fast_elements() ||
+      receiver_map->has_external_array_elements()) {
+    Handle<Code> stub = KeyedLoadFastElementStub(
+        receiver_map->instance_type() == JS_ARRAY_TYPE,
+        elements_kind).GetCode();
+    __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
+  } else {
+    Handle<Code> stub =
+        KeyedLoadDictionaryElementStub().GetCode();
+    __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
+  }
 
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -3661,157 +3669,6 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
 }
 
 
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
-    MacroAssembler* masm,
-    ElementsKind elements_kind) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label miss_force_generic, failed_allocation, slow;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
-
-  // Check that the index is in range.
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
-  // Unsigned comparison catches both negative and too-large values.
-  __ j(above_equal, &miss_force_generic);
-  __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
-  // ebx: base pointer of external storage
-  switch (elements_kind) {
-    case EXTERNAL_BYTE_ELEMENTS:
-      __ SmiUntag(ecx);  // Untag the index.
-      __ movsx_b(eax, Operand(ebx, ecx, times_1, 0));
-      break;
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-    case EXTERNAL_PIXEL_ELEMENTS:
-      __ SmiUntag(ecx);  // Untag the index.
-      __ movzx_b(eax, Operand(ebx, ecx, times_1, 0));
-      break;
-    case EXTERNAL_SHORT_ELEMENTS:
-      __ movsx_w(eax, Operand(ebx, ecx, times_1, 0));
-      break;
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      __ movzx_w(eax, Operand(ebx, ecx, times_1, 0));
-      break;
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_INT_ELEMENTS:
-      __ mov(eax, Operand(ebx, ecx, times_2, 0));
-      break;
-    case EXTERNAL_FLOAT_ELEMENTS:
-      __ fld_s(Operand(ebx, ecx, times_2, 0));
-      break;
-    case EXTERNAL_DOUBLE_ELEMENTS:
-      __ fld_d(Operand(ebx, ecx, times_4, 0));
-      break;
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  // For integer array types:
-  // eax: value
-  // For floating-point array type:
-  // FP(0): value
-
-  if (elements_kind == EXTERNAL_INT_ELEMENTS ||
-      elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
-    // For the Int and UnsignedInt array types, we need to see whether
-    // the value can be represented in a Smi. If not, we need to convert
-    // it to a HeapNumber.
-    Label box_int;
-    if (elements_kind == EXTERNAL_INT_ELEMENTS) {
-      __ cmp(eax, 0xc0000000);
-      __ j(sign, &box_int);
-    } else {
-      ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
-      // The test is different for unsigned int values. Since we need
-      // the value to be in the range of a positive smi, we can't
-      // handle either of the top two bits being set in the value.
-      __ test(eax, Immediate(0xc0000000));
-      __ j(not_zero, &box_int);
-    }
-
-    __ SmiTag(eax);
-    __ ret(0);
-
-    __ bind(&box_int);
-
-    // Allocate a HeapNumber for the int and perform int-to-double
-    // conversion.
-    if (elements_kind == EXTERNAL_INT_ELEMENTS) {
-      __ push(eax);
-      __ fild_s(Operand(esp, 0));
-      __ pop(eax);
-    } else {
-      ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
-      // Need to zero-extend the value.
-      // There's no fild variant for unsigned values, so zero-extend
-      // to a 64-bit int manually.
-      __ push(Immediate(0));
-      __ push(eax);
-      __ fild_d(Operand(esp, 0));
-      __ pop(eax);
-      __ pop(eax);
-    }
-    // FP(0): value
-    __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
-    // Set the value.
-    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-    __ ret(0);
-  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
-             elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-    // For the floating-point array type, we need to always allocate a
-    // HeapNumber.
-    __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
-    // Set the value.
-    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-    __ ret(0);
-  } else {
-    __ SmiTag(eax);
-    __ ret(0);
-  }
-
-  // If we fail allocation of the HeapNumber, we still have a value on
-  // top of the FPU stack. Remove it.
-  __ bind(&failed_allocation);
-  __ fstp(0);
-  // Fall through to slow case.
-
-  // Slow case: Jump to runtime.
-  __ bind(&slow);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
-
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
-  __ jmp(ic, RelocInfo::CODE_TARGET);
-
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  // Miss case: Jump to runtime.
-  __ bind(&miss_force_generic);
-  Handle<Code> miss_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
 void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     MacroAssembler* masm,
     ElementsKind elements_kind) {
@@ -4011,106 +3868,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
 }
 
 
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label miss_force_generic;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
-
-  // Get the elements array.
-  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
-  __ AssertFastElements(eax);
-
-  // Check that the key is within bounds.
-  __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
-  __ j(above_equal, &miss_force_generic);
-
-  // Load the result and make sure it's not the hole.
-  __ mov(ebx, Operand(eax, ecx, times_2,
-                      FixedArray::kHeaderSize - kHeapObjectTag));
-  __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
-  __ j(equal, &miss_force_generic);
-  __ mov(eax, ebx);
-  __ ret(0);
-
-  __ bind(&miss_force_generic);
-  Handle<Code> miss_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
-    MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label miss_force_generic, slow_allocate_heapnumber;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
-
-  // Get the elements array.
-  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
-  __ AssertFastElements(eax);
-
-  // Check that the key is within bounds.
-  __ cmp(ecx, FieldOperand(eax, FixedDoubleArray::kLengthOffset));
-  __ j(above_equal, &miss_force_generic);
-
-  // Check for the hole
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ cmp(FieldOperand(eax, ecx, times_4, offset), Immediate(kHoleNanUpper32));
-  __ j(equal, &miss_force_generic);
-
-  // Always allocate a heap number for the result.
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    __ movdbl(xmm0, FieldOperand(eax, ecx, times_4,
-                                 FixedDoubleArray::kHeaderSize));
-  } else {
-    __ fld_d(FieldOperand(eax, ecx, times_4, FixedDoubleArray::kHeaderSize));
-  }
-  __ AllocateHeapNumber(eax, ebx, edi, &slow_allocate_heapnumber);
-  // Set the value.
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-  } else {
-    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-  }
-  __ ret(0);
-
-  __ bind(&slow_allocate_heapnumber);
-  // A value was pushed on the floating point stack before the allocation, if
-  // the allocation fails it needs to be removed.
-  if (!CpuFeatures::IsSupported(SSE2)) {
-    __ fstp(0);
-  }
-  Handle<Code> slow_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_Slow();
-  __ jmp(slow_ic, RelocInfo::CODE_TARGET);
-
-  __ bind(&miss_force_generic);
-  Handle<Code> miss_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
 void KeyedStoreStubCompiler::GenerateStoreFastElement(
     MacroAssembler* masm,
     bool is_js_array,
index bf2a649..3633036 100644 (file)
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1054,7 +1054,13 @@ Handle<Code> KeyedLoadIC::GetElementStubWithoutMapCheck(
     ElementsKind elements_kind,
     KeyedAccessGrowMode grow_mode) {
   ASSERT(grow_mode == DO_NOT_ALLOW_JSARRAY_GROWTH);
-  return KeyedLoadElementStub(elements_kind).GetCode();
+  if (IsFastElementsKind(elements_kind) ||
+      IsExternalArrayElementsKind(elements_kind)) {
+    return KeyedLoadFastElementStub(is_js_array, elements_kind).GetCode();
+  } else {
+    ASSERT(elements_kind == DICTIONARY_ELEMENTS);
+    return KeyedLoadDictionaryElementStub().GetCode();
+  }
 }
 
 
index ef4e0af..9683ce7 100644 (file)
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1619,6 +1619,7 @@ Isolate::Isolate()
       string_tracker_(NULL),
       regexp_stack_(NULL),
       date_cache_(NULL),
+      code_stub_interface_descriptors_(NULL),
       context_exit_happened_(false),
       deferred_handles_head_(NULL),
       optimizing_compiler_thread_(this) {
@@ -1781,6 +1782,9 @@ Isolate::~Isolate() {
   delete date_cache_;
   date_cache_ = NULL;
 
+  delete[] code_stub_interface_descriptors_;
+  code_stub_interface_descriptors_ = NULL;
+
   delete regexp_stack_;
   regexp_stack_ = NULL;
 
@@ -1944,6 +1948,10 @@ bool Isolate::Init(Deserializer* des) {
   regexp_stack_ = new RegExpStack();
   regexp_stack_->isolate_ = this;
   date_cache_ = new DateCache();
+  code_stub_interface_descriptors_ =
+      new CodeStubInterfaceDescriptor*[CodeStub::NUMBER_OF_IDS];
+  memset(code_stub_interface_descriptors_, 0,
+         kPointerSize * CodeStub::NUMBER_OF_IDS);
 
   // Enable logging before setting up the heap
   logger_->SetUp();
@@ -2004,6 +2012,8 @@ bool Isolate::Init(Deserializer* des) {
   debug_->SetUp(create_heap_objects);
 #endif
 
+  deoptimizer_data_ = new DeoptimizerData;
+
   // If we are deserializing, read the state into the now-empty heap.
   if (!create_heap_objects) {
     des->Deserialize();
@@ -2022,7 +2032,6 @@ bool Isolate::Init(Deserializer* des) {
   // Quiet the heap NaN if needed on target platform.
   if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value());
 
-  deoptimizer_data_ = new DeoptimizerData;
   runtime_profiler_ = new RuntimeProfiler(this);
   runtime_profiler_->SetUp();
 
@@ -2044,6 +2053,17 @@ bool Isolate::Init(Deserializer* des) {
 
   state_ = INITIALIZED;
   time_millis_at_init_ = OS::TimeCurrentMillis();
+
+  if (!create_heap_objects) {
+    // Now that the heap is consistent, it's OK to generate the code for the
+    // deopt entry table that might have been referred to by optimized code in
+    // the snapshot.
+    HandleScope scope(this);
+    Deoptimizer::EnsureCodeForDeoptimizationEntry(
+        Deoptimizer::LAZY,
+        kDeoptTableSerializeEntryCount - 1);
+  }
+
   if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
   return true;
 }
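
The descriptor table set up here is a plain array of
CodeStubInterfaceDescriptor pointers, one slot per stub major key, zeroed at
startup and filled lazily by each stub's platform-specific
GetInterfaceDescriptor (see the code-stubs-x64.cc hunk below); the deoptimizer
later indexes it by major key to locate a stub's miss handler. A standalone
sketch of that registry pattern, with invented names and sizes:

    #include <cstdio>
    #include <cstring>

    struct Descriptor { int register_param_count; };

    static const int kNumberOfIds = 4;  // Stand-in for CodeStub::NUMBER_OF_IDS.

    class Registry {
     public:
      Registry() : slots_(new Descriptor*[kNumberOfIds]) {
        std::memset(slots_, 0, sizeof(Descriptor*) * kNumberOfIds);
      }
      ~Registry() { delete[] slots_; }
      Descriptor* Get(int major_key) {
        if (slots_[major_key] == NULL) {
          static Descriptor info = { 2 };  // Built once, on first request.
          slots_[major_key] = &info;
        }
        return slots_[major_key];
      }
     private:
      Descriptor** slots_;
    };

    int main() {
      Registry registry;
      std::printf("params: %d\n", registry.Get(1)->register_param_count);
      return 0;
    }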
index 2faee75..e09f75a 100644 (file)
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -53,6 +53,7 @@ namespace internal {
 class Bootstrapper;
 class CodeGenerator;
 class CodeRange;
+struct CodeStubInterfaceDescriptor;
 class CompilationCache;
 class ContextSlotCache;
 class ContextSwitcher;
@@ -1062,6 +1063,10 @@ class Isolate {
     date_cache_ = date_cache;
   }
 
+  CodeStubInterfaceDescriptor** code_stub_interface_descriptors() {
+    return code_stub_interface_descriptors_;
+  }
+
   void IterateDeferredHandles(ObjectVisitor* visitor);
   void LinkDeferredHandles(DeferredHandles* deferred_handles);
   void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
@@ -1245,6 +1250,7 @@ class Isolate {
   RegExpStack* regexp_stack_;
   DateCache* date_cache_;
   unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
+  CodeStubInterfaceDescriptor** code_stub_interface_descriptors_;
 
   // The garbage collector should be a little more aggressive when it knows
   // that a context was recently exited.
index 91a9811..b23c867 100644 (file)
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -606,7 +606,7 @@ void LAllocator::AddInitialIntervals(HBasicBlock* block,
 
 
 int LAllocator::FixedDoubleLiveRangeID(int index) {
-  return -index - 1 - Register::kNumAllocatableRegisters;
+  return -index - 1 - Register::kMaxNumAllocatableRegisters;
 }
 
 
@@ -638,7 +638,7 @@ LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
 
 
 LiveRange* LAllocator::FixedLiveRangeFor(int index) {
-  ASSERT(index < Register::kNumAllocatableRegisters);
+  ASSERT(index < Register::kMaxNumAllocatableRegisters);
   LiveRange* result = fixed_live_ranges_[index];
   if (result == NULL) {
     result = new(zone_) LiveRange(FixedLiveRangeID(index), zone_);
@@ -651,7 +651,7 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) {
 
 
 LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
-  ASSERT(index < DoubleRegister::kNumAllocatableRegisters);
+  ASSERT(index < DoubleRegister::NumAllocatableRegisters());
   LiveRange* result = fixed_double_live_ranges_[index];
   if (result == NULL) {
     result = new(zone_) LiveRange(FixedDoubleLiveRangeID(index), zone_);
@@ -768,6 +768,7 @@ void LAllocator::AddConstraintsGapMove(int index,
 void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
   int start = block->first_instruction_index();
   int end = block->last_instruction_index();
+  if (start == -1) return;
   for (int i = start; i <= end; ++i) {
     if (IsGapAt(i)) {
       LInstruction* instr = NULL;
@@ -946,8 +947,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
           Define(curr_position, output, NULL);
         }
 
-        if (instr->IsMarkedAsCall()) {
-          for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+        if (instr->ClobbersRegisters()) {
+          for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
             if (output == NULL || !output->IsRegister() ||
                 output->index() != i) {
               LiveRange* range = FixedLiveRangeFor(i);
@@ -958,8 +959,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
           }
         }
 
-        if (instr->IsMarkedAsCall()) {
-          for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+        if (instr->ClobbersDoubleRegisters()) {
+          for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
             if (output == NULL || !output->IsDoubleRegister() ||
                 output->index() != i) {
               LiveRange* range = FixedDoubleLiveRangeFor(i);
@@ -989,7 +990,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
 
         for (TempIterator it(instr); !it.Done(); it.Advance()) {
           LOperand* temp = it.Current();
-          if (instr->IsMarkedAsCall()) {
+          if (instr->ClobbersTemps()) {
             if (temp->IsRegister()) continue;
             if (temp->IsUnallocated()) {
               LUnallocated* temp_unalloc = LUnallocated::cast(temp);
@@ -1324,8 +1325,14 @@ void LAllocator::BuildLiveRanges() {
       while (!iterator.Done()) {
         found = true;
         int operand_index = iterator.Current();
-        PrintF("Function: %s\n",
-               *chunk_->info()->function()->debug_name()->ToCString());
+        if (chunk_->info()->IsStub()) {
+          CodeStub::Major major_key = chunk_->info()->code_stub()->MajorKey();
+          PrintF("Function: %s\n", CodeStub::MajorName(major_key, false));
+        } else {
+          ASSERT(chunk_->info()->IsOptimizing());
+          PrintF("Function: %s\n",
+                 *chunk_->info()->function()->debug_name()->ToCString());
+        }
         PrintF("Value %d used before first definition!\n", operand_index);
         LiveRange* range = LiveRangeFor(operand_index);
         PrintF("First use is at %d\n", range->first_pos()->pos().Value());
@@ -1471,14 +1478,14 @@ void LAllocator::ProcessOsrEntry() {
 
 void LAllocator::AllocateGeneralRegisters() {
   HPhase phase("L_Allocate general registers", this);
-  num_registers_ = Register::kNumAllocatableRegisters;
+  num_registers_ = Register::NumAllocatableRegisters();
   AllocateRegisters();
 }
 
 
 void LAllocator::AllocateDoubleRegisters() {
   HPhase phase("L_Allocate double registers", this);
-  num_registers_ = DoubleRegister::kNumAllocatableRegisters;
+  num_registers_ = DoubleRegister::NumAllocatableRegisters();
   mode_ = DOUBLE_REGISTERS;
   AllocateRegisters();
 }
@@ -1757,14 +1764,14 @@ void LAllocator::InactiveToActive(LiveRange* range) {
 
 // TryAllocateFreeReg and AllocateBlockedReg assume this
 // when allocating local arrays.
-STATIC_ASSERT(DoubleRegister::kNumAllocatableRegisters >=
-              Register::kNumAllocatableRegisters);
+STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
+              Register::kMaxNumAllocatableRegisters);
 
 
 bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
-  LifetimePosition free_until_pos[DoubleRegister::kNumAllocatableRegisters];
+  LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
 
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+  for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; i++) {
     free_until_pos[i] = LifetimePosition::MaxPosition();
   }
 
@@ -1853,10 +1860,10 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
   }
 
 
-  LifetimePosition use_pos[DoubleRegister::kNumAllocatableRegisters];
-  LifetimePosition block_pos[DoubleRegister::kNumAllocatableRegisters];
+  LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+  LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
 
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
     use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
   }
 
index 5b05263..0dd192d 100644 (file)
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -608,9 +608,9 @@ class LAllocator BASE_EMBEDDED {
   ZoneList<LiveRange*> live_ranges_;
 
   // Lists of live ranges
-  EmbeddedVector<LiveRange*, Register::kNumAllocatableRegisters>
+  EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
       fixed_live_ranges_;
-  EmbeddedVector<LiveRange*, DoubleRegister::kNumAllocatableRegisters>
+  EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
       fixed_double_live_ranges_;
   ZoneList<LiveRange*> unhandled_live_ranges_;
   ZoneList<LiveRange*> active_live_ranges_;
index eb2198d..7ad175e 100644 (file)
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -414,7 +414,7 @@ LChunk* LChunk::NewChunk(HGraph* graph) {
 }
 
 
-Handle<Code> LChunk::Codegen() {
+Handle<Code> LChunk::Codegen(Code::Kind kind) {
   MacroAssembler assembler(info()->isolate(), NULL, 0);
   LCodeGen generator(this, &assembler, info());
 
@@ -425,7 +425,7 @@ Handle<Code> LChunk::Codegen() {
       PrintF("Crankshaft Compiler - ");
     }
     CodeGenerator::MakeCodePrologue(info());
-    Code::Flags flags = Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
+    Code::Flags flags = Code::ComputeFlags(kind);
     Handle<Code> code =
         CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
     generator.FinishCode(code);
index b4eb2bb..222e893 100644 (file)
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -682,7 +682,7 @@ class LChunk: public ZoneObject {
 
   Zone* zone() const { return info_->zone(); }
 
-  Handle<Code> Codegen();
+  Handle<Code> Codegen(Code::Kind kind);
 
  protected:
   LChunk(CompilationInfo* info, HGraph* graph)
index 76af400..197ba89 100644 (file)
--- a/src/log.cc
+++ b/src/log.cc
@@ -1537,6 +1537,7 @@ void Logger::LogCodeObject(Object* object) {
       case Code::BINARY_OP_IC:   // fall through
       case Code::COMPARE_IC:  // fall through
       case Code::TO_BOOLEAN_IC:  // fall through
+      case Code::COMPILED_STUB:  // fall through
       case Code::STUB:
         description =
             CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
index 0ed2414..13b0b43 100644 (file)
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -70,6 +70,8 @@ class CodeGenerator: public AstVisitor {
                               int pos,
                               bool right_here = false);
 
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
  private:
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
index b99ba44..430e620 100644 (file)
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -3225,6 +3225,7 @@ int Code::arguments_count() {
 
 int Code::major_key() {
   ASSERT(kind() == STUB ||
+         kind() == COMPILED_STUB ||
          kind() == UNARY_OP_IC ||
          kind() == BINARY_OP_IC ||
          kind() == COMPARE_IC ||
@@ -3236,6 +3237,7 @@ int Code::major_key() {
 
 void Code::set_major_key(int major) {
   ASSERT(kind() == STUB ||
+         kind() == COMPILED_STUB ||
          kind() == UNARY_OP_IC ||
          kind() == BINARY_OP_IC ||
          kind() == COMPARE_IC ||
@@ -3344,7 +3346,7 @@ void Code::set_profiler_ticks(int ticks) {
 
 
 unsigned Code::stack_slots() {
-  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
   return StackSlotsField::decode(
       READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
 }
@@ -3352,7 +3354,7 @@ unsigned Code::stack_slots() {
 
 void Code::set_stack_slots(unsigned slots) {
   CHECK(slots <= (1 << kStackSlotsBitCount));
-  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
   int updated = StackSlotsField::update(previous, slots);
   WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
@@ -3360,7 +3362,7 @@ void Code::set_stack_slots(unsigned slots) {
 
 
 unsigned Code::safepoint_table_offset() {
-  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
   return SafepointTableOffsetField::decode(
       READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
 }
@@ -3368,7 +3370,7 @@ unsigned Code::safepoint_table_offset() {
 
 void Code::set_safepoint_table_offset(unsigned offset) {
   CHECK(offset <= (1 << kSafepointTableOffsetBitCount));
-  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
   ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
   int updated = SafepointTableOffsetField::update(previous, offset);
index d444355..1f9fe4a 100644 (file)
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -8987,6 +8987,12 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
           break;
         }
 
+        case Translation::COMPILED_STUB_FRAME: {
+          Code::Kind stub_kind = static_cast<Code::Kind>(iterator.Next());
+          PrintF(out, "{kind=%d}", stub_kind);
+          break;
+        }
+
         case Translation::ARGUMENTS_ADAPTOR_FRAME:
         case Translation::CONSTRUCT_STUB_FRAME: {
           int function_id = iterator.Next();
@@ -9101,6 +9107,7 @@ const char* Code::Kind2String(Kind kind) {
   switch (kind) {
     case FUNCTION: return "FUNCTION";
     case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION";
+    case COMPILED_STUB: return "COMPILED_STUB";
     case STUB: return "STUB";
     case BUILTIN: return "BUILTIN";
     case LOAD_IC: return "LOAD_IC";
@@ -9220,7 +9227,7 @@ void Code::Disassemble(const char* name, FILE* out) {
   }
   PrintF("\n");
 
-  if (kind() == OPTIMIZED_FUNCTION) {
+  if (kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB) {
     SafepointTable table(this);
     PrintF(out, "Safepoints (size = %u)\n", table.size());
     for (unsigned i = 0; i < table.length(); i++) {
index c476692..723cbf8 100644 (file)
--- a/src/objects.h
+++ b/src/objects.h
@@ -4233,6 +4233,7 @@ class Code: public HeapObject {
   V(FUNCTION)             \
   V(OPTIMIZED_FUNCTION)   \
   V(STUB)                 \
+  V(COMPILED_STUB)        \
   V(BUILTIN)              \
   V(LOAD_IC)              \
   V(KEYED_LOAD_IC)        \
@@ -4849,6 +4850,10 @@ class Map: public HeapObject {
     return IsFastDoubleElementsKind(elements_kind());
   }
 
+  inline bool has_fast_elements() {
+    return IsFastElementsKind(elements_kind());
+  }
+
   inline bool has_non_strict_arguments_elements() {
     return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
   }
index 2d56d1a..7aad78c 100644 (file)
--- a/src/optimizing-compiler-thread.h
+++ b/src/optimizing-compiler-thread.h
@@ -36,7 +36,7 @@
 namespace v8 {
 namespace internal {
 
-class HGraphBuilder;
+class HOptimizedGraphBuilder;
 class OptimizingCompiler;
 class SharedFunctionInfo;
 
index 602fbb4..c0fdf48 100644 (file)
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -42,6 +42,7 @@ PrettyPrinter::PrettyPrinter() {
   output_ = NULL;
   size_ = 0;
   pos_ = 0;
+  InitializeAstVisitor();
 }
 
 
index 9ac7257..41175ab 100644 (file)
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -74,6 +74,8 @@ class PrettyPrinter: public AstVisitor {
   void PrintDeclarations(ZoneList<Declaration*>* declarations);
   void PrintFunctionLiteral(FunctionLiteral* function);
   void PrintCaseClause(CaseClause* clause);
+
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
 };
 
 
index 2a98787..02907de 100644 (file)
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -43,7 +43,9 @@ class Processor: public AstVisitor {
         result_assigned_(false),
         is_set_(false),
         in_try_(false),
-        factory_(isolate(), zone) { }
+        factory_(Isolate::Current(), zone) {
+    InitializeAstVisitor();
+  }
 
   virtual ~Processor() { }
 
@@ -86,6 +88,8 @@ class Processor: public AstVisitor {
 #undef DEF_VISIT
 
   void VisitIterationStatement(IterationStatement* stmt);
+
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
 };
 
 
index 5106be8..14bbae7 100644 (file)
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -7902,6 +7902,17 @@ class ActivationsFinder : public ThreadVisitor {
 };
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyICMiss) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 0);
+  Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
+  ASSERT(isolate->heap()->IsAllocationAllowed());
+  ASSERT(deoptimizer->compiled_code_kind() == Code::COMPILED_STUB);
+  delete deoptimizer;
+  return isolate->heap()->undefined_value();
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
@@ -7910,9 +7921,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
       static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
   Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
   ASSERT(isolate->heap()->IsAllocationAllowed());
-  JavaScriptFrameIterator it(isolate);
+
+  ASSERT(deoptimizer->compiled_code_kind() != Code::COMPILED_STUB);
 
   // Make sure to materialize objects before causing any allocation.
+  JavaScriptFrameIterator it(isolate);
   deoptimizer->MaterializeHeapObjects(&it);
   delete deoptimizer;
 
index 19ff62d..cb8afba 100644 (file)
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -89,6 +89,7 @@ namespace internal {
   F(ForceParallelRecompile, 1, 1) \
   F(InstallRecompiledCode, 1, 1) \
   F(NotifyDeoptimized, 1, 1) \
+  F(NotifyICMiss, 0, 1) \
   F(NotifyOSR, 0, 1) \
   F(DeoptimizeFunction, 1, 1) \
   F(ClearFunctionTypeFeedback, 1, 1) \
index 714e5c3..9e42304 100644 (file)
--- a/src/safepoint-table.cc
+++ b/src/safepoint-table.cc
@@ -59,7 +59,8 @@ bool SafepointEntry::HasRegisterAt(int reg_index) const {
 
 
 SafepointTable::SafepointTable(Code* code) {
-  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION ||
+         code->kind() == Code::COMPILED_STUB);
   code_ = code;
   Address header = code->instruction_start() + code->safepoint_table_offset();
   length_ = Memory::uint32_at(header + kLengthOffset);
@@ -158,14 +159,6 @@ unsigned SafepointTableBuilder::GetCodeOffset() const {
 
 
 void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
-  // For lazy deoptimization we need space to patch a call after every call.
-  // Ensure there is always space for such patching, even if the code ends
-  // in a call.
-  int target_offset = assembler->pc_offset() + Deoptimizer::patch_size();
-  while (assembler->pc_offset() < target_offset) {
-    assembler->nop();
-  }
-
   // Make sure the safepoint table is properly aligned. Pad with nops.
   assembler->Align(kIntSize);
   assembler->RecordComment(";;; Safepoint table.");
index dfc5574..26e0f01 100644 (file)
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -30,6 +30,7 @@
 #include "accessors.h"
 #include "api.h"
 #include "bootstrapper.h"
+#include "deoptimizer.h"
 #include "execution.h"
 #include "global-handles.h"
 #include "ic-inl.h"
@@ -527,6 +528,17 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
       UNCLASSIFIED,
       51,
       "Code::MakeCodeYoung");
+
+  // Add a small set of deopt entry addresses to the encoder without
+  // generating the deopt table code, which isn't possible at deserialization
+  // time.
+  HandleScope scope(Isolate::Current());
+  for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
+    Address address = Deoptimizer::GetDeoptimizationEntry(
+        entry,
+        Deoptimizer::LAZY,
+        Deoptimizer::CALCULATE_ENTRY_ADDRESS);
+    Add(address, LAZY_DEOPTIMIZATION, 52 + entry, "lazy_deopt");
+  }
 }
 
 
index 2041792..4bbde5a 100644 (file)
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -47,10 +47,11 @@ enum TypeCode {
   EXTENSION,
   ACCESSOR,
   RUNTIME_ENTRY,
-  STUB_CACHE_TABLE
+  STUB_CACHE_TABLE,
+  LAZY_DEOPTIMIZATION
 };
 
-const int kTypeCodeCount = STUB_CACHE_TABLE + 1;
+const int kTypeCodeCount = LAZY_DEOPTIMIZATION + 1;
 const int kFirstTypeCode = UNCLASSIFIED;
 
 const int kReferenceIdBits = 16;
@@ -59,6 +60,7 @@ const int kReferenceTypeShift = kReferenceIdBits;
 const int kDebugRegisterBits = 4;
 const int kDebugIdShift = kDebugRegisterBits;
 
+const int kDeoptTableSerializeEntryCount = 8;
 
 // ExternalReferenceTable is a helper class that defines the relationship
 // between external references and their encodings. It is used to build
index 345c4d4..02025bb 100644 (file)
--- a/src/smart-pointers.h
+++ b/src/smart-pointers.h
@@ -58,11 +58,16 @@ class SmartPointerBase {
   // You can get the underlying pointer out with the * operator.
   inline T* operator*() { return p_; }
 
-  // You can use [n] to index as if it was a plain pointer
+  // You can use [n] to index as if it was a plain pointer.
   inline T& operator[](size_t i) {
     return p_[i];
   }
 
+  // You can use [n] to index as if it was a plain pointer.
+  inline const T& operator[](size_t i) const {
+    return p_[i];
+  }
+
   // We don't have implicit conversion to a T* since that hinders migration:
   // You would not be able to change a method from returning a T* to
   // returning an SmartArrayPointer<T> and then get errors wherever it is used.
@@ -77,6 +82,11 @@ class SmartPointerBase {
     return temp;
   }
 
+  inline void Reset(T* new_value) {
+    if (p_) Deallocator::Delete(p_);
+    p_ = new_value;
+  }
+
   // Assignment requires an empty (NULL) SmartArrayPointer as the receiver. Like
   // the copy constructor it removes the pointer in the original to avoid
   // double freeing.
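
The new Reset() complements Release(): it deallocates the array currently
held (if any) before adopting the new pointer, so a SmartArrayPointer can be
re-seated in place without going through the assignment operator. A
hypothetical usage sketch of the same semantics on a stripped-down owner
type:

    #include <cstddef>
    #include <cstdio>

    // Stripped-down model of SmartArrayPointer with the new Reset().
    template <typename T>
    class ArrayOwner {
     public:
      explicit ArrayOwner(T* p) : p_(p) {}
      ~ArrayOwner() { delete[] p_; }
      void Reset(T* new_value) {
        if (p_) delete[] p_;  // Free the old array before taking the new one.
        p_ = new_value;
      }
      const T& operator[](size_t i) const { return p_[i]; }
     private:
      T* p_;  // Copying is intentionally omitted in this sketch.
    };

    int main() {
      ArrayOwner<int> buffer(new int[4]());
      buffer.Reset(new int[8]());  // The original 4-element array is freed.
      std::printf("%d\n", buffer[7]);
      return 0;
    }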
index cacd969..ec9f30d 100644 (file)
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1680,6 +1680,7 @@ static void ReportCodeKindStatistics() {
       CASE(FUNCTION);
       CASE(OPTIMIZED_FUNCTION);
       CASE(STUB);
+      CASE(COMPILED_STUB);
       CASE(BUILTIN);
       CASE(LOAD_IC);
       CASE(KEYED_LOAD_IC);
index f858e47..c562bc7 100644 (file)
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -681,13 +681,6 @@ class KeyedLoadStubCompiler: public StubCompiler {
   Handle<Code> CompileLoadPolymorphic(MapHandleList* receiver_maps,
                                       CodeHandleList* handler_ics);
 
-  static void GenerateLoadExternalArray(MacroAssembler* masm,
-                                        ElementsKind elements_kind);
-
-  static void GenerateLoadFastElement(MacroAssembler* masm);
-
-  static void GenerateLoadFastDoubleElement(MacroAssembler* masm);
-
   static void GenerateLoadDictionaryElement(MacroAssembler* masm);
 
  private:
index e03f96f..b4cd5a8 100644 (file)
--- a/src/utils.h
+++ b/src/utils.h
@@ -1015,6 +1015,7 @@ class BailoutId {
   static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
   static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
   static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
+  static BailoutId StubEntry() { return BailoutId(kStubEntryId); }
 
   bool IsNone() const { return id_ == kNoneId; }
   bool operator==(const BailoutId& other) const { return id_ == other.id_; }
@@ -1030,9 +1031,12 @@ class BailoutId {
   // code (function declarations).
   static const int kDeclarationsId = 3;
 
-  // Ever FunctionState starts with this id.
+  // Every FunctionState starts with this id.
   static const int kFirstUsableId = 4;
 
+  // Every compiled stub starts with this id.
+  static const int kStubEntryId = 5;
+
   int id_;
 };
 
index 370cb02..cc07287 100644 (file)
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -201,7 +201,8 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
 // -----------------------------------------------------------------------------
 // Register constants.
 
-const int Register::kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = {
+const int
+    Register::kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] = {
   // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
   0, 3, 2, 1, 7, 8, 9, 11, 14, 15
 };
index 24c8df3..9471a6d 100644 (file)
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -95,21 +95,24 @@ struct Register {
   //  r10 - fixed scratch register
   //  r12 - smi constant register
   //  r13 - root register
+  static const int kMaxNumAllocatableRegisters = 10;
+  static int NumAllocatableRegisters() {
+    return kMaxNumAllocatableRegisters;
+  }
   static const int kNumRegisters = 16;
-  static const int kNumAllocatableRegisters = 10;
 
   static int ToAllocationIndex(Register reg) {
     return kAllocationIndexByRegisterCode[reg.code()];
   }
 
   static Register FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
     Register result = { kRegisterCodeByAllocationIndex[index] };
     return result;
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
     const char* const names[] = {
       "rax",
       "rbx",
@@ -157,7 +160,7 @@ struct Register {
   int code_;
 
  private:
-  static const int kRegisterCodeByAllocationIndex[kNumAllocatableRegisters];
+  static const int kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters];
   static const int kAllocationIndexByRegisterCode[kNumRegisters];
 };
 
@@ -200,7 +203,10 @@ const Register no_reg = { kRegister_no_reg_Code };
 
 struct XMMRegister {
   static const int kNumRegisters = 16;
-  static const int kNumAllocatableRegisters = 15;
+  static const int kMaxNumAllocatableRegisters = 15;
+  static int NumAllocatableRegisters() {
+    return kMaxNumAllocatableRegisters;
+  }
 
   static int ToAllocationIndex(XMMRegister reg) {
     ASSERT(reg.code() != 0);
@@ -208,13 +214,13 @@ struct XMMRegister {
   }
 
   static XMMRegister FromAllocationIndex(int index) {
-    ASSERT(0 <= index && index < kNumAllocatableRegisters);
+    ASSERT(0 <= index && index < kMaxNumAllocatableRegisters);
     XMMRegister result = { index + 1 };
     return result;
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
     const char* const names[] = {
       "xmm1",
       "xmm2",
index ed0ec68..e156dfd 100644 (file)
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -646,6 +646,25 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
 
 
+void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across the notification; this is important for
+    // compiled stubs that tail-call the runtime on deopt, passing their
+    // parameters in registers.
+    __ Pushad();
+    __ CallRuntime(Runtime::kNotifyICMiss, 0);
+    __ Popad();
+    // Tear down internal frame.
+  }
+
+  __ pop(MemOperand(rsp, 0));  // Ignore state offset
+  __ ret(0);  // Return to IC Miss stub, continuation still on stack.
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   // Enter an internal frame.
@@ -660,17 +679,17 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
   }
 
   // Get the full codegen state from the stack and untag it.
-  __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+  __ SmiToInteger32(r10, Operand(rsp, 1 * kPointerSize));
 
   // Switch on the state.
   Label not_no_registers, not_tos_rax;
-  __ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
+  __ cmpq(r10, Immediate(FullCodeGenerator::NO_REGISTERS));
   __ j(not_equal, &not_no_registers, Label::kNear);
   __ ret(1 * kPointerSize);  // Remove state.
 
   __ bind(&not_no_registers);
   __ movq(rax, Operand(rsp, 2 * kPointerSize));
-  __ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
+  __ cmpq(r10, Immediate(FullCodeGenerator::TOS_REG));
   __ j(not_equal, &not_tos_rax, Label::kNear);
   __ ret(2 * kPointerSize);  // Remove state, rax.
 
index 9705718..2c39a0c 100644 (file)
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
 namespace v8 {
 namespace internal {
 
+
+CodeStubInterfaceDescriptor*
+    KeyedLoadFastElementStub::GetInterfaceDescriptor(Isolate* isolate) {
+  static CodeStubInterfaceDescriptor* result = NULL;
+  if (result == NULL) {
+    Handle<Code> miss = isolate->builtins()->KeyedLoadIC_Miss();
+    static Register registers[] = { rdx, rax };
+    static CodeStubInterfaceDescriptor info = {
+      2,
+      registers,
+      miss
+    };
+    result = &info;
+  }
+  return result;
+}
+
+
 #define __ ACCESS_MASM(masm)
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
index ab8ea76..71cef58 100644 (file)
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -37,7 +37,7 @@ namespace internal {
 
 // Compute a transcendental math function natively, or call the
 // TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
  public:
   enum ArgumentType {
     TAGGED = 0,
@@ -60,7 +60,7 @@ class TranscendentalCacheStub: public CodeStub {
 };
 
 
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
  public:
   explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
       : save_doubles_(save_fp) { }
@@ -79,7 +79,7 @@ class StoreBufferOverflowStub: public CodeStub {
 };
 
 
-class UnaryOpStub: public CodeStub {
+class UnaryOpStub: public PlatformCodeStub {
  public:
   UnaryOpStub(Token::Value op,
               UnaryOverwriteMode mode,
@@ -216,7 +216,7 @@ enum StringAddFlags {
 };
 
 
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
  public:
   explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
 
@@ -238,7 +238,7 @@ class StringAddStub: public CodeStub {
 };
 
 
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
  public:
   SubStringStub() {}
 
@@ -250,7 +250,7 @@ class SubStringStub: public CodeStub {
 };
 
 
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
  public:
   StringCompareStub() {}
 
@@ -287,7 +287,7 @@ class StringCompareStub: public CodeStub {
 };
 
 
-class NumberToStringStub: public CodeStub {
+class NumberToStringStub: public PlatformCodeStub {
  public:
   NumberToStringStub() { }
 
@@ -316,7 +316,7 @@ class NumberToStringStub: public CodeStub {
 };
 
 
-class StringDictionaryLookupStub: public CodeStub {
+class StringDictionaryLookupStub: public PlatformCodeStub {
  public:
   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
 
@@ -378,7 +378,7 @@ class StringDictionaryLookupStub: public CodeStub {
 };
 
 
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
  public:
   RecordWriteStub(Register object,
                   Register value,
@@ -561,7 +561,7 @@ class RecordWriteStub: public CodeStub {
     Register GetRegThatIsNotRcxOr(Register r1,
                                   Register r2,
                                   Register r3) {
-      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+      for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
         Register candidate = Register::FromAllocationIndex(i);
         if (candidate.is(rcx)) continue;
         if (candidate.is(r1)) continue;
index d444095..3a7646b 100644 (file)
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -44,6 +44,10 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
 
 class CodeGenerator: public AstVisitor {
  public:
+  CodeGenerator() {
+    InitializeAstVisitor();
+  }
+
   static bool MakeCode(CompilationInfo* info);
 
   // Printing of AST, etc. as requested by flags.
@@ -63,6 +67,8 @@ class CodeGenerator: public AstVisitor {
                               int pos,
                               bool right_here = false);
 
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
  private:
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
index a3fe8f9..2cdd3f0 100644 (file)
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -211,7 +211,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
 
 void Deoptimizer::DoComputeOsrOutputFrame() {
   DeoptimizationInputData* data = DeoptimizationInputData::cast(
-      optimized_code_->deoptimization_data());
+      compiled_code_->deoptimization_data());
   unsigned ast_id = data->OsrAstId()->value();
   // TODO(kasperl): This should not be the bailout_id_. It should be
   // the ast id. Confusing.
@@ -248,7 +248,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
   unsigned input_frame_size = input_->GetFrameSize();
   ASSERT(fixed_size + height_in_bytes == input_frame_size);
 
-  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+  unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
   unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
   unsigned outgoing_size = outgoing_height * kPointerSize;
   unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -340,7 +340,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
 
     unsigned pc_offset = data->OsrPcOffset()->value();
     intptr_t pc = reinterpret_cast<intptr_t>(
-        optimized_code_->entry() + pc_offset);
+        compiled_code_->entry() + pc_offset);
     output_[0]->SetPc(pc);
   }
   Code* continuation =
@@ -459,6 +459,70 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
 }
 
 
+void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
+                                      int frame_index) {
+  //
+  //               FROM                                  TO             <-rbp
+  //    |          ....           |          |          ....           |
+  //    +-------------------------+          +-------------------------+
+  //    | JSFunction continuation |          | JSFunction continuation |
+  //    +-------------------------+          +-------------------------+<-rsp
+  // |  |   saved frame (rbp)     |
+  // |  +=========================+<-rbp
+  // |  |   JSFunction context    |
+  // v  +-------------------------+
+  //    |   COMPILED_STUB marker  |          rbp = saved frame
+  //    +-------------------------+          rsi = JSFunction context
+  //    |                         |
+  //    | ...                     |
+  //    |                         |
+  //    +-------------------------+<-rsp
+  //
+  //
+  int output_frame_size = 1 * kPointerSize;
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, 0);
+  Code* notify_miss =
+      isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
+  output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+  output_frame->SetContinuation(
+      reinterpret_cast<intptr_t>(notify_miss->entry()));
+
+  ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
+  int major_key = compiled_code_->major_key();
+  CodeStubInterfaceDescriptor* descriptor =
+      isolate_->code_stub_interface_descriptors()[major_key];
+  Handle<Code> miss_ic(descriptor->deoptimization_handler);
+  output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
+  unsigned input_frame_size = input_->GetFrameSize();
+  intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
+  output_frame->SetFrameSlot(0, value);
+  value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
+  output_frame->SetRegister(rbp.code(), value);
+  output_frame->SetFp(value);
+  value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
+  output_frame->SetRegister(rsi.code(), value);
+
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  ASSERT(opcode == Translation::REGISTER);
+  USE(opcode);
+  int input_reg = iterator->Next();
+  intptr_t input_value = input_->GetRegister(input_reg);
+  output_frame->SetRegister(rdx.code(), input_value);
+
+  int32_t next = iterator->Next();
+  opcode = static_cast<Translation::Opcode>(next);
+  ASSERT(opcode == Translation::REGISTER);
+  input_reg = iterator->Next();
+  input_value = input_->GetRegister(input_reg);
+  output_frame->SetRegister(rax.code(), input_value);
+
+  ASSERT(frame_index == 0);
+  output_[frame_index] = output_frame;
+}
+
+
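
DoCompiledStubFrame consumes exactly two REGISTER translation commands,
re-installing the stub's receiver and key in the fixed parameter registers
(rdx and rax) that the stub's interface descriptor promised to the miss
handler. A toy decode loop modeling that hand-off, with made-up register
numbers:

    #include <cstdio>

    // Toy translation stream: (REGISTER, input_reg) command pairs, as consumed
    // above to re-install a stub's parameter registers after a deopt.
    enum Opcode { REGISTER = 0 };

    int main() {
      const int stream[] = { REGISTER, 3, REGISTER, 5 };
      const int param_regs[] = { 2, 0 };  // Stand-ins for rdx and rax.
      int input_regs[16] = { 0 };
      input_regs[3] = 42;  // Pretend the receiver lived in register 3.
      input_regs[5] = 7;   // Pretend the key lived in register 5.
      int output_regs[16] = { 0 };
      int pos = 0;
      for (int param = 0; param < 2; param++) {
        if (stream[pos++] != REGISTER) return 1;  // Malformed stream.
        const int input_reg = stream[pos++];
        output_regs[param_regs[param]] = input_regs[input_reg];
      }
      std::printf("rdx=%d rax=%d\n", output_regs[2], output_regs[0]);
      return 0;
    }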
 void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
                                               int frame_index) {
   Builtins* builtins = isolate_->builtins();
@@ -878,7 +942,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
   }
   input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
   input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
     input_->SetDoubleRegister(i, 0.0);
   }
 
@@ -898,10 +962,10 @@ void Deoptimizer::EntryGenerator::Generate() {
   const int kNumberOfRegisters = Register::kNumRegisters;
 
   const int kDoubleRegsSize = kDoubleSize *
-                              XMMRegister::kNumAllocatableRegisters;
+      XMMRegister::NumAllocatableRegisters();
   __ subq(rsp, Immediate(kDoubleRegsSize));
 
-  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
     XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
     int offset = i * kDoubleSize;
     __ movsd(Operand(rsp, offset), xmm_reg);
@@ -990,7 +1054,7 @@ void Deoptimizer::EntryGenerator::Generate() {
 
   // Fill in the double input registers.
   int double_regs_offset = FrameDescription::double_registers_offset();
-  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+  for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
     int dst_offset = i * kDoubleSize + double_regs_offset;
     __ pop(Operand(rbx, dst_offset));
   }
@@ -1011,10 +1075,13 @@ void Deoptimizer::EntryGenerator::Generate() {
   // limit and copy the contents of the activation frame to the input
   // frame description.
   __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
+  Label pop_loop_header;
+  __ jmp(&pop_loop_header);
   Label pop_loop;
   __ bind(&pop_loop);
   __ pop(Operand(rdx, 0));
   __ addq(rdx, Immediate(sizeof(intptr_t)));
+  __ bind(&pop_loop_header);
   __ cmpq(rcx, rsp);
   __ j(not_equal, &pop_loop);
 
@@ -1031,28 +1098,33 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ pop(rax);
 
   // Replace the current frame with the output frames.
-  Label outer_push_loop, inner_push_loop;
+  Label outer_push_loop, inner_push_loop,
+      outer_loop_header, inner_loop_header;
   // Outer loop state: rax = current FrameDescription**, rdx = one past the
   // last FrameDescription**.
   __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
   __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
   __ lea(rdx, Operand(rax, rdx, times_8, 0));
+  __ jmp(&outer_loop_header);
   __ bind(&outer_push_loop);
   // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
   __ movq(rbx, Operand(rax, 0));
   __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+  __ jmp(&inner_loop_header);
   __ bind(&inner_push_loop);
   __ subq(rcx, Immediate(sizeof(intptr_t)));
   __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
+  __ bind(&inner_loop_header);
   __ testq(rcx, rcx);
   __ j(not_zero, &inner_push_loop);
   __ addq(rax, Immediate(kPointerSize));
+  __ bind(&outer_loop_header);
   __ cmpq(rax, rdx);
   __ j(below, &outer_push_loop);
 
   // In case of OSR, we have to restore the XMM registers.
   if (type() == OSR) {
-    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+    for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
       XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
       int src_offset = i * kDoubleSize + double_regs_offset;
       __ movsd(xmm_reg, Operand(rbx, src_offset));
index 9217a94..3f01d3b 100644 (file)
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -119,35 +119,45 @@ void LCodeGen::Comment(const char* format, ...) {
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
-  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
 #ifdef DEBUG
-  if (strlen(FLAG_stop_at) > 0 &&
-      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
-    __ int3();
-  }
+    if (strlen(FLAG_stop_at) > 0 &&
+        info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+      __ int3();
+    }
 #endif
 
-  // Strict mode functions need to replace the receiver with undefined
-  // when called as functions (without an explicit receiver
-  // object). rcx is zero for method calls and non-zero for function
-  // calls.
-  if (!info_->is_classic_mode() || info_->is_native()) {
-    Label ok;
-    __ testq(rcx, rcx);
-    __ j(zero, &ok, Label::kNear);
-    // +1 for return address.
-    int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
-    __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
-    __ movq(Operand(rsp, receiver_offset), kScratchRegister);
-    __ bind(&ok);
+    // Strict mode functions need to replace the receiver with undefined
+    // when called as functions (without an explicit receiver
+    // object). rcx is zero for method calls and non-zero for function
+    // calls.
+    if (!info_->is_classic_mode() || info_->is_native()) {
+      Label ok;
+      __ testq(rcx, rcx);
+      __ j(zero, &ok, Label::kNear);
+      // +1 for return address.
+      int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+      __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+      __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+      __ bind(&ok);
+    }
   }
 
   info()->set_prologue_offset(masm_->pc_offset());
-  __ push(rbp);  // Caller's frame pointer.
-  __ movq(rbp, rsp);
-  __ push(rsi);  // Callee's context.
-  __ push(rdi);  // Callee's JS function.
+  if (NeedsEagerFrame()) {
+    ASSERT(!frame_is_built_);
+    frame_is_built_ = true;
+    __ push(rbp);  // Caller's frame pointer.
+    __ movq(rbp, rsp);
+    __ push(rsi);  // Callee's context.
+    if (info()->IsStub()) {
+      __ Push(Smi::FromInt(StackFrame::STUB));
+    } else {
+      __ push(rdi);  // Callee's JS function.
+    }
+  }
 
   // Reserve space for the stack slots needed by the code.
   int slots = GetStackSlotCount();
@@ -177,7 +187,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   // Possibly allocate a local context.
-  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is still in rdi.
@@ -213,7 +223,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   // Trace the call.
-  if (FLAG_trace) {
+  if (FLAG_trace && info()->IsOptimizing()) {
     __ CallRuntime(Runtime::kTraceEnter, 0);
   }
   return !is_aborted();
@@ -266,9 +276,55 @@ bool LCodeGen::GenerateBody() {
 
 
 bool LCodeGen::GenerateJumpTable() {
+  Label needs_frame_not_call;
+  Label needs_frame_is_call;
   for (int i = 0; i < jump_table_.length(); i++) {
     __ bind(&jump_table_[i].label);
-    __ Jump(jump_table_[i].address, RelocInfo::RUNTIME_ENTRY);
+    Address entry = jump_table_[i].address;
+    if (jump_table_[i].needs_frame) {
+      __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
+      if (jump_table_[i].is_lazy_deopt) {
+        if (needs_frame_is_call.is_bound()) {
+          __ jmp(&needs_frame_is_call);
+        } else {
+          __ bind(&needs_frame_is_call);
+          __ push(rbp);
+          __ movq(rbp, rsp);
+          __ push(rsi);
+          // This variant of deopt can only be used with stubs. Since we don't
+          // have a function pointer to install in the stack frame that we're
+          // building, install a special marker there instead.
+          ASSERT(info()->IsStub());
+          __ Move(rsi, Smi::FromInt(StackFrame::STUB));
+          __ push(rsi);
+          __ movq(rsi, MemOperand(rsp, kPointerSize));
+          __ call(kScratchRegister);
+        }
+      } else {
+        if (needs_frame_not_call.is_bound()) {
+          __ jmp(&needs_frame_not_call);
+        } else {
+          __ bind(&needs_frame_not_call);
+          __ push(rbp);
+          __ movq(rbp, rsp);
+          __ push(r8);
+          // This variant of deopt can only be used with stubs. Since we don't
+          // have a function pointer to install in the stack frame that we're
+          // building, install a special marker there instead.
+          ASSERT(info()->IsStub());
+          __ Move(rsi, Smi::FromInt(StackFrame::STUB));
+          __ push(rsi);
+          __ movq(rsi, MemOperand(rsp, kPointerSize));
+          __ jmp(kScratchRegister);
+        }
+      }
+    } else {
+      if (jump_table_[i].is_lazy_deopt) {
+        __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+      } else {
+        __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+      }
+    }
   }
   return !is_aborted();
 }
@@ -280,10 +336,32 @@ bool LCodeGen::GenerateDeferredCode() {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Deferred build frame",
+                code->instruction_index(),
+                code->instr()->Mnemonic());
+        ASSERT(!frame_is_built_);
+        ASSERT(info()->IsStub());
+        frame_is_built_ = true;
+        // Build the frame in such a way that rsi isn't trashed.
+        __ push(rbp);  // Caller's frame pointer.
+        __ push(Operand(rbp, StandardFrameConstants::kContextOffset));
+        __ Push(Smi::FromInt(StackFrame::STUB));
+        __ lea(rbp, Operand(rsp, 2 * kPointerSize));
+      }
       Comment(";;; Deferred code @%d: %s.",
               code->instruction_index(),
               code->instr()->Mnemonic());
       code->Generate();
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Deferred destroy frame",
+                code->instruction_index(),
+                code->instr()->Mnemonic());
+        ASSERT(frame_is_built_);
+        frame_is_built_ = false;
+        __ movq(rsp, rbp);
+        __ pop(rbp);
+      }
       __ jmp(code->exit());
     }
   }
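
The code->Generate() call that this frame handling wraps dispatches to small LDeferredCode subclasses. A representative shape, modeled on the existing lithium-codegen classes such as DeferredNumberTagD (illustrative, not part of this patch):

    class DeferredNumberTagD: public LDeferredCode {
     public:
      DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
          : LDeferredCode(codegen), instr_(instr) { }
      // Body runs between the "Deferred build frame" and "Deferred destroy
      // frame" sequences emitted above whenever NeedsDeferredFrame() holds.
      virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
      virtual LInstruction* instr() { return instr_; }
     private:
      LNumberTagD* instr_;
    };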
@@ -396,7 +474,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
                    translation,
                    arguments_index,
                    arguments_count);
-  int closure_id = *info()->closure() != *environment->closure()
+  bool has_closure_id = !info()->closure().is_null() &&
+      *info()->closure() != *environment->closure();
+  int closure_id = has_closure_id
       ? DefineDeoptimizationLiteral(environment->closure())
       : Translation::kSelfLiteralId;
 
@@ -420,6 +500,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
     case ARGUMENTS_ADAPTOR:
       translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
       break;
+    case STUB:
+      translation->BeginCompiledStubFrame();
+      break;
   }
 
   // Inlined frames which push their arguments cause the index to be
@@ -610,20 +693,33 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
-  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+  ASSERT(info()->IsOptimizing() || info()->IsStub());
+  Deoptimizer::BailoutType bailout_type = info()->IsStub()
+      ? Deoptimizer::LAZY
+      : Deoptimizer::EAGER;
+  Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
   if (entry == NULL) {
     Abort("bailout was not prepared");
     return;
   }
 
+  ASSERT(info()->IsStub() || frame_is_built_);
+  bool lazy_deopt = info()->IsStub();
   if (cc == no_condition) {
-    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+    if (lazy_deopt) {
+      __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+    } else {
+      __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+    }
   } else {
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
     if (jump_table_.is_empty() ||
-        jump_table_.last().address != entry) {
-      jump_table_.Add(JumpTableEntry(entry), zone());
+        jump_table_.last().address != entry ||
+        jump_table_.last().needs_frame != !frame_is_built_ ||
+        jump_table_.last().is_lazy_deopt != lazy_deopt) {
+      JumpTableEntry table_entry(entry, !frame_is_built_, lazy_deopt);
+      jump_table_.Add(table_entry, zone());
     }
     __ j(cc, &jump_table_.last().label);
   }
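
Written out as a standalone predicate, the widened reuse check above says the pending jump-table entry may be shared only when all three fields match (a restatement for clarity, not code from the patch):

    // needs_frame is passed as !frame_is_built_ at the call site.
    static bool CanReuseLastEntry(Address last_address, bool last_needs_frame,
                                  bool last_is_lazy_deopt, Address entry,
                                  bool needs_frame, bool is_lazy_deopt) {
      return last_address == entry &&
             last_needs_frame == needs_frame &&
             last_is_lazy_deopt == is_lazy_deopt;
    }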
@@ -2288,15 +2384,22 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
 
 
 void LCodeGen::DoReturn(LReturn* instr) {
-  if (FLAG_trace) {
+  if (FLAG_trace && info()->IsOptimizing()) {
     // Preserve the return value on the stack and rely on the runtime
     // call to return the value in the same register.
     __ push(rax);
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
-  __ movq(rsp, rbp);
-  __ pop(rbp);
-  __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
+  if (NeedsEagerFrame()) {
+    __ movq(rsp, rbp);
+    __ pop(rbp);
+  }
+  if (info()->IsStub()) {
+    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+    __ Ret(0, r10);
+  } else {
+    __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
+  }
 }
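
The split return paths encode two calling conventions: an optimized function pops its receiver and stack-passed parameters, while a Hydrogen stub receives every parameter in a register (see the interface-descriptor lookup in DoParameter below) and so pops nothing:

    // Return conventions implied by the code above:
    //   optimized function: Ret((GetParameterCount() + 1) * kPointerSize, rcx)
    //                       -- drops receiver plus parameters from the stack
    //   hydrogen stub:      Ret(0, r10)
    //                       -- no stack parameters to drop; rsi is first
    //                          restored from the frame's context slot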
 
 
@@ -4527,10 +4630,10 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
 void LCodeGen::DoCheckMapCommon(Register reg,
                                 Handle<Map> map,
                                 CompareMapMode mode,
-                                LEnvironment* env) {
+                                LInstruction* instr) {
   Label success;
   __ CompareMap(reg, map, &success, mode);
-  DeoptimizeIf(not_equal, env);
+  DeoptimizeIf(not_equal, instr->environment());
   __ bind(&success);
 }
 
@@ -4548,7 +4651,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
     __ j(equal, &success);
   }
   Handle<Map> map = map_set->last();
-  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
+  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
   __ bind(&success);
 }
 
@@ -4615,7 +4718,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
     DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
-                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+                     ALLOW_ELEMENT_TRANSITION_MAPS, instr);
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
     // Load next prototype object.
@@ -4624,7 +4727,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
 
   // Check the holder map.
     DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
-                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+                     ALLOW_ELEMENT_TRANSITION_MAPS, instr);
 }
 
 
@@ -5160,6 +5263,7 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
 
 
 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+  if (info()->IsStub()) return;
   // Ensure that we have enough space after the previous lazy-bailout
   // instruction for patching the code here.
   int current_pc = masm()->pc_offset();
index e068f14..2fa10e1 100644 (file)
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -63,6 +63,7 @@ class LCodeGen BASE_EMBEDDED {
         deferred_(8, info->zone()),
         osr_pc_offset_(-1),
         last_lazy_deopt_pc_(0),
+        frame_is_built_(false),
         safepoints_(info->zone()),
         resolver_(this),
         expected_safepoint_kind_(Safepoint::kSimple) {
@@ -77,6 +78,15 @@ class LCodeGen BASE_EMBEDDED {
   Heap* heap() const { return isolate()->heap(); }
   Zone* zone() const { return zone_; }
 
+  bool NeedsEagerFrame() const {
+    return GetStackSlotCount() > 0 ||
+        info()->is_non_deferred_calling() ||
+        !info()->IsStub();
+  }
+  bool NeedsDeferredFrame() const {
+    return !NeedsEagerFrame() && info()->is_deferred_calling();
+  }
+
   // Support for converting LOperands to assembler types.
   Register ToRegister(LOperand* op) const;
   XMMRegister ToDoubleRegister(LOperand* op) const;
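
Read together, the two predicates above give the following frame policy (derived directly from their conditions):

    // Frame policy:
    //   IsOptimizing()                        -> eager frame, always
    //   IsStub(), spill slots or calls on a
    //             non-deferred path           -> eager frame
    //   IsStub(), calls only in deferred code -> no eager frame; one is built
    //                                            around each deferred section
    //                                            (see GenerateDeferredCode)
    //   IsStub(), no calls at all             -> no frame at all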
@@ -110,7 +120,7 @@ class LCodeGen BASE_EMBEDDED {
                                        Label* map_check);
 
   void DoCheckMapCommon(Register reg, Handle<Map> map,
-                        CompareMapMode mode, LEnvironment* env);
+                        CompareMapMode mode, LInstruction* instr);
 
 // Parallel move support.
   void DoParallelMove(LParallelMove* move);
@@ -158,7 +168,7 @@ class LCodeGen BASE_EMBEDDED {
                        Register scratch);
 
   int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
-  int GetParameterCount() const { return scope()->num_parameters(); }
+  int GetParameterCount() const { return info()->num_parameters(); }
 
   void Abort(const char* reason);
   void Comment(const char* format, ...);
@@ -327,11 +337,15 @@ class LCodeGen BASE_EMBEDDED {
                     int* offset);
 
   struct JumpTableEntry {
-    explicit inline JumpTableEntry(Address entry)
+    inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
         : label(),
-          address(entry) { }
+          address(entry),
+          needs_frame(frame),
+          is_lazy_deopt(is_lazy) { }
     Label label;
     Address address;
+    bool needs_frame;
+    bool is_lazy_deopt;
   };
 
   void EnsureSpaceForLazyDeopt(int space_needed);
@@ -360,6 +374,7 @@ class LCodeGen BASE_EMBEDDED {
   ZoneList<LDeferredCode*> deferred_;
   int osr_pc_offset_;
   int last_lazy_deopt_pc_;
+  bool frame_is_built_;
 
   // Builder that keeps track of safepoints in the code. The table
   // itself is emitted at the end of the generated code.
@@ -374,6 +389,7 @@ class LCodeGen BASE_EMBEDDED {
    public:
     explicit PushSafepointRegistersScope(LCodeGen* codegen)
         : codegen_(codegen) {
+      ASSERT(codegen_->info()->is_calling());
       ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
       codegen_->masm_->PushSafepointRegisters();
       codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
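
The new assertion ties register-saving safepoints to code that was marked as calling during chunk building (see MarkAsCall and MarkAsDeferredCalling below). Usage follows the existing RAII pattern; the runtime call here is illustrative:

    {
      PushSafepointRegistersScope scope(this);  // asserts info()->is_calling()
      CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
      // Results must be written back through the safepoint register slots.
      __ StoreToSafepointRegisterSlot(reg, rax);
    }  // destructor pops the registers and restores kSimple mode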
index e102803..defdafa 100644 (file)
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -44,10 +44,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
 #undef DEFINE_COMPILE
 
 LOsrEntry::LOsrEntry() {
-  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
     register_spills_[i] = NULL;
   }
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
     double_register_spills_[i] = NULL;
   }
 }
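
kNumAllocatableRegisters stops being a compile-time constant here because the usable register set may now depend on detected CPU features (VFP2 on ARM being the motivating case); callers query NumAllocatableRegisters() at runtime and size fixed arrays with the kMaxNumAllocatableRegisters bound instead:

    // Old: same constant on every CPU.
    //   for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) ...
    // New: runtime query plus a compile-time bound for array sizing (see the
    // LOsrEntry spill arrays in lithium-x64.h below).
    //   for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) ...
    //   LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];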
@@ -619,6 +619,8 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
+  info()->MarkAsNonDeferredCalling();
+
 #ifdef DEBUG
   instr->VerifyCall();
 #endif
@@ -1617,8 +1619,12 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
 LInstruction* LChunkBuilder::DoChange(HChange* instr) {
   Representation from = instr->from();
   Representation to = instr->to();
+  // Only mark conversions that might need to allocate as calling rather than
+  // all changes, so that simple, non-allocating conversions do not force
+  // building a stack frame.
   if (from.IsTagged()) {
     if (to.IsDouble()) {
+      info()->MarkAsDeferredCalling();
       LOperand* value = UseRegister(instr->value());
       LNumberUntagD* res = new(zone()) LNumberUntagD(value);
       return AssignEnvironment(DefineAsRegister(res));
@@ -1636,6 +1642,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
     }
   } else if (from.IsDouble()) {
     if (to.IsTagged()) {
+      info()->MarkAsDeferredCalling();
       LOperand* value = UseRegister(instr->value());
       LOperand* temp = TempRegister();
 
@@ -1649,6 +1656,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
       return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToI(value)));
     }
   } else if (from.IsInteger32()) {
+    info()->MarkAsDeferredCalling();
     if (to.IsTagged()) {
       HValue* val = instr->value();
       LOperand* value = UseRegister(val);
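
Summarizing the MarkAsDeferredCalling() calls in this function, per the comment above:

    // Conversions marked as deferred-calling in DoChange:
    //   tagged -> double  (LNumberUntagD)
    //   double -> tagged  (LNumberTagD: allocates a HeapNumber in deferred code)
    //   int32  -> any     (marked unconditionally; LNumberTagI allocates when
    //                      the value overflows the Smi range)
    // Everything else stays unmarked, so a stub built only from
    // non-allocating operations keeps NeedsDeferredFrame() false.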
@@ -2115,8 +2123,17 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
 
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
-  int spill_index = chunk()->GetParameterStackSlot(instr->index());
-  return DefineAsSpilled(new(zone()) LParameter, spill_index);
+  LParameter* result = new(zone()) LParameter;
+  if (info()->IsOptimizing()) {
+    int spill_index = chunk()->GetParameterStackSlot(instr->index());
+    return DefineAsSpilled(result, spill_index);
+  } else {
+    ASSERT(info()->IsStub());
+    CodeStubInterfaceDescriptor* descriptor =
+        info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+    Register reg = descriptor->register_params[instr->index()];
+    return DefineFixed(result, reg);
+  }
 }
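
For a stub there are no stack-allocated parameters: each HParameter is pinned to the register named by the stub's interface descriptor. A rough sketch of the descriptor's shape -- only register_params appears in the code above; the rest is assumed:

    // Assumed shape, illustrative only.
    struct CodeStubInterfaceDescriptor {
      int register_param_count;
      const Register* register_params;  // register_params[i] holds parameter i
    };
    // For the keyed-load stub on x64 the registers match the IC convention
    // visible in stub-cache-x64.cc below: rdx (receiver) and rax (key).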
 
 
@@ -2212,6 +2229,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
 
 
 LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  info()->MarkAsDeferredCalling();
   if (instr->is_function_entry()) {
     return MarkAsCall(new(zone()) LStackCheck, instr);
   } else {
index b5d435b..b37d2ac 100644 (file)
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -251,6 +251,11 @@ class LInstruction: public ZoneObject {
 
   void MarkAsCall() { is_call_ = true; }
 
+  // Interface to the register allocator and iterators.
+  bool ClobbersTemps() const { return is_call_; }
+  bool ClobbersRegisters() const { return is_call_; }
+  bool ClobbersDoubleRegisters() const { return is_call_; }
+
   virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
 
   // Interface to the register allocator and iterators.
@@ -2266,8 +2271,9 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
   // slot, i.e., that must also be restored to the spill slot on OSR entry.
   // NULL if the register has no assigned spill slot.  Indexed by allocation
   // index.
-  LOperand* register_spills_[Register::kNumAllocatableRegisters];
-  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+  LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
+  LOperand* double_register_spills_[
+      DoubleRegister::kMaxNumAllocatableRegisters];
 };
 
 
index 4e4f2c5..8513a68 100644 (file)
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -3432,7 +3432,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
         arg_stack_space * kPointerSize;
     subq(rsp, Immediate(space));
     int offset = -2 * kPointerSize;
-    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+    for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
       XMMRegister reg = XMMRegister::FromAllocationIndex(i);
       movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
     }
@@ -3476,7 +3476,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
   // r15 : argv
   if (save_doubles) {
     int offset = -2 * kPointerSize;
-    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+    for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
       XMMRegister reg = XMMRegister::FromAllocationIndex(i);
       movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
     }
index 0d8d6f2..716a591 100644 (file)
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -1414,9 +1414,9 @@ class MacroAssembler: public Assembler {
     return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
   }
 
-  // Needs access to SafepointRegisterStackIndex for optimized frame
+  // Needs access to SafepointRegisterStackIndex for compiled frame
   // traversal.
-  friend class OptimizedFrame;
+  friend class CompiledFrame;
 };
 
 
index 683aa9d..0329966 100644 (file)
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -3210,12 +3210,19 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
   //  -- rsp[0] : return address
   // -----------------------------------
   ElementsKind elements_kind = receiver_map->elements_kind();
-  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
-  __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
+  if (receiver_map->has_fast_elements() ||
+      receiver_map->has_external_array_elements()) {
+    Handle<Code> stub = KeyedLoadFastElementStub(
+        receiver_map->instance_type() == JS_ARRAY_TYPE,
+        elements_kind).GetCode();
+    __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
+  } else {
+    Handle<Code> stub =
+        KeyedLoadDictionaryElementStub().GetCode();
+    __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
+  }
 
-  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
-  __ jmp(ic, RelocInfo::CODE_TARGET);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
   return GetCode(Code::NORMAL, factory()->empty_string());
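
KeyedLoadFastElementStub here is one of the stubs now produced by the Hydrogen pipeline instead of hand-written assembly; the call chain, sketched from the pieces this patch adds:

    // Where the dispatched code now comes from (sketch):
    //   KeyedLoadFastElementStub(is_js_array, elements_kind).GetCode()
    //     -> builds a Hydrogen graph for the monomorphic element load
    //        (code-stubs-hydrogen.cc, new in this patch)
    //     -> runs the usual Lithium pipeline, emitting code whose frames use
    //        the StackFrame::STUB marker and the lazy-deopt machinery added
    //        in lithium-codegen-x64.cc above
    // KeyedLoadDictionaryElementStub remains a conventional platform stub.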
@@ -3457,140 +3464,6 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
 }
 
 
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
-    MacroAssembler* masm,
-    ElementsKind elements_kind) {
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  Label slow, miss_force_generic;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
-
-  // Check that the index is in range.
-  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ SmiToInteger32(rcx, rax);
-  __ cmpq(rax, FieldOperand(rbx, ExternalArray::kLengthOffset));
-  // Unsigned comparison catches both negative and too-large values.
-  __ j(above_equal, &miss_force_generic);
-
-  // rax: index (as a smi)
-  // rdx: receiver (JSObject)
-  // rcx: untagged index
-  // rbx: elements array
-  __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
-  // rbx: base pointer of external storage
-  switch (elements_kind) {
-    case EXTERNAL_BYTE_ELEMENTS:
-      __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
-      break;
-    case EXTERNAL_PIXEL_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
-      break;
-    case EXTERNAL_SHORT_ELEMENTS:
-      __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
-      break;
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
-      break;
-    case EXTERNAL_INT_ELEMENTS:
-      __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
-      break;
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-      __ movl(rcx, Operand(rbx, rcx, times_4, 0));
-      break;
-    case EXTERNAL_FLOAT_ELEMENTS:
-      __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
-      break;
-    case EXTERNAL_DOUBLE_ELEMENTS:
-      __ movsd(xmm0, Operand(rbx, rcx, times_8, 0));
-      break;
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  // rax: index
-  // rdx: receiver
-  // For integer array types:
-  // rcx: value
-  // For floating-point array type:
-  // xmm0: value as double.
-
-  ASSERT(kSmiValueSize == 32);
-  if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
-    // For the UnsignedInt array type, we need to see whether
-    // the value can be represented in a Smi. If not, we need to convert
-    // it to a HeapNumber.
-    Label box_int;
-
-    __ JumpIfUIntNotValidSmiValue(rcx, &box_int, Label::kNear);
-
-    __ Integer32ToSmi(rax, rcx);
-    __ ret(0);
-
-    __ bind(&box_int);
-
-    // Allocate a HeapNumber for the int and perform int-to-double
-    // conversion.
-    // The value is zero-extended since we loaded the value from memory
-    // with movl.
-    __ cvtqsi2sd(xmm0, rcx);
-
-    __ AllocateHeapNumber(rcx, rbx, &slow);
-    // Set the value.
-    __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
-    __ movq(rax, rcx);
-    __ ret(0);
-  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
-             elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-    // For the floating-point array type, we need to always allocate a
-    // HeapNumber.
-    __ AllocateHeapNumber(rcx, rbx, &slow);
-    // Set the value.
-    __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
-    __ movq(rax, rcx);
-    __ ret(0);
-  } else {
-    __ Integer32ToSmi(rax, rcx);
-    __ ret(0);
-  }
-
-  // Slow case: Jump to runtime.
-  __ bind(&slow);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
-
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0]  : return address
-  // -----------------------------------
-
-  Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
-  __ jmp(ic, RelocInfo::CODE_TARGET);
-
-  // Miss case: Jump to runtime.
-  __ bind(&miss_force_generic);
-
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0]  : return address
-  // -----------------------------------
-  Handle<Code> miss_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
 void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     MacroAssembler* masm,
     ElementsKind elements_kind) {
@@ -3780,98 +3653,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
 }
 
 
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  Label miss_force_generic;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
-
-  // Get the elements array.
-  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ AssertFastElements(rcx);
-
-  // Check that the key is within bounds.
-  __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
-  __ j(above_equal, &miss_force_generic);
-
-  // Load the result and make sure it's not the hole.
-  SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
-  __ movq(rbx, FieldOperand(rcx,
-                            index.reg,
-                            index.scale,
-                            FixedArray::kHeaderSize));
-  __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
-  __ j(equal, &miss_force_generic);
-  __ movq(rax, rbx);
-  __ ret(0);
-
-  __ bind(&miss_force_generic);
-  Code* code = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  Handle<Code> ic(code);
-  __ jmp(ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
-    MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  Label miss_force_generic, slow_allocate_heapnumber;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
-
-  // Get the elements array.
-  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ AssertFastElements(rcx);
-
-  // Check that the key is within bounds.
-  __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
-  __ j(above_equal, &miss_force_generic);
-
-  // Check for the hole
-  __ SmiToInteger32(kScratchRegister, rax);
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ cmpl(FieldOperand(rcx, kScratchRegister, times_8, offset),
-          Immediate(kHoleNanUpper32));
-  __ j(equal, &miss_force_generic);
-
-  // Always allocate a heap number for the result.
-  __ movsd(xmm0, FieldOperand(rcx, kScratchRegister, times_8,
-                              FixedDoubleArray::kHeaderSize));
-  __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
-  // Set the value.
-  __ movq(rax, rcx);
-  __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
-  __ ret(0);
-
-  __ bind(&slow_allocate_heapnumber);
-  Handle<Code> slow_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_Slow();
-  __ jmp(slow_ic, RelocInfo::CODE_TARGET);
-
-  __ bind(&miss_force_generic);
-  Handle<Code> miss_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
 void KeyedStoreStubCompiler::GenerateStoreFastElement(
     MacroAssembler* masm,
     bool is_js_array,
index 69abd8d..649b214 100644 (file)
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -546,7 +546,7 @@ TEST(BootUpMemoryUse) {
       }
     } else {
       if (v8::internal::Snapshot::IsEnabled()) {
-        CHECK_LE(delta, 2500 * 1024);  // 2400.
+        CHECK_LE(delta, 2600 * 1024);  // 2400.
       } else {
         CHECK_LE(delta, 2860 * 1024);  // 2760.
       }
index 7cc462e..29a2abb 100644 (file)
--- a/test/mjsunit/fuzz-natives-part1.js
+++ b/test/mjsunit/fuzz-natives-part1.js
@@ -152,6 +152,7 @@ var knownProblems = {
   "LazyRecompile": true,
   "ParallelRecompile": true,
   "NotifyDeoptimized": true,
+  "NotifyICMiss": true,
   "NotifyOSR": true,
   "CreateObjectLiteralBoilerplate": true,
   "CloneLiteralBoilerplate": true,
index aad07c7..046a72c 100644 (file)
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
             '../../src/circular-queue.h',
             '../../src/code-stubs.cc',
             '../../src/code-stubs.h',
+            '../../src/code-stubs-hydrogen.cc',
             '../../src/code.h',
             '../../src/codegen.cc',
             '../../src/codegen.h',