Revert 13157, 13145 and 13140: Crankshaft code stubs.
author danno@chromium.org <danno@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Mon, 10 Dec 2012 11:09:12 +0000 (11:09 +0000)
committer danno@chromium.org <danno@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Mon, 10 Dec 2012 11:09:12 +0000 (11:09 +0000)
R=jkummerow@chromium.org

Review URL: https://codereview.chromium.org/11498006

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13179 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

97 files changed:
Makefile
src/arm/assembler-arm-inl.h
src/arm/assembler-arm.cc
src/arm/assembler-arm.h
src/arm/builtins-arm.cc
src/arm/code-stubs-arm.cc
src/arm/code-stubs-arm.h
src/arm/codegen-arm.cc
src/arm/codegen-arm.h
src/arm/deoptimizer-arm.cc
src/arm/lithium-arm.cc
src/arm/lithium-arm.h
src/arm/lithium-codegen-arm.cc
src/arm/lithium-codegen-arm.h
src/arm/lithium-gap-resolver-arm.cc
src/arm/macro-assembler-arm.cc
src/arm/macro-assembler-arm.h
src/arm/stub-cache-arm.cc
src/assembler.cc
src/assembler.h
src/ast.cc
src/ast.h
src/builtins.h
src/code-stubs-hydrogen.cc [deleted file]
src/code-stubs.cc
src/code-stubs.h
src/codegen.cc
src/compiler.cc
src/compiler.h
src/deoptimizer.cc
src/deoptimizer.h
src/disassembler.cc
src/frames-inl.h
src/frames.cc
src/frames.h
src/full-codegen.cc
src/full-codegen.h
src/hydrogen.cc
src/hydrogen.h
src/ia32/assembler-ia32.cc
src/ia32/assembler-ia32.h
src/ia32/builtins-ia32.cc
src/ia32/code-stubs-ia32.cc
src/ia32/code-stubs-ia32.h
src/ia32/deoptimizer-ia32.cc
src/ia32/lithium-codegen-ia32.cc
src/ia32/lithium-codegen-ia32.h
src/ia32/lithium-gap-resolver-ia32.cc
src/ia32/lithium-gap-resolver-ia32.h
src/ia32/lithium-ia32.cc
src/ia32/lithium-ia32.h
src/ia32/macro-assembler-ia32.cc
src/ia32/macro-assembler-ia32.h
src/ia32/stub-cache-ia32.cc
src/ic.cc
src/isolate.cc
src/isolate.h
src/lithium-allocator.cc
src/lithium-allocator.h
src/lithium.cc
src/lithium.h
src/log.cc
src/mips/codegen-mips.h
src/objects-inl.h
src/objects.cc
src/objects.h
src/optimizing-compiler-thread.h
src/prettyprinter.cc
src/prettyprinter.h
src/rewriter.cc
src/runtime.cc
src/runtime.h
src/safepoint-table.cc
src/serialize.cc
src/serialize.h
src/smart-pointers.h
src/spaces.cc
src/stub-cache.h
src/utils.h
src/x64/assembler-x64.cc
src/x64/assembler-x64.h
src/x64/builtins-x64.cc
src/x64/code-stubs-x64.cc
src/x64/code-stubs-x64.h
src/x64/codegen-x64.h
src/x64/deoptimizer-x64.cc
src/x64/lithium-codegen-x64.cc
src/x64/lithium-codegen-x64.h
src/x64/lithium-x64.cc
src/x64/lithium-x64.h
src/x64/macro-assembler-x64.cc
src/x64/macro-assembler-x64.h
src/x64/stub-cache-x64.cc
test/cctest/test-disasm-ia32.cc
test/cctest/test-mark-compact.cc
test/mjsunit/fuzz-natives-part1.js
tools/gyp/v8.gyp
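
A pattern recurs throughout the hunks below: the reverted patches had turned fixed register counts (kNumAllocatableRegisters) into a compile-time maximum (kMaxNumAllocatableRegisters) plus runtime queries (NumAllocatableRegisters()), so that code stubs could be compiled on ARM without VFP2; this revert restores the single compile-time constants. What follows is a minimal standalone sketch of that API difference, not code from this commit — the vfp2_supported flag is a stand-in for V8's CpuFeatures::IsSupported(VFP2) probe, and only the register-count members are modeled.

#include <cassert>

// Stand-in for V8's CpuFeatures::IsSupported(VFP2) runtime probe
// (a hypothetical simplification, not the real interface).
static bool vfp2_supported = true;

struct DwVfpRegister {
  static const int kNumRegisters = 16;
  static const int kNumReservedRegisters = 2;  // d14 holds 0.0, d15 is scratch

  // Restored by this revert: one compile-time count, used both to size
  // spill arrays (see the lithium-arm.h hunk) and as a loop bound.
  static const int kNumAllocatableRegisters =
      kNumRegisters - kNumReservedRegisters;  // 14, i.e. d0..d13

  // Removed by this revert: the reverted patches sized arrays with a
  // compile-time maximum but iterated with a runtime count that shrank
  // to a single pseudo register ("sfpd0") when VFP2 was unavailable.
  static const int kMaxNumAllocatableRegisters = kNumAllocatableRegisters;
  static int NumAllocatableRegisters() {
    return vfp2_supported ? kMaxNumAllocatableRegisters : 1;
  }
};

int main() {
  assert(DwVfpRegister::kNumAllocatableRegisters == 14);
  vfp2_supported = false;  // pretend the CPU lacks VFP2
  assert(DwVfpRegister::NumAllocatableRegisters() == 1);
  return 0;
}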

index cf948383b83f74d623307324aaad92c225c20422..b65ea4c9f949c9329dc0cc3b94e1267cd08d8df3 100644
--- a/Makefile
+++ b/Makefile
@@ -81,12 +81,6 @@ endif
 ifeq ($(liveobjectlist), on)
   GYPFLAGS += -Dv8_use_liveobjectlist=true
 endif
-# vfp2=off
-ifeq ($(vfp2), off)
-  GYPFLAGS += -Dv8_can_use_vfp2_instructions=false
-else
-  GYPFLAGS += -Dv8_can_use_vfp2_instructions=true
-endif
 # vfp3=off
 ifeq ($(vfp3), off)
   GYPFLAGS += -Dv8_can_use_vfp3_instructions=false
index a31b7e936ef255883c04177af3e1440e447bd063..acd61feff89711daf952ae3714efc7c88a030545 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -47,33 +47,6 @@ namespace v8 {
 namespace internal {
 
 
-int Register::NumAllocatableRegisters() {
-  if (CpuFeatures::IsSupported(VFP2)) {
-    return kMaxNumAllocatableRegisters;
-  } else {
-    return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double;
-  }
-}
-
-
-int DwVfpRegister::NumRegisters() {
-  if (CpuFeatures::IsSupported(VFP2)) {
-    return DwVfpRegister::kNumRegisters;
-  } else {
-    return 1;
-  }
-}
-
-
-int DwVfpRegister::NumAllocatableRegisters() {
-  if (CpuFeatures::IsSupported(VFP2)) {
-    return DwVfpRegister::kMaxNumAllocatableRegisters;
-  } else {
-    return 1;
-  }
-}
-
-
 int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
   ASSERT(!reg.is(kDoubleRegZero));
   ASSERT(!reg.is(kScratchDoubleReg));
index 52edb3925a51283e17d907681dc318b166307bd2..47ea0e20666e2f44621e42959b7094067c43ed79 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -85,33 +85,6 @@ static unsigned CpuFeaturesImpliedByCompiler() {
 }
 
 
-const char* DwVfpRegister::AllocationIndexToString(int index) {
-  if (CpuFeatures::IsSupported(VFP2)) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
-    const char* const names[] = {
-      "d0",
-      "d1",
-      "d2",
-      "d3",
-      "d4",
-      "d5",
-      "d6",
-      "d7",
-      "d8",
-      "d9",
-      "d10",
-      "d11",
-      "d12",
-      "d13"
-    };
-    return names[index];
-  } else {
-    ASSERT(index == 0);
-    return "sfpd0";
-  }
-}
-
-
 void CpuFeatures::Probe() {
   unsigned standard_features = static_cast<unsigned>(
       OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
index a361c7ec1180fd8af3ebb76cd344a49d5ee9c2c8..3b9bb804fd331c144563d6522239c6967933a817 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -71,24 +71,21 @@ namespace internal {
 // Core register
 struct Register {
   static const int kNumRegisters = 16;
-  static const int kMaxNumAllocatableRegisters = 8;
+  static const int kNumAllocatableRegisters = 8;
   static const int kSizeInBytes = 4;
-  static const int kGPRsPerNonVFP2Double = 2;
-
-  inline static int NumAllocatableRegisters();
 
   static int ToAllocationIndex(Register reg) {
-    ASSERT(reg.code() < kMaxNumAllocatableRegisters);
+    ASSERT(reg.code() < kNumAllocatableRegisters);
     return reg.code();
   }
 
   static Register FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
     return from_code(index);
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
     const char* const names[] = {
       "r0",
       "r1",
@@ -168,6 +165,7 @@ const Register sp  = { kRegister_sp_Code };
 const Register lr  = { kRegister_lr_Code };
 const Register pc  = { kRegister_pc_Code };
 
+
 // Single word VFP register.
 struct SwVfpRegister {
   bool is_valid() const { return 0 <= code_ && code_ < 32; }
@@ -198,19 +196,37 @@ struct DwVfpRegister {
   //  d14: 0.0
   //  d15: scratch register.
   static const int kNumReservedRegisters = 2;
-  static const int kMaxNumAllocatableRegisters = kNumRegisters -
+  static const int kNumAllocatableRegisters = kNumRegisters -
       kNumReservedRegisters;
 
-  inline static int NumRegisters();
-  inline static int NumAllocatableRegisters();
   inline static int ToAllocationIndex(DwVfpRegister reg);
-  static const char* AllocationIndexToString(int index);
 
   static DwVfpRegister FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
     return from_code(index);
   }
 
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "d0",
+      "d1",
+      "d2",
+      "d3",
+      "d4",
+      "d5",
+      "d6",
+      "d7",
+      "d8",
+      "d9",
+      "d10",
+      "d11",
+      "d12",
+      "d13"
+    };
+    return names[index];
+  }
+
   static DwVfpRegister from_code(int code) {
     DwVfpRegister r = { code };
     return r;
@@ -307,9 +323,6 @@ const DwVfpRegister d13 = { 13 };
 const DwVfpRegister d14 = { 14 };
 const DwVfpRegister d15 = { 15 };
 
-const Register sfpd_lo  = { kRegister_r6_Code };
-const Register sfpd_hi  = { kRegister_r7_Code };
-
 // Aliases for double registers.  Defined using #define instead of
 // "static const DwVfpRegister&" because Clang complains otherwise when a
 // compilation unit that includes this header doesn't use the variables.
index 28e00dde4c9002b16a559d002bb9c1f82d7fc650..24d14e8c8a1efb62bbea08e366f4f1816bbd9d1f 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -1259,26 +1259,6 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
 
 
-void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    // Preserve registers across notification, this is important for compiled
-    // stubs that tail call the runtime on deopts passing their parameters in
-    // registers.
-    __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
-    // Pass the function and deoptimization type to the runtime system.
-    __ CallRuntime(Runtime::kNotifyICMiss, 0);
-    __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
-  }
-
-  __ mov(ip, lr);  // Stash the miss continuation
-  __ add(sp, sp, Operand(kPointerSize));  // Ignore state
-  __ pop(lr);  // Restore LR to continuation in JSFunction
-  __ mov(pc, ip);  // Jump to miss handler
-}
-
-
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {
index a571f9f7cd1068d1c5989b091b2169af9e7a3533..9484f85f97e38ff87de543ec4c25dee3b9d6984e 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -37,17 +37,6 @@ namespace v8 {
 namespace internal {
 
 
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r1, r0 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      isolate->builtins()->KeyedLoadIC_Miss();
-}
-
-
 #define __ ACCESS_MASM(masm)
 
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
@@ -514,7 +503,7 @@ void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
 // 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
 // scratch register.  Destroys the source register.  No GC occurs during this
 // stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public PlatformCodeStub {
+class ConvertToDoubleStub : public CodeStub {
  public:
   ConvertToDoubleStub(Register result_reg_1,
                       Register result_reg_2,
@@ -3579,10 +3568,10 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   const Register exponent = r2;
   const Register heapnumbermap = r5;
   const Register heapnumber = r0;
-  const DwVfpRegister double_base = d1;
-  const DwVfpRegister double_exponent = d2;
-  const DwVfpRegister double_result = d3;
-  const DwVfpRegister double_scratch = d0;
+  const DoubleRegister double_base = d1;
+  const DoubleRegister double_exponent = d2;
+  const DoubleRegister double_result = d3;
+  const DoubleRegister double_scratch = d0;
   const SwVfpRegister single_scratch = s0;
   const Register scratch = r9;
   const Register scratch2 = r7;
@@ -3792,29 +3781,12 @@ void CodeStub::GenerateStubsAheadOfTime() {
 
 
 void CodeStub::GenerateFPStubs() {
-  SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
-      ? kSaveFPRegs
-      : kDontSaveFPRegs;
-  CEntryStub save_doubles(1, mode);
-  StoreBufferOverflowStub stub(mode);
-  // These stubs might already be in the snapshot, detect that and don't
-  // regenerate, which would lead to code stub initialization state being messed
-  // up.
-  Code* save_doubles_code = NULL;
-  Code* store_buffer_overflow_code = NULL;
-  if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
-    if (CpuFeatures::IsSupported(VFP2)) {
-      CpuFeatures::Scope scope2(VFP2);
-      save_doubles_code = *save_doubles.GetCode();
-      store_buffer_overflow_code = *stub.GetCode();
-    } else {
-      save_doubles_code = *save_doubles.GetCode();
-      store_buffer_overflow_code = *stub.GetCode();
-    }
-    save_doubles_code->set_is_pregenerated(true);
-    store_buffer_overflow_code->set_is_pregenerated(true);
-  }
-  ISOLATE->set_fp_stubs_generated(true);
+  CEntryStub save_doubles(1, kSaveFPRegs);
+  Handle<Code> code = save_doubles.GetCode();
+  code->set_is_pregenerated(true);
+  StoreBufferOverflowStub stub(kSaveFPRegs);
+  stub.GetCode()->set_is_pregenerated(true);
+  code->GetIsolate()->set_fp_stubs_generated(true);
 }
 
 
index 6f964a89677821b0dcdcad2b9272ca2cd3b64fea..0443cf799cc08e5d83e8ebe88f6de929dc8b5d12 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -36,7 +36,7 @@ namespace internal {
 
 // Compute a transcendental math function natively, or call the
 // TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
+class TranscendentalCacheStub: public CodeStub {
  public:
   enum ArgumentType {
     TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
@@ -58,7 +58,7 @@ class TranscendentalCacheStub: public PlatformCodeStub {
 };
 
 
-class StoreBufferOverflowStub: public PlatformCodeStub {
+class StoreBufferOverflowStub: public CodeStub {
  public:
   explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
       : save_doubles_(save_fp) { }
@@ -77,7 +77,7 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
 };
 
 
-class UnaryOpStub: public PlatformCodeStub {
+class UnaryOpStub: public CodeStub {
  public:
   UnaryOpStub(Token::Value op,
               UnaryOverwriteMode mode,
@@ -219,7 +219,7 @@ enum StringAddFlags {
 };
 
 
-class StringAddStub: public PlatformCodeStub {
+class StringAddStub: public CodeStub {
  public:
   explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
 
@@ -242,7 +242,7 @@ class StringAddStub: public PlatformCodeStub {
 };
 
 
-class SubStringStub: public PlatformCodeStub {
+class SubStringStub: public CodeStub {
  public:
   SubStringStub() {}
 
@@ -255,7 +255,7 @@ class SubStringStub: public PlatformCodeStub {
 
 
 
-class StringCompareStub: public PlatformCodeStub {
+class StringCompareStub: public CodeStub {
  public:
   StringCompareStub() { }
 
@@ -295,7 +295,7 @@ class StringCompareStub: public PlatformCodeStub {
 // This stub can convert a signed int32 to a heap number (double).  It does
 // not work for int32s that are in Smi range!  No GC occurs during this stub
 // so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
+class WriteInt32ToHeapNumberStub : public CodeStub {
  public:
   WriteInt32ToHeapNumberStub(Register the_int,
                              Register the_heap_number,
@@ -329,7 +329,7 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
 };
 
 
-class NumberToStringStub: public PlatformCodeStub {
+class NumberToStringStub: public CodeStub {
  public:
   NumberToStringStub() { }
 
@@ -355,7 +355,7 @@ class NumberToStringStub: public PlatformCodeStub {
 };
 
 
-class RecordWriteStub: public PlatformCodeStub {
+class RecordWriteStub: public CodeStub {
  public:
   RecordWriteStub(Register object,
                   Register value,
@@ -511,7 +511,7 @@ class RecordWriteStub: public PlatformCodeStub {
     Register GetRegThatIsNotOneOf(Register r1,
                                   Register r2,
                                   Register r3) {
-      for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
         Register candidate = Register::FromAllocationIndex(i);
         if (candidate.is(r1)) continue;
         if (candidate.is(r2)) continue;
@@ -570,7 +570,7 @@ class RecordWriteStub: public PlatformCodeStub {
 // Enter C code from generated RegExp code in a way that allows
 // the C code to fix the return address in case of a GC.
 // Currently only needed on ARM.
-class RegExpCEntryStub: public PlatformCodeStub {
+class RegExpCEntryStub: public CodeStub {
  public:
   RegExpCEntryStub() {}
   virtual ~RegExpCEntryStub() {}
@@ -589,7 +589,7 @@ class RegExpCEntryStub: public PlatformCodeStub {
 // keep the code which called into native pinned in the memory. Currently the
 // simplest approach is to generate such stub early enough so it can never be
 // moved by GC
-class DirectCEntryStub: public PlatformCodeStub {
+class DirectCEntryStub: public CodeStub {
  public:
   DirectCEntryStub() {}
   void Generate(MacroAssembler* masm);
@@ -739,7 +739,7 @@ class FloatingPointHelper : public AllStatic {
 };
 
 
-class StringDictionaryLookupStub: public PlatformCodeStub {
+class StringDictionaryLookupStub: public CodeStub {
  public:
   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
 
index 5e8739cd6a5d89ff47e5213f68bb0e150ade3675..bb771b18e23a88161f8c441f4c9f37ae7246ab92 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -73,10 +73,10 @@ UnaryMathFunction CreateExpFunction() {
 
   {
     CpuFeatures::Scope use_vfp(VFP2);
-    DwVfpRegister input = d0;
-    DwVfpRegister result = d1;
-    DwVfpRegister double_scratch1 = d2;
-    DwVfpRegister double_scratch2 = d3;
+    DoubleRegister input = d0;
+    DoubleRegister result = d1;
+    DoubleRegister double_scratch1 = d2;
+    DoubleRegister double_scratch2 = d3;
     Register temp1 = r4;
     Register temp2 = r5;
     Register temp3 = r6;
@@ -571,10 +571,10 @@ static MemOperand ExpConstant(int index, Register base) {
 
 
 void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
-                                   DwVfpRegister input,
-                                   DwVfpRegister result,
-                                   DwVfpRegister double_scratch1,
-                                   DwVfpRegister double_scratch2,
+                                   DoubleRegister input,
+                                   DoubleRegister result,
+                                   DoubleRegister double_scratch1,
+                                   DoubleRegister double_scratch2,
                                    Register temp1,
                                    Register temp2,
                                    Register temp3) {
index 75899a948e7dcec3a56737a28a6c1f235463c0d0..8f0033e2cebe460b2f62b4ee484cc2fe09a2fa46 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -44,10 +44,6 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
 
 class CodeGenerator: public AstVisitor {
  public:
-  CodeGenerator() {
-    InitializeAstVisitor();
-  }
-
   static bool MakeCode(CompilationInfo* info);
 
   // Printing of AST, etc. as requested by flags.
@@ -72,8 +68,6 @@ class CodeGenerator: public AstVisitor {
                               int pos,
                               bool right_here = false);
 
-  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
  private:
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
@@ -98,10 +92,10 @@ class StringCharLoadGenerator : public AllStatic {
 class MathExpGenerator : public AllStatic {
  public:
   static void EmitMathExp(MacroAssembler* masm,
-                          DwVfpRegister input,
-                          DwVfpRegister result,
-                          DwVfpRegister double_scratch1,
-                          DwVfpRegister double_scratch2,
+                          DoubleRegister input,
+                          DoubleRegister result,
+                          DoubleRegister double_scratch1,
+                          DoubleRegister double_scratch2,
                           Register temp1,
                           Register temp2,
                           Register temp3);
index dcebd558c5245a4f1ac7583ace8400686b7eb88b..ee2a581a57dcb443e22f1395dab540afa6e3927b 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -212,7 +212,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
 
 void Deoptimizer::DoComputeOsrOutputFrame() {
   DeoptimizationInputData* data = DeoptimizationInputData::cast(
-      compiled_code_->deoptimization_data());
+      optimized_code_->deoptimization_data());
   unsigned ast_id = data->OsrAstId()->value();
 
   int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
@@ -246,7 +246,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
   unsigned input_frame_size = input_->GetFrameSize();
   ASSERT(fixed_size + height_in_bytes == input_frame_size);
 
-  unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
+  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
   unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
   unsigned outgoing_size = outgoing_height * kPointerSize;
   unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -338,7 +338,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
 
     unsigned pc_offset = data->OsrPcOffset()->value();
     uint32_t pc = reinterpret_cast<uint32_t>(
-        compiled_code_->entry() + pc_offset);
+        optimized_code_->entry() + pc_offset);
     output_[0]->SetPc(pc);
   }
   Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
@@ -451,70 +451,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
 }
 
 
-void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
-                                      int frame_index) {
-  //
-  //               FROM                                  TO             <-fp
-  //    |          ....           |          |          ....           |
-  //    +-------------------------+          +-------------------------+
-  //    | JSFunction continuation |          | JSFunction continuation |
-  //    +-------------------------+          +-------------------------+<-sp
-  // |  |   saved frame (fp)      |
-  // |  +=========================+<-fp
-  // |  |   JSFunction context    |
-  // v  +-------------------------+
-  //    |   COMPILED_STUB marker  |          fp = saved frame
-  //    +-------------------------+          f8 = JSFunction context
-  //    |                         |
-  //    | ...                     |
-  //    |                         |
-  //    +-------------------------+<-sp
-  //
-  //
-  int output_frame_size = 1 * kPointerSize;
-  FrameDescription* output_frame =
-      new(output_frame_size) FrameDescription(output_frame_size, 0);
-  Code* notify_miss =
-      isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
-  output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
-  output_frame->SetContinuation(
-      reinterpret_cast<intptr_t>(notify_miss->entry()));
-
-  ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
-  int major_key = compiled_code_->major_key();
-  CodeStubInterfaceDescriptor* descriptor =
-      isolate_->code_stub_interface_descriptor(major_key);
-  Handle<Code> miss_ic(descriptor->deoptimization_handler_);
-  output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
-  unsigned input_frame_size = input_->GetFrameSize();
-  intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
-  output_frame->SetFrameSlot(0, value);
-  value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
-  output_frame->SetRegister(fp.code(), value);
-  output_frame->SetFp(value);
-  value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
-  output_frame->SetRegister(cp.code(), value);
-
-  Translation::Opcode opcode =
-      static_cast<Translation::Opcode>(iterator->Next());
-  ASSERT(opcode == Translation::REGISTER);
-  USE(opcode);
-  int input_reg = iterator->Next();
-  intptr_t input_value = input_->GetRegister(input_reg);
-  output_frame->SetRegister(r1.code(), input_value);
-
-  int32_t next = iterator->Next();
-  opcode = static_cast<Translation::Opcode>(next);
-  ASSERT(opcode == Translation::REGISTER);
-  input_reg = iterator->Next();
-  input_value = input_->GetRegister(input_reg);
-  output_frame->SetRegister(r0.code(), input_value);
-
-  ASSERT(frame_index == 0);
-  output_[frame_index] = output_frame;
-}
-
-
 void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
                                               int frame_index) {
   Builtins* builtins = isolate_->builtins();
@@ -942,7 +878,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
   }
   input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
   input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
     input_->SetDoubleRegister(i, 0.0);
   }
 
@@ -962,6 +898,7 @@ void Deoptimizer::EntryGenerator::Generate() {
 
   Isolate* isolate = masm()->isolate();
 
+  CpuFeatures::Scope scope(VFP3);
   // Save all general purpose registers before messing with them.
   const int kNumberOfRegisters = Register::kNumRegisters;
 
@@ -969,29 +906,23 @@ void Deoptimizer::EntryGenerator::Generate() {
   RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
 
   const int kDoubleRegsSize =
-      kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
-
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatures::Scope scope(VFP2);
-    // Save all VFP registers before messing with them.
-    DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
-    DwVfpRegister last =
-        DwVfpRegister::FromAllocationIndex(
-            DwVfpRegister::kMaxNumAllocatableRegisters - 1);
-    ASSERT(last.code() > first.code());
-    ASSERT((last.code() - first.code()) ==
-           (DwVfpRegister::kMaxNumAllocatableRegisters - 1));
+      kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
+
+  // Save all VFP registers before messing with them.
+  DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
+  DwVfpRegister last =
+      DwVfpRegister::FromAllocationIndex(
+          DwVfpRegister::kNumAllocatableRegisters - 1);
+  ASSERT(last.code() > first.code());
+  ASSERT((last.code() - first.code()) ==
+      (DwVfpRegister::kNumAllocatableRegisters - 1));
 #ifdef DEBUG
-    int max = DwVfpRegister::kMaxNumAllocatableRegisters - 1;
-    for (int i = 0; i <= max; i++) {
-      ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
-             (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
-    }
-#endif
-    __ vstm(db_w, sp, first, last);
-  } else {
-    __ sub(sp, sp, Operand(kDoubleRegsSize));
+  for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) {
+    ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
+           (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
   }
+#endif
+  __ vstm(db_w, sp, first, last);
 
   // Push all 16 registers (needed to populate FrameDescription::registers_).
   // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
@@ -1050,17 +981,14 @@ void Deoptimizer::EntryGenerator::Generate() {
     __ str(r2, MemOperand(r1, offset));
   }
 
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatures::Scope scope(VFP2);
-    // Copy VFP registers to
-    // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
-    int double_regs_offset = FrameDescription::double_registers_offset();
-    for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); ++i) {
-      int dst_offset = i * kDoubleSize + double_regs_offset;
-      int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
-      __ vldr(d0, sp, src_offset);
-      __ vstr(d0, r1, dst_offset);
-    }
+  // Copy VFP registers to
+  // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+  int double_regs_offset = FrameDescription::double_registers_offset();
+  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
+    int dst_offset = i * kDoubleSize + double_regs_offset;
+    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+    __ vldr(d0, sp, src_offset);
+    __ vstr(d0, r1, dst_offset);
   }
 
   // Remove the bailout id, eventually return address, and the saved registers
@@ -1081,13 +1009,10 @@ void Deoptimizer::EntryGenerator::Generate() {
   // frame description.
   __ add(r3,  r1, Operand(FrameDescription::frame_content_offset()));
   Label pop_loop;
-  Label pop_loop_header;
-  __ b(&pop_loop_header);
   __ bind(&pop_loop);
   __ pop(r4);
   __ str(r4, MemOperand(r3, 0));
   __ add(r3, r3, Operand(sizeof(uint32_t)));
-  __ bind(&pop_loop_header);
   __ cmp(r2, sp);
   __ b(ne, &pop_loop);
 
@@ -1104,29 +1029,24 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ pop(r0);  // Restore deoptimizer object (class Deoptimizer).
 
   // Replace the current (input) frame with the output frames.
-  Label outer_push_loop, inner_push_loop,
-      outer_loop_header, inner_loop_header;
+  Label outer_push_loop, inner_push_loop;
   // Outer loop state: r0 = current "FrameDescription** output_",
   // r1 = one past the last FrameDescription**.
   __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
   __ ldr(r0, MemOperand(r0, Deoptimizer::output_offset()));  // r0 is output_.
   __ add(r1, r0, Operand(r1, LSL, 2));
-  __ jmp(&outer_loop_header);
   __ bind(&outer_push_loop);
   // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
   __ ldr(r2, MemOperand(r0, 0));  // output_[ix]
   __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
-  __ jmp(&inner_loop_header);
   __ bind(&inner_push_loop);
   __ sub(r3, r3, Operand(sizeof(uint32_t)));
   __ add(r6, r2, Operand(r3));
   __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
   __ push(r7);
-  __ bind(&inner_loop_header);
   __ cmp(r3, Operand(0));
   __ b(ne, &inner_push_loop);  // test for gt?
   __ add(r0, r0, Operand(kPointerSize));
-  __ bind(&outer_loop_header);
   __ cmp(r0, r1);
   __ b(lt, &outer_push_loop);
 
index f44ca98aad059da26ffb7bfeb6aa320d9247e5ca..42036737333877b8c3f39a0ceb43dea3457c6943 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -42,10 +42,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
 #undef DEFINE_COMPILE
 
 LOsrEntry::LOsrEntry() {
-  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
+  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
     register_spills_[i] = NULL;
   }
-  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
     double_register_spills_[i] = NULL;
   }
 }
@@ -612,7 +612,6 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
-  info()->MarkAsNonDeferredCalling();
 #ifdef DEBUG
   instr->VerifyCall();
 #endif
@@ -1695,7 +1694,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
   Representation to = instr->to();
   if (from.IsTagged()) {
     if (to.IsDouble()) {
-      info()->MarkAsDeferredCalling();
       LOperand* value = UseRegister(instr->value());
       LNumberUntagD* res = new(zone()) LNumberUntagD(value);
       return AssignEnvironment(DefineAsRegister(res));
@@ -1720,7 +1718,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
     }
   } else if (from.IsDouble()) {
     if (to.IsTagged()) {
-      info()->MarkAsDeferredCalling();
       LOperand* value = UseRegister(instr->value());
       LOperand* temp1 = TempRegister();
       LOperand* temp2 = TempRegister();
@@ -1740,7 +1737,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
       return AssignEnvironment(DefineAsRegister(res));
     }
   } else if (from.IsInteger32()) {
-    info()->MarkAsDeferredCalling();
     if (to.IsTagged()) {
       HValue* val = instr->value();
       LOperand* value = UseRegisterAtStart(val);
@@ -1978,16 +1974,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
         (instr->representation().IsDouble() &&
          ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
           (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
-    // float->double conversion on non-VFP2 requires an extra scratch
-    // register. For convenience, just mark the elements register as "UseTemp"
-    // so that it can be used as a temp during the float->double conversion
-    // after it's no longer needed after the float load.
-    bool needs_temp =
-        !CpuFeatures::IsSupported(VFP2) &&
-        (elements_kind == EXTERNAL_FLOAT_ELEMENTS);
-    LOperand* external_pointer = needs_temp
-        ? UseTempRegister(instr->elements())
-        : UseRegister(instr->elements());
+    LOperand* external_pointer = UseRegister(instr->elements());
     result = new(zone()) LLoadKeyed(external_pointer, key);
   }
 
@@ -2205,17 +2192,8 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
 
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
-  LParameter* result = new(zone()) LParameter;
-  if (info()->IsOptimizing()) {
-    int spill_index = chunk()->GetParameterStackSlot(instr->index());
-    return DefineAsSpilled(result, spill_index);
-  } else {
-    ASSERT(info()->IsStub());
-    CodeStubInterfaceDescriptor* descriptor =
-        info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
-    Register reg = descriptor->register_params_[instr->index()];
-    return DefineFixed(result, reg);
-  }
+  int spill_index = chunk()->GetParameterStackSlot(instr->index());
+  return DefineAsSpilled(new(zone()) LParameter, spill_index);
 }
 
 
index 1b589ced6489ac2f23d27c4bd8cc3b5823395561..7397b4bc8d94d38dc8ee6567ebb7559280b1d2c6 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -255,11 +255,6 @@ class LInstruction: public ZoneObject {
 
   void MarkAsCall() { is_call_ = true; }
 
-  // Interface to the register allocator and iterators.
-  bool ClobbersTemps() const { return is_call_; }
-  bool ClobbersRegisters() const { return is_call_; }
-  bool ClobbersDoubleRegisters() const { return is_call_; }
-
   // Interface to the register allocator and iterators.
   bool IsMarkedAsCall() const { return is_call_; }
 
@@ -2364,9 +2359,8 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
   // slot, i.e., that must also be restored to the spill slot on OSR entry.
   // NULL if the register has no assigned spill slot.  Indexed by allocation
   // index.
-  LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
-  LOperand* double_register_spills_[
-      DoubleRegister::kMaxNumAllocatableRegisters];
+  LOperand* register_spills_[Register::kNumAllocatableRegisters];
+  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
 };
 
 
index 53d99814bb5d56cc10cb7c1aaed206f730ce1c3d..06b021669bd1792e150796f844f135935179dbdc 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -65,6 +65,8 @@ bool LCodeGen::GenerateCode() {
   HPhase phase("Z_Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
+  CpuFeatures::Scope scope1(VFP3);
+  CpuFeatures::Scope scope2(ARMv7);
 
   CodeStub::GenerateFPStubs();
 
@@ -116,38 +118,37 @@ void LCodeGen::Comment(const char* format, ...) {
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
-  if (info()->IsOptimizing()) {
-    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
 #ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
-      __ stop("stop_at");
-    }
+  if (strlen(FLAG_stop_at) > 0 &&
+      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+    __ stop("stop_at");
+  }
 #endif
 
-    // r1: Callee's JS function.
-    // cp: Callee's context.
-    // fp: Caller's frame pointer.
-    // lr: Caller's pc.
+  // r1: Callee's JS function.
+  // cp: Callee's context.
+  // fp: Caller's frame pointer.
+  // lr: Caller's pc.
 
-    // Strict mode functions and builtins need to replace the receiver
-    // with undefined when called as functions (without an explicit
-    // receiver object). r5 is zero for method calls and non-zero for
-    // function calls.
-    if (!info_->is_classic_mode() || info_->is_native()) {
-      Label ok;
-      __ cmp(r5, Operand(0));
-      __ b(eq, &ok);
-      int receiver_offset = scope()->num_parameters() * kPointerSize;
-      __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-      __ str(r2, MemOperand(sp, receiver_offset));
-      __ bind(&ok);
-    }
+  // Strict mode functions and builtins need to replace the receiver
+  // with undefined when called as functions (without an explicit
+  // receiver object). r5 is zero for method calls and non-zero for
+  // function calls.
+  if (!info_->is_classic_mode() || info_->is_native()) {
+    Label ok;
+    __ cmp(r5, Operand(0));
+    __ b(eq, &ok);
+    int receiver_offset = scope()->num_parameters() * kPointerSize;
+    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+    __ str(r2, MemOperand(sp, receiver_offset));
+    __ bind(&ok);
   }
 
+
   info()->set_prologue_offset(masm_->pc_offset());
-  if (NeedsEagerFrame()) {
+  {
     PredictableCodeSizeScope predictible_code_size_scope(
         masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
     // The following three instructions must remain together and unmodified
@@ -158,7 +159,6 @@ bool LCodeGen::GeneratePrologue() {
     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
     // Adjust FP to point to saved FP.
     __ add(fp, sp, Operand(2 * kPointerSize));
-    frame_is_built_ = true;
   }
 
   // Reserve space for the stack slots needed by the code.
@@ -178,7 +178,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   // Possibly allocate a local context.
-  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is in r1.
@@ -214,7 +214,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   // Trace the call.
-  if (FLAG_trace && info()->IsOptimizing()) {
+  if (FLAG_trace) {
     __ CallRuntime(Runtime::kTraceEnter, 0);
   }
   return !is_aborted();
@@ -272,31 +272,10 @@ bool LCodeGen::GenerateDeferredCode() {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
-      if (NeedsDeferredFrame()) {
-        Comment(";;; Deferred build frame",
-                code->instruction_index(),
-                code->instr()->Mnemonic());
-        ASSERT(!frame_is_built_);
-        ASSERT(info()->IsStub());
-        frame_is_built_ = true;
-        __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
-        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
-        __ push(scratch0());
-        __ add(fp, sp, Operand(2 * kPointerSize));
-      }
       Comment(";;; Deferred code @%d: %s.",
               code->instruction_index(),
               code->instr()->Mnemonic());
       code->Generate();
-      if (NeedsDeferredFrame()) {
-        Comment(";;; Deferred destroy frame",
-                code->instruction_index(),
-                code->instr()->Mnemonic());
-        ASSERT(frame_is_built_);
-        __ pop(ip);
-        __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
-        frame_is_built_ = false;
-      }
       __ jmp(code->exit());
     }
   }
@@ -318,68 +297,24 @@ bool LCodeGen::GenerateDeoptJumpTable() {
   // Each entry in the jump table generates one instruction and inlines one
   // 32bit data after it.
   if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
-      deopt_jump_table_.length() * 7)) {
+      deopt_jump_table_.length() * 2)) {
     Abort("Generated code is too large");
   }
 
+  // Block the constant pool emission during the jump table emission.
+  __ BlockConstPoolFor(deopt_jump_table_.length());
   __ RecordComment("[ Deoptimisation jump table");
   Label table_start;
   __ bind(&table_start);
-  Label needs_frame_not_call;
-  Label needs_frame_is_call;
   for (int i = 0; i < deopt_jump_table_.length(); i++) {
     __ bind(&deopt_jump_table_[i].label);
-    Address entry = deopt_jump_table_[i].address;
-    if (deopt_jump_table_[i].needs_frame) {
-      __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
-      if (deopt_jump_table_[i].is_lazy_deopt) {
-        if (needs_frame_is_call.is_bound()) {
-          __ b(&needs_frame_is_call);
-        } else {
-          __ bind(&needs_frame_is_call);
-          __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
-          // This variant of deopt can only be used with stubs. Since we don't
-          // have a function pointer to install in the stack frame that we're
-          // building, install a special marker there instead.
-          ASSERT(info()->IsStub());
-          __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
-          __ push(scratch0());
-          __ add(fp, sp, Operand(2 * kPointerSize));
-          __ mov(lr, Operand(pc), LeaveCC, al);
-          __ mov(pc, ip);
-        }
-      } else {
-        if (needs_frame_not_call.is_bound()) {
-          __ b(&needs_frame_not_call);
-        } else {
-          __ bind(&needs_frame_not_call);
-          __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
-          // This variant of deopt can only be used with stubs. Since we don't
-          // have a function pointer to install in the stack frame that we're
-          // building, install a special marker there instead.
-          ASSERT(info()->IsStub());
-          __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
-          __ push(scratch0());
-          __ add(fp, sp, Operand(2 * kPointerSize));
-          __ mov(pc, ip);
-        }
-      }
-    } else {
-      if (deopt_jump_table_[i].is_lazy_deopt) {
-        __ mov(lr, Operand(pc), LeaveCC, al);
-        __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
-      } else {
-        __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
-      }
-    }
-    masm()->CheckConstPool(false, false);
+    __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
+    __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
   }
+  ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
+      deopt_jump_table_.length() * 2);
   __ RecordComment("]");
 
-  // Force constant pool emission at the end of the deopt jump table to make
-  // sure that no constant pools are emitted after.
-  masm()->CheckConstPool(true, false);
-
   // The deoptimization jump table is the last part of the instruction
   // sequence. Mark the generated code as done unless we bailed out.
   if (!is_aborted()) status_ = DONE;
@@ -399,8 +334,8 @@ Register LCodeGen::ToRegister(int index) const {
 }
 
 
-DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
-  return DwVfpRegister::FromAllocationIndex(index);
+DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
+  return DoubleRegister::FromAllocationIndex(index);
 }
 
 
@@ -441,15 +376,15 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
 }
 
 
-DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
   ASSERT(op->IsDoubleRegister());
   return ToDoubleRegister(op->index());
 }
 
 
-DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
-                                               SwVfpRegister flt_scratch,
-                                               DwVfpRegister dbl_scratch) {
+DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+                                                SwVfpRegister flt_scratch,
+                                                DoubleRegister dbl_scratch) {
   if (op->IsDoubleRegister()) {
     return ToDoubleRegister(op->index());
   } else if (op->IsConstantOperand()) {
@@ -585,9 +520,7 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
                    translation,
                    arguments_index,
                    arguments_count);
-  bool has_closure_id = !info()->closure().is_null() &&
-      *info()->closure() != *environment->closure();
-  int closure_id = has_closure_id
+  int closure_id = *info()->closure() != *environment->closure()
       ? DefineDeoptimizationLiteral(environment->closure())
       : Translation::kSelfLiteralId;
 
@@ -608,9 +541,6 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
       ASSERT(height == 0);
       translation->BeginSetterStubFrame(closure_id);
       break;
-    case STUB:
-      translation->BeginCompiledStubFrame();
-      break;
     case ARGUMENTS_ADAPTOR:
       translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
       break;
@@ -806,11 +736,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
-
-  Deoptimizer::BailoutType bailout_type = info()->IsStub()
-      ? Deoptimizer::LAZY
-      : Deoptimizer::EAGER;
-  Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
+  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
   if (entry == NULL) {
     Abort("bailout was not prepared");
     return;
@@ -826,19 +752,14 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
 
   if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
 
-  bool needs_lazy_deopt = info()->IsStub();
-  ASSERT(info()->IsStub() || frame_is_built_);
-  if (cc == al && !needs_lazy_deopt) {
+  if (cc == al) {
     __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
     if (deopt_jump_table_.is_empty() ||
-        (deopt_jump_table_.last().address != entry) ||
-        (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
-        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
-      JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
-      deopt_jump_table_.Add(table_entry, zone());
+        (deopt_jump_table_.last().address != entry)) {
+      deopt_jump_table_.Add(JumpTableEntry(entry), zone());
     }
     __ b(cc, &deopt_jump_table_.last().label);
   }
@@ -1447,7 +1368,6 @@ void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
                                       LOperand* left_argument,
                                       LOperand* right_argument,
                                       Token::Value op) {
-  CpuFeatures::Scope vfp_scope(VFP2);
   Register left = ToRegister(left_argument);
   Register right = ToRegister(right_argument);
 
@@ -1733,7 +1653,6 @@ void LCodeGen::DoConstantI(LConstantI* instr) {
 void LCodeGen::DoConstantD(LConstantD* instr) {
   ASSERT(instr->result()->IsDoubleRegister());
   DwVfpRegister result = ToDoubleRegister(instr->result());
-  CpuFeatures::Scope scope(VFP2);
   double v = instr->value();
   __ Vmov(result, v, scratch0());
 }
@@ -1911,10 +1830,9 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
     __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
   } else {
     ASSERT(instr->hydrogen()->representation().IsDouble());
-    CpuFeatures::Scope scope(VFP2);
-    DwVfpRegister left_reg = ToDoubleRegister(left);
-    DwVfpRegister right_reg = ToDoubleRegister(right);
-    DwVfpRegister result_reg = ToDoubleRegister(instr->result());
+    DoubleRegister left_reg = ToDoubleRegister(left);
+    DoubleRegister right_reg = ToDoubleRegister(right);
+    DoubleRegister result_reg = ToDoubleRegister(instr->result());
     Label check_nan_left, check_zero, return_left, return_right, done;
     __ VFPCompareAndSetFlags(left_reg, right_reg);
     __ b(vs, &check_nan_left);
@@ -1957,10 +1875,9 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
 
 
 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
-  CpuFeatures::Scope scope(VFP2);
-  DwVfpRegister left = ToDoubleRegister(instr->left());
-  DwVfpRegister right = ToDoubleRegister(instr->right());
-  DwVfpRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister left = ToDoubleRegister(instr->left());
+  DoubleRegister right = ToDoubleRegister(instr->right());
+  DoubleRegister result = ToDoubleRegister(instr->result());
   switch (instr->op()) {
     case Token::ADD:
       __ vadd(result, left, right);
@@ -2048,8 +1965,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
     __ cmp(reg, Operand(0));
     EmitBranch(true_block, false_block, ne);
   } else if (r.IsDouble()) {
-    CpuFeatures::Scope scope(VFP2);
-    DwVfpRegister reg = ToDoubleRegister(instr->value());
+    DoubleRegister reg = ToDoubleRegister(instr->value());
     Register scratch = scratch0();
 
     // Test the double value. Zero and NaN are false.
@@ -2134,9 +2050,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
       }
 
       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
-        CpuFeatures::Scope scope(VFP2);
         // heap number -> false iff +0, -0, or NaN.
-        DwVfpRegister dbl_scratch = double_scratch0();
+        DoubleRegister dbl_scratch = double_scratch0();
         Label not_heap_number;
         __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
         __ b(ne, &not_heap_number);
@@ -2214,7 +2129,6 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
-      CpuFeatures::Scope scope(VFP2);
       // Compare left and right operands as doubles and load the
       // resulting flags into the normal status register.
       __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
@@ -2753,21 +2667,16 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
 
 
 void LCodeGen::DoReturn(LReturn* instr) {
-  if (FLAG_trace && info()->IsOptimizing()) {
+  if (FLAG_trace) {
     // Push the return value on the stack as the parameter.
     // Runtime::TraceExit returns its parameter in r0.
     __ push(r0);
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
-  if (NeedsEagerFrame()) {
-    int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
-    __ mov(sp, fp);
-    __ ldm(ia_w, sp, fp.bit() | lr.bit());
-    __ add(sp, sp, Operand(sp_delta));
-  }
-  if (info()->IsStub()) {
-    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  }
+  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
+  __ mov(sp, fp);
+  __ ldm(ia_w, sp, fp.bit() | lr.bit());
+  __ add(sp, sp, Operand(sp_delta));
   __ Jump(lr);
 }
 
@@ -3117,63 +3026,17 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
 
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    CpuFeatures::Scope scope(VFP3);
     DwVfpRegister result = ToDoubleRegister(instr->result());
     Operand operand = key_is_constant
         ? Operand(constant_key << element_size_shift)
         : Operand(key, LSL, shift_size);
     __ add(scratch0(), external_pointer, operand);
-    if (CpuFeatures::IsSupported(VFP2)) {
-      CpuFeatures::Scope scope(VFP2);
-      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-        __ vldr(result.low(), scratch0(), additional_offset);
-        __ vcvt_f64_f32(result, result.low());
-      } else  {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-        __ vldr(result, scratch0(), additional_offset);
-      }
-    } else {
-      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-        Register value = external_pointer;
-        __ ldr(value, MemOperand(scratch0(), additional_offset));
-        __ and_(sfpd_lo, value, Operand(kBinary32MantissaMask));
-
-        __ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits));
-        __ and_(scratch0(), scratch0(),
-                Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
-        Label exponent_rebiased;
-        __ teq(scratch0(), Operand(0x00));
-        __ b(eq, &exponent_rebiased);
-
-        __ teq(scratch0(), Operand(0xff));
-        __ mov(scratch0(), Operand(0x7ff), LeaveCC, eq);
-        __ b(eq, &exponent_rebiased);
-
-        // Rebias exponent.
-        __ add(scratch0(),
-               scratch0(),
-               Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
-        __ bind(&exponent_rebiased);
-        __ and_(sfpd_hi, value, Operand(kBinary32SignMask));
-        __ orr(sfpd_hi, sfpd_hi,
-               Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord));
-
-        // Shift mantissa.
-        static const int kMantissaShiftForHiWord =
-            kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
-        static const int kMantissaShiftForLoWord =
-            kBitsPerInt - kMantissaShiftForHiWord;
-
-        __ orr(sfpd_hi, sfpd_hi,
-               Operand(sfpd_lo, LSR, kMantissaShiftForHiWord));
-        __ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord));
-
-      } else {
-        __ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset));
-        __ ldr(sfpd_hi, MemOperand(scratch0(),
-                                   additional_offset + kPointerSize));
-      }
+    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+      __ vldr(result.low(), scratch0(), additional_offset);
+      __ vcvt_f64_f32(result, result.low());
+    } else  {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+      __ vldr(result, scratch0(), additional_offset);
     }
   } else {
     Register result = ToRegister(instr->result());
@@ -3242,28 +3105,23 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
     key = ToRegister(instr->key());
   }
 
-  int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
-      ((constant_key + instr->additional_index()) << element_size_shift);
+  Operand operand = key_is_constant
+      ? Operand(((constant_key + instr->additional_index()) <<
+                 element_size_shift) +
+                FixedDoubleArray::kHeaderSize - kHeapObjectTag)
+      : Operand(key, LSL, shift_size);
+  __ add(elements, elements, operand);
   if (!key_is_constant) {
-    __ add(elements, elements, Operand(key, LSL, shift_size));
-  }
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatures::Scope scope(VFP2);
-    __ add(elements, elements, Operand(base_offset));
-    __ vldr(result, elements, 0);
-    if (instr->hydrogen()->RequiresHoleCheck()) {
-      __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
-      __ cmp(scratch, Operand(kHoleNanUpper32));
-      DeoptimizeIf(eq, instr->environment());
-    }
-  } else {
-      __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
-      __ ldr(sfpd_lo, MemOperand(elements, base_offset));
-    if (instr->hydrogen()->RequiresHoleCheck()) {
-      ASSERT(kPointerSize == sizeof(kHoleNanLower32));
-      __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
-      DeoptimizeIf(eq, instr->environment());
-    }
+    __ add(elements, elements,
+           Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
+                   (instr->additional_index() << element_size_shift)));
+  }
+
+  __ vldr(result, elements, 0);
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+    __ cmp(scratch, Operand(kHoleNanUpper32));
+    DeoptimizeIf(eq, instr->environment());
   }
 }
 
@@ -3699,7 +3557,6 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
-  CpuFeatures::Scope scope(VFP2);
   // Class for deferred case.
   class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
    public:
@@ -3736,8 +3593,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
-  CpuFeatures::Scope scope(VFP2);
-  DwVfpRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister input = ToDoubleRegister(instr->value());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
 
@@ -3762,8 +3618,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
-  CpuFeatures::Scope scope(VFP2);
-  DwVfpRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister input = ToDoubleRegister(instr->value());
   Register result = ToRegister(instr->result());
   DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
   Register scratch = scratch0();
@@ -3828,18 +3683,16 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
-  CpuFeatures::Scope scope(VFP2);
-  DwVfpRegister input = ToDoubleRegister(instr->value());
-  DwVfpRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
   __ vsqrt(result, input);
 }
 
 
 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
-  CpuFeatures::Scope scope(VFP2);
-  DwVfpRegister input = ToDoubleRegister(instr->value());
-  DwVfpRegister result = ToDoubleRegister(instr->result());
-  DwVfpRegister temp = ToDoubleRegister(instr->temp());
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister temp = ToDoubleRegister(instr->temp());
 
   // Note that according to ECMA-262 15.8.2.13:
   // Math.pow(-Infinity, 0.5) == Infinity
@@ -3858,7 +3711,6 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoPower(LPower* instr) {
-  CpuFeatures::Scope scope(VFP2);
   Representation exponent_type = instr->hydrogen()->right()->representation();
   // Having marked this as a call, we can use any registers.
   // Just make sure that the input/output registers are the expected ones.
@@ -3891,7 +3743,6 @@ void LCodeGen::DoPower(LPower* instr) {
 
 
 void LCodeGen::DoRandom(LRandom* instr) {
-  CpuFeatures::Scope scope(VFP2);
   class DeferredDoRandom: public LDeferredCode {
    public:
     DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
@@ -3970,11 +3821,10 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
 
 
 void LCodeGen::DoMathExp(LMathExp* instr) {
-  CpuFeatures::Scope scope(VFP2);
-  DwVfpRegister input = ToDoubleRegister(instr->value());
-  DwVfpRegister result = ToDoubleRegister(instr->result());
-  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
-  DwVfpRegister double_scratch2 = double_scratch0();
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+  DoubleRegister double_scratch2 = double_scratch0();
   Register temp1 = ToRegister(instr->temp1());
   Register temp2 = ToRegister(instr->temp2());
 
@@ -4260,7 +4110,6 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
 
 
 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
-  CpuFeatures::Scope scope(VFP2);
   Register external_pointer = ToRegister(instr->elements());
   Register key = no_reg;
   ElementsKind elements_kind = instr->elements_kind();
@@ -4331,7 +4180,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
 
 
 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
-  CpuFeatures::Scope scope(VFP2);
   DwVfpRegister value = ToDoubleRegister(instr->value());
   Register elements = ToRegister(instr->elements());
   Register key = no_reg;
@@ -4608,7 +4456,6 @@ void LCodeGen::DoStringLength(LStringLength* instr) {
 
 
 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
-  CpuFeatures::Scope scope(VFP2);
   LOperand* input = instr->value();
   ASSERT(input->IsRegister() || input->IsStackSlot());
   LOperand* output = instr->result();
@@ -4626,7 +4473,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
 
 
 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
-  CpuFeatures::Scope scope(VFP2);
   LOperand* input = instr->value();
   LOperand* output = instr->result();
 
@@ -4688,49 +4534,13 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
 }
 
 
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
-// Resulting double is returned in registers hiword:loword.
-// This functions does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
-                                Register hiword,
-                                Register loword,
-                                Register scratch,
-                                int leading_zeroes) {
-  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
-  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
-  const int mantissa_shift_for_hi_word =
-      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
-  const int mantissa_shift_for_lo_word =
-      kBitsPerInt - mantissa_shift_for_hi_word;
-  masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
-  if (mantissa_shift_for_hi_word > 0) {
-    masm->mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
-    masm->orr(hiword, scratch,
-              Operand(hiword, LSR, mantissa_shift_for_hi_word));
-  } else {
-    masm->mov(loword, Operand(0, RelocInfo::NONE));
-    masm->orr(hiword, scratch,
-              Operand(hiword, LSL, -mantissa_shift_for_hi_word));
-  }
-
-  // If least significant bit of biased exponent was not 1 it was corrupted
-  // by most significant bit of mantissa so we should fix that.
-  if (!(biased_exponent & 1)) {
-    masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
-  }
-}
-
-
 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
                                     LOperand* value,
                                     IntegerSignedness signedness) {
   Label slow;
   Register src = ToRegister(value);
   Register dst = ToRegister(instr->result());
-  DwVfpRegister dbl_scratch = double_scratch0();
+  DoubleRegister dbl_scratch = double_scratch0();
   SwVfpRegister flt_scratch = dbl_scratch.low();
 
   // Preserve the value of all registers.
@@ -4745,40 +4555,16 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
       __ SmiUntag(src, dst);
       __ eor(src, src, Operand(0x80000000));
     }
-    if (CpuFeatures::IsSupported(VFP2)) {
-      CpuFeatures::Scope scope(VFP2);
-      __ vmov(flt_scratch, src);
-      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
-    } else {
-      FloatingPointHelper::Destination dest =
-          FloatingPointHelper::kCoreRegisters;
-      FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0,
-                                              sfpd_lo, sfpd_hi,
-                                              scratch0(), s0);
-    }
+    __ vmov(flt_scratch, src);
+    __ vcvt_f64_s32(dbl_scratch, flt_scratch);
   } else {
-    if (CpuFeatures::IsSupported(VFP2)) {
-      CpuFeatures::Scope scope(VFP2);
-      __ vmov(flt_scratch, src);
-      __ vcvt_f64_u32(dbl_scratch, flt_scratch);
-    } else {
-      Label no_leading_zero, done;
-      __ tst(src, Operand(0x80000000));
-      __ b(ne, &no_leading_zero);
-
-      // Integer has one leading zeros.
-      GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 1);
-      __ b(&done);
-
-      __ bind(&no_leading_zero);
-      GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 0);
-      __ b(&done);
-    }
+    __ vmov(flt_scratch, src);
+    __ vcvt_f64_u32(dbl_scratch, flt_scratch);
   }
 
   if (FLAG_inline_new) {
-    __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
-    __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
+    __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
     __ Move(dst, r5);
     __ b(&done);
   }
@@ -4798,13 +4584,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
   // Done. Put the value in dbl_scratch into the value of the allocated heap
   // number.
   __ bind(&done);
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatures::Scope scope(VFP2);
-    __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
-  } else {
-    __ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
-    __ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
-  }
+  __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
   __ add(dst, dst, Operand(kHeapObjectTag));
   __ StoreToSafepointRegisterSlot(dst, dst);
 }
@@ -4821,7 +4601,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
     LNumberTagD* instr_;
   };
 
-  DwVfpRegister input_reg = ToDoubleRegister(instr->value());
+  DoubleRegister input_reg = ToDoubleRegister(instr->value());
   Register scratch = scratch0();
   Register reg = ToRegister(instr->result());
   Register temp1 = ToRegister(instr->temp());
@@ -4837,13 +4617,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
     __ jmp(deferred->entry());
   }
   __ bind(deferred->exit());
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatures::Scope scope(VFP2);
-    __ vstr(input_reg, reg, HeapNumber::kValueOffset);
-  } else {
-    __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
-    __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
-  }
+  __ vstr(input_reg, reg, HeapNumber::kValueOffset);
   // Now that we have finished with the object's real address tag it
   __ add(reg, reg, Operand(kHeapObjectTag));
 }
@@ -4884,14 +4658,13 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
 
 
 void LCodeGen::EmitNumberUntagD(Register input_reg,
-                                DwVfpRegister result_reg,
+                                DoubleRegister result_reg,
                                 bool deoptimize_on_undefined,
                                 bool deoptimize_on_minus_zero,
                                 LEnvironment* env) {
   Register scratch = scratch0();
   SwVfpRegister flt_scratch = double_scratch0().low();
   ASSERT(!result_reg.is(double_scratch0()));
-  CpuFeatures::Scope scope(VFP2);
 
   Label load_smi, heap_number, done;
 
@@ -4966,7 +4739,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   __ cmp(scratch1, Operand(ip));
 
   if (instr->truncating()) {
-    CpuFeatures::Scope scope(VFP2);
     Register scratch3 = ToRegister(instr->temp2());
     ASSERT(!scratch3.is(input_reg) &&
            !scratch3.is(scratch1) &&
@@ -5057,7 +4829,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   ASSERT(result->IsDoubleRegister());
 
   Register input_reg = ToRegister(input);
-  DwVfpRegister result_reg = ToDoubleRegister(result);
+  DoubleRegister result_reg = ToDoubleRegister(result);
 
   EmitNumberUntagD(input_reg, result_reg,
                    instr->hydrogen()->deoptimize_on_undefined(),
@@ -5205,16 +4977,14 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
 
 
 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
-  CpuFeatures::Scope vfp_scope(VFP2);
-  DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
+  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
   Register result_reg = ToRegister(instr->result());
-  DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
   __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
 }
 
 
 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
-  CpuFeatures::Scope scope(VFP2);
   Register unclamped_reg = ToRegister(instr->unclamped());
   Register result_reg = ToRegister(instr->result());
   __ ClampUint8(result_reg, unclamped_reg);
@@ -5222,11 +4992,10 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
 
 
 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
-  CpuFeatures::Scope scope(VFP2);
   Register scratch = scratch0();
   Register input_reg = ToRegister(instr->unclamped());
   Register result_reg = ToRegister(instr->result());
-  DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
   Label is_smi, done, heap_number;
 
   // Both smi and heap number cases are handled.
@@ -5803,7 +5572,6 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
 
 
 void LCodeGen::EnsureSpaceForLazyDeopt() {
-  if (info()->IsStub()) return;
   // Ensure that we have enough space after the previous lazy-bailout
   // instruction for patching the code here.
   int current_pc = masm()->pc_offset();
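
For context on EnsureSpaceForLazyDeopt above: lazy deoptimization later
patches a call sequence over the code that follows the last recorded
lazy-bailout point, so two bailout points that sit too close together would
overlap when patched. A minimal sketch of the padding arithmetic; the patch
size constant is an assumption for illustration.

    #include <cassert>

    const int kInstrSize = 4;                   // ARM instruction size
    const int kCallPatchSize = 3 * kInstrSize;  // assumed patched-call size

    // Padding bytes to emit so that patching a call over the previous
    // lazy-deopt site cannot overwrite the code emitted at current_pc.
    int LazyDeoptPaddingBytes(int current_pc, int last_lazy_deopt_pc) {
      int gap = current_pc - last_lazy_deopt_pc;
      int padding = gap < kCallPatchSize ? kCallPatchSize - gap : 0;
      assert(padding % kInstrSize == 0);  // pad with whole nop instructions
      return padding;
    }

    int main() {
      assert(LazyDeoptPaddingBytes(100, 96) == 8);  // gap 4 -> pad 8 bytes
      assert(LazyDeoptPaddingBytes(100, 80) == 0);  // far enough already
      return 0;
    }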
index e7afcbf1bbc675f86b59b364581ee88dc8025e0d..921285b0d228c8d023038e2422e9772d735135b3 100644 (file)
@@ -61,7 +61,6 @@ class LCodeGen BASE_EMBEDDED {
         deferred_(8, info->zone()),
         osr_pc_offset_(-1),
         last_lazy_deopt_pc_(0),
-        frame_is_built_(false),
         safepoints_(info->zone()),
         resolver_(this),
         expected_safepoint_kind_(Safepoint::kSimple) {
@@ -77,15 +76,6 @@ class LCodeGen BASE_EMBEDDED {
   Heap* heap() const { return isolate()->heap(); }
   Zone* zone() const { return zone_; }
 
-  bool NeedsEagerFrame() const {
-    return GetStackSlotCount() > 0 ||
-        info()->is_non_deferred_calling() ||
-        !info()->IsStub();
-  }
-  bool NeedsDeferredFrame() const {
-    return !NeedsEagerFrame() && info()->is_deferred_calling();
-  }
-
   // Support for converting LOperands to assembler types.
   // LOperand must be a register.
   Register ToRegister(LOperand* op) const;
@@ -94,12 +84,12 @@ class LCodeGen BASE_EMBEDDED {
   Register EmitLoadRegister(LOperand* op, Register scratch);
 
   // LOperand must be a double register.
-  DwVfpRegister ToDoubleRegister(LOperand* op) const;
+  DoubleRegister ToDoubleRegister(LOperand* op) const;
 
   // LOperand is loaded into dbl_scratch, unless already a double register.
-  DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
-                                       SwVfpRegister flt_scratch,
-                                       DwVfpRegister dbl_scratch);
+  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
+                                        SwVfpRegister flt_scratch,
+                                        DoubleRegister dbl_scratch);
   int ToInteger32(LConstantOperand* op) const;
   double ToDouble(LConstantOperand* op) const;
   Operand ToOperand(LOperand* op);
@@ -203,7 +193,7 @@ class LCodeGen BASE_EMBEDDED {
                        Register temporary2);
 
   int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
-  int GetParameterCount() const { return info()->num_parameters(); }
+  int GetParameterCount() const { return scope()->num_parameters(); }
 
   void Abort(const char* reason);
   void Comment(const char* format, ...);
@@ -285,7 +275,7 @@ class LCodeGen BASE_EMBEDDED {
   void PopulateDeoptimizationLiteralsWithInlinedFunctions();
 
   Register ToRegister(int index) const;
-  DwVfpRegister ToDoubleRegister(int index) const;
+  DoubleRegister ToDoubleRegister(int index) const;
 
   // Specific math operations - used from DoUnaryMathOperation.
   void EmitIntegerMathAbs(LUnaryMathOperation* instr);
@@ -318,7 +308,7 @@ class LCodeGen BASE_EMBEDDED {
   void EmitGoto(int block);
   void EmitBranch(int left_block, int right_block, Condition cc);
   void EmitNumberUntagD(Register input,
-                        DwVfpRegister result,
+                        DoubleRegister result,
                         bool deoptimize_on_undefined,
                         bool deoptimize_on_minus_zero,
                         LEnvironment* env);
@@ -379,15 +369,11 @@ class LCodeGen BASE_EMBEDDED {
                                            LEnvironment* environment);
 
   struct JumpTableEntry {
-    inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
+    explicit inline JumpTableEntry(Address entry)
         : label(),
-          address(entry),
-          needs_frame(frame),
-          is_lazy_deopt(is_lazy) { }
+          address(entry) { }
     Label label;
     Address address;
-    bool needs_frame;
-    bool is_lazy_deopt;
   };
 
   void EnsureSpaceForLazyDeopt();
@@ -416,7 +402,6 @@ class LCodeGen BASE_EMBEDDED {
   ZoneList<LDeferredCode*> deferred_;
   int osr_pc_offset_;
   int last_lazy_deopt_pc_;
-  bool frame_is_built_;
 
   // Builder that keeps track of safepoints in the code. The table
   // itself is emitted at the end of the generated code.
@@ -432,7 +417,6 @@ class LCodeGen BASE_EMBEDDED {
     PushSafepointRegistersScope(LCodeGen* codegen,
                                 Safepoint::Kind kind)
         : codegen_(codegen) {
-      ASSERT(codegen_->info()->is_calling());
       ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
       codegen_->expected_safepoint_kind_ = kind;
 
index 4df1338b99d8985d8b0a592c0a37901d758a6a99..c100720d89cd6130ad3e3a04a9528c48c045f5f7 100644 (file)
@@ -171,10 +171,8 @@ void LGapResolver::BreakCycle(int index) {
   } else if (source->IsStackSlot()) {
     __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
   } else if (source->IsDoubleRegister()) {
-    CpuFeatures::Scope scope(VFP2);
     __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
   } else if (source->IsDoubleStackSlot()) {
-    CpuFeatures::Scope scope(VFP2);
     __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
   } else {
     UNREACHABLE();
@@ -194,10 +192,8 @@ void LGapResolver::RestoreValue() {
   } else if (saved_destination_->IsStackSlot()) {
     __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
   } else if (saved_destination_->IsDoubleRegister()) {
-    CpuFeatures::Scope scope(VFP2);
     __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
   } else if (saved_destination_->IsDoubleStackSlot()) {
-    CpuFeatures::Scope scope(VFP2);
     __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
   } else {
     UNREACHABLE();
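
Context for BreakCycle/RestoreValue above: a parallel move can contain
cycles (the simplest being a register swap), which the gap resolver breaks
by parking one value in kSavedValueRegister or kScratchDoubleReg and
restoring it after the rest of the cycle has been emitted. The same idea on
plain integers, as a rough sketch:

    #include <cassert>

    int main() {
      // Parallel move r0 <- r1, r1 <- r0 is a cycle; emit it via scratch.
      int r0 = 10, r1 = 20;
      int scratch = r0;   // BreakCycle: save one end of the cycle
      r0 = r1;            // emit the remaining move of the cycle
      r1 = scratch;       // RestoreValue: complete the cycle
      assert(r0 == 20 && r1 == 10);
      return 0;
    }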
@@ -233,8 +229,7 @@ void LGapResolver::EmitMove(int index) {
       MemOperand destination_operand = cgen_->ToMemOperand(destination);
       if (in_cycle_) {
         if (!destination_operand.OffsetIsUint12Encodable()) {
-          CpuFeatures::Scope scope(VFP2);
-            // ip is overwritten while saving the value to the destination.
+          // ip is overwritten while saving the value to the destination.
           // Therefore we can't use ip.  It is OK if the read from the source
           // destroys ip, since that happens before the value is read.
           __ vldr(kScratchDoubleReg.low(), source_operand);
@@ -272,8 +267,7 @@ void LGapResolver::EmitMove(int index) {
     }
 
   } else if (source->IsDoubleRegister()) {
-    CpuFeatures::Scope scope(VFP2);
-    DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
+    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
     if (destination->IsDoubleRegister()) {
       __ vmov(cgen_->ToDoubleRegister(destination), source_register);
     } else {
@@ -282,8 +276,7 @@ void LGapResolver::EmitMove(int index) {
     }
 
   } else if (source->IsDoubleStackSlot()) {
-    CpuFeatures::Scope scope(VFP2);
-      MemOperand source_operand = cgen_->ToMemOperand(source);
+    MemOperand source_operand = cgen_->ToMemOperand(source);
     if (destination->IsDoubleRegister()) {
       __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
     } else {
index 33b557e43cdd702d1e364a6564151cd63166b476..dc1dc1da9b68de5ce5753500fe4155ca951a00fb 100644 (file)
@@ -290,7 +290,7 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {
 }
 
 
-void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
+void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
   ASSERT(CpuFeatures::IsSupported(VFP2));
   CpuFeatures::Scope scope(VFP2);
   if (!dst.is(src)) {
@@ -643,19 +643,19 @@ void MacroAssembler::PopSafepointRegisters() {
 
 void MacroAssembler::PushSafepointRegistersAndDoubles() {
   PushSafepointRegisters();
-  sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
+  sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
                       kDoubleSize));
-  for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
+  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
     vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
   }
 }
 
 
 void MacroAssembler::PopSafepointRegistersAndDoubles() {
-  for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
+  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
     vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
   }
-  add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
+  add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
                       kDoubleSize));
   PopSafepointRegisters();
 }
@@ -691,7 +691,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
 
 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
   // General purpose registers are pushed last on the stack.
-  int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
+  int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
   return MemOperand(sp, doubles_size + register_offset);
 }
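
The offset computed in SafepointRegistersAndDoublesSlot above follows
directly from the push order in PushSafepointRegistersAndDoubles: the
general-purpose registers are pushed first, then the allocatable doubles
below them, so every GPR slot sits above all of the double slots. Worked
out with an assumed register count (the real count comes from the register
definitions):

    #include <cassert>

    const int kPointerSize = 4;
    const int kDoubleSize = 8;
    const int kNumAllocatableDoubles = 15;  // assumption for illustration

    int SafepointGprSlotOffset(int register_stack_index) {
      int doubles_size = kNumAllocatableDoubles * kDoubleSize;
      return doubles_size + register_stack_index * kPointerSize;
    }

    int main() {
      // The doubles occupy 120 bytes, so GPR slot 3 is at 120 + 12 = 132.
      assert(SafepointGprSlotOffset(3) == 132);
      return 0;
    }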
@@ -967,7 +967,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
   }
 }
 
-void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
+void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
   ASSERT(CpuFeatures::IsSupported(VFP2));
   if (use_eabi_hardfloat()) {
     Move(dst, d0);
@@ -2729,10 +2729,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
   const Runtime::Function* function = Runtime::FunctionForId(id);
   mov(r0, Operand(function->nargs));
   mov(r1, Operand(ExternalReference(function, isolate())));
-  SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
-      ? kSaveFPRegs
-      : kDontSaveFPRegs;
-  CEntryStub stub(1, mode);
+  CEntryStub stub(1, kSaveFPRegs);
   CallStub(&stub);
 }
 
@@ -3408,9 +3405,9 @@ int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
   if (use_eabi_hardfloat()) {
     // In the hard floating point calling convention, we can use
     // all double registers to pass doubles.
-    if (num_double_arguments > DoubleRegister::NumRegisters()) {
+    if (num_double_arguments > DoubleRegister::kNumRegisters) {
       stack_passed_words +=
-          2 * (num_double_arguments - DoubleRegister::NumRegisters());
+          2 * (num_double_arguments - DoubleRegister::kNumRegisters);
     }
   } else {
     // In the soft floating point calling convention, every double
@@ -3451,7 +3448,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
 }
 
 
-void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
   ASSERT(CpuFeatures::IsSupported(VFP2));
   if (use_eabi_hardfloat()) {
     Move(d0, dreg);
@@ -3461,8 +3458,8 @@ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
 }
 
 
-void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
-                                             DwVfpRegister dreg2) {
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
+                                             DoubleRegister dreg2) {
   ASSERT(CpuFeatures::IsSupported(VFP2));
   if (use_eabi_hardfloat()) {
     if (dreg2.is(d0)) {
@@ -3480,7 +3477,7 @@ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
 }
 
 
-void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
                                              Register reg) {
   ASSERT(CpuFeatures::IsSupported(VFP2));
   if (use_eabi_hardfloat()) {
@@ -3763,8 +3760,8 @@ void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
 
 
 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
-                                        DwVfpRegister input_reg,
-                                        DwVfpRegister temp_double_reg) {
+                                        DoubleRegister input_reg,
+                                        DoubleRegister temp_double_reg) {
   Label above_zero;
   Label done;
   Label in_bounds;
index 5321a0d81ef40a8aed33f67aebbc3fb5b1878c72..15cef16f057a0a03244503168aa9ceb1002bd49b 100644 (file)
@@ -178,7 +178,7 @@ class MacroAssembler: public Assembler {
   // Register move. May do nothing if the registers are identical.
   void Move(Register dst, Handle<Object> value);
   void Move(Register dst, Register src, Condition cond = al);
-  void Move(DwVfpRegister dst, DwVfpRegister src);
+  void Move(DoubleRegister dst, DoubleRegister src);
 
   // Load an object from the root table.
   void LoadRoot(Register destination,
@@ -1066,9 +1066,9 @@ class MacroAssembler: public Assembler {
   // whether soft or hard floating point ABI is used. These functions
   // abstract parameter passing for the three different ways we call
   // C functions from generated code.
-  void SetCallCDoubleArguments(DwVfpRegister dreg);
-  void SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2);
-  void SetCallCDoubleArguments(DwVfpRegister dreg, Register reg);
+  void SetCallCDoubleArguments(DoubleRegister dreg);
+  void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
+  void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
 
   // Calls a C function and cleans up the space for arguments allocated
   // by PrepareCallCFunction. The called function is not allowed to trigger a
@@ -1084,7 +1084,7 @@ class MacroAssembler: public Assembler {
                      int num_reg_arguments,
                      int num_double_arguments);
 
-  void GetCFunctionDoubleResult(const DwVfpRegister dst);
+  void GetCFunctionDoubleResult(const DoubleRegister dst);
 
   // Calls an API function.  Allocates HandleScope, extracts returned value
   // from handle and propagates exceptions.  Restores context.  stack_space
@@ -1297,8 +1297,8 @@ class MacroAssembler: public Assembler {
   void ClampUint8(Register output_reg, Register input_reg);
 
   void ClampDoubleToUint8(Register result_reg,
-                          DwVfpRegister input_reg,
-                          DwVfpRegister temp_double_reg);
+                          DoubleRegister input_reg,
+                          DoubleRegister temp_double_reg);
 
 
   void LoadInstanceDescriptors(Register map, Register descriptors);
@@ -1373,9 +1373,9 @@ class MacroAssembler: public Assembler {
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
 
-  // Needs access to SafepointRegisterStackIndex for compiled frame
+  // Needs access to SafepointRegisterStackIndex for optimized frame
   // traversal.
-  friend class CompiledFrame;
+  friend class OptimizedFrame;
 };
 
 
index dd5db3054e9bf42db69cd5236a759fd438275d95..a194dfae5b0099a00e3ef77ec7be6c5d7ab7d5b4 100644 (file)
@@ -1053,6 +1053,42 @@ static void StoreIntAsFloat(MacroAssembler* masm,
 }
 
 
+// Convert unsigned integer with specified number of leading zeroes in binary
+// representation to IEEE 754 double.
+// Integer to convert is passed in register hiword.
+// Resulting double is returned in registers hiword:loword.
+// This function does not work correctly for 0.
+static void GenerateUInt2Double(MacroAssembler* masm,
+                                Register hiword,
+                                Register loword,
+                                Register scratch,
+                                int leading_zeroes) {
+  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
+  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
+
+  const int mantissa_shift_for_hi_word =
+      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
+
+  const int mantissa_shift_for_lo_word =
+      kBitsPerInt - mantissa_shift_for_hi_word;
+
+  __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
+  if (mantissa_shift_for_hi_word > 0) {
+    __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
+    __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
+  } else {
+    __ mov(loword, Operand(0, RelocInfo::NONE));
+    __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
+  }
+
+  // If the least significant bit of the biased exponent was not 1, it was
+  // corrupted by the most significant bit of the mantissa, so fix it here.
+  if (!(biased_exponent & 1)) {
+    __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
+  }
+}
+
+
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -3283,17 +3319,9 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
   //  -- r1    : receiver
   // -----------------------------------
   ElementsKind elements_kind = receiver_map->elements_kind();
-  if (receiver_map->has_fast_elements() ||
-      receiver_map->has_external_array_elements()) {
-    Handle<Code> stub = KeyedLoadFastElementStub(
-        receiver_map->instance_type() == JS_ARRAY_TYPE,
-        elements_kind).GetCode();
-    __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
-  } else {
-    Handle<Code> stub =
-        KeyedLoadDictionaryElementStub().GetCode();
-    __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
-  }
+  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+
+  __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
@@ -3698,6 +3726,339 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
 }
 
 
+void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+    MacroAssembler* masm,
+    ElementsKind elements_kind) {
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- r0     : key
+  //  -- r1     : receiver
+  // -----------------------------------
+  Label miss_force_generic, slow, failed_allocation;
+
+  Register key = r0;
+  Register receiver = r1;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
+
+  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  // r3: elements array
+
+  // Check that the index is in range.
+  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
+  __ cmp(key, ip);
+  // Unsigned comparison catches both negative and too-large values.
+  __ b(hs, &miss_force_generic);
+
+  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
+  // r3: base pointer of external storage
+
+  // We do not untag the smi key; instead we work with it
+  // as if it were premultiplied by 2.
+  STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
+
+  Register value = r2;
+  switch (elements_kind) {
+    case EXTERNAL_BYTE_ELEMENTS:
+      __ ldrsb(value, MemOperand(r3, key, LSR, 1));
+      break;
+    case EXTERNAL_PIXEL_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      __ ldrb(value, MemOperand(r3, key, LSR, 1));
+      break;
+    case EXTERNAL_SHORT_ELEMENTS:
+      __ ldrsh(value, MemOperand(r3, key, LSL, 0));
+      break;
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      __ ldrh(value, MemOperand(r3, key, LSL, 0));
+      break;
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      __ ldr(value, MemOperand(r3, key, LSL, 1));
+      break;
+    case EXTERNAL_FLOAT_ELEMENTS:
+      if (CpuFeatures::IsSupported(VFP2)) {
+        CpuFeatures::Scope scope(VFP2);
+        __ add(r2, r3, Operand(key, LSL, 1));
+        __ vldr(s0, r2, 0);
+      } else {
+        __ ldr(value, MemOperand(r3, key, LSL, 1));
+      }
+      break;
+    case EXTERNAL_DOUBLE_ELEMENTS:
+      if (CpuFeatures::IsSupported(VFP2)) {
+        CpuFeatures::Scope scope(VFP2);
+        __ add(r2, r3, Operand(key, LSL, 2));
+        __ vldr(d0, r2, 0);
+      } else {
+        __ add(r4, r3, Operand(key, LSL, 2));
+        // r4: pointer to the beginning of the double we want to load.
+        __ ldr(r2, MemOperand(r4, 0));
+        __ ldr(r3, MemOperand(r4, Register::kSizeInBytes));
+      }
+      break;
+    case FAST_ELEMENTS:
+    case FAST_SMI_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS:
+    case FAST_HOLEY_ELEMENTS:
+    case FAST_HOLEY_SMI_ELEMENTS:
+    case FAST_HOLEY_DOUBLE_ELEMENTS:
+    case DICTIONARY_ELEMENTS:
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      UNREACHABLE();
+      break;
+  }
+
+  // For integer array types:
+  // r2: value
+  // For float array type:
+  // s0: value (if VFP2 is supported)
+  // r2: value (if VFP2 is not supported)
+  // For double array type:
+  // d0: value (if VFP2 is supported)
+  // r2/r3: value (if VFP2 is not supported)
+
+  if (elements_kind == EXTERNAL_INT_ELEMENTS) {
+    // For the Int and UnsignedInt array types, we need to see whether
+    // the value can be represented in a Smi. If not, we need to convert
+    // it to a HeapNumber.
+    Label box_int;
+    __ cmp(value, Operand(0xC0000000));
+    __ b(mi, &box_int);
+    // Tag integer as smi and return it.
+    __ mov(r0, Operand(value, LSL, kSmiTagSize));
+    __ Ret();
+
+    __ bind(&box_int);
+    if (CpuFeatures::IsSupported(VFP2)) {
+      CpuFeatures::Scope scope(VFP2);
+      // Allocate a HeapNumber for the result and perform int-to-double
+      // conversion.  Don't touch r0 or r1 as they are needed if allocation
+      // fails.
+      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+
+      __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
+      // Now we can use r0 for the result as key is not needed any more.
+      __ add(r0, r5, Operand(kHeapObjectTag));
+      __ vmov(s0, value);
+      __ vcvt_f64_s32(d0, s0);
+      __ vstr(d0, r5, HeapNumber::kValueOffset);
+      __ Ret();
+    } else {
+      // Allocate a HeapNumber for the result and perform int-to-double
+      // conversion.  Don't touch r0 or r1 as they are needed if allocation
+      // fails.
+      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT);
+      // Now we can use r0 for the result as key is not needed any more.
+      __ mov(r0, r5);
+      Register dst_mantissa = r1;
+      Register dst_exponent = r3;
+      FloatingPointHelper::Destination dest =
+          FloatingPointHelper::kCoreRegisters;
+      FloatingPointHelper::ConvertIntToDouble(masm,
+                                              value,
+                                              dest,
+                                              d0,
+                                              dst_mantissa,
+                                              dst_exponent,
+                                              r9,
+                                              s0);
+      __ str(dst_mantissa, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+      __ str(dst_exponent, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+      __ Ret();
+    }
+  } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+    // The test is different for unsigned int values. Since we need
+    // the value to be in the range of a positive smi, we can't
+    // handle either of the top two bits being set in the value.
+    if (CpuFeatures::IsSupported(VFP2)) {
+      CpuFeatures::Scope scope(VFP2);
+      Label box_int, done;
+      __ tst(value, Operand(0xC0000000));
+      __ b(ne, &box_int);
+      // Tag integer as smi and return it.
+      __ mov(r0, Operand(value, LSL, kSmiTagSize));
+      __ Ret();
+
+      __ bind(&box_int);
+      __ vmov(s0, value);
+      // Allocate a HeapNumber for the result and perform int-to-double
+      // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
+      // registers - also when jumping due to exhausted young space.
+      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
+
+      __ vcvt_f64_u32(d0, s0);
+      __ vstr(d0, r2, HeapNumber::kValueOffset);
+
+      __ add(r0, r2, Operand(kHeapObjectTag));
+      __ Ret();
+    } else {
+      // Check whether unsigned integer fits into smi.
+      Label box_int_0, box_int_1, done;
+      __ tst(value, Operand(0x80000000));
+      __ b(ne, &box_int_0);
+      __ tst(value, Operand(0x40000000));
+      __ b(ne, &box_int_1);
+      // Tag integer as smi and return it.
+      __ mov(r0, Operand(value, LSL, kSmiTagSize));
+      __ Ret();
+
+      Register hiword = value;  // r2.
+      Register loword = r3;
+
+      __ bind(&box_int_0);
+      // Integer does not have leading zeros.
+      GenerateUInt2Double(masm, hiword, loword, r4, 0);
+      __ b(&done);
+
+      __ bind(&box_int_1);
+      // Integer has one leading zero.
+      GenerateUInt2Double(masm, hiword, loword, r4, 1);
+
+
+      __ bind(&done);
+      // Integer was converted to double in registers hiword:loword.
+      // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
+      // clobbers all registers - also when jumping due to exhausted young
+      // space.
+      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT);
+
+      __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
+      __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
+
+      __ mov(r0, r4);
+      __ Ret();
+    }
+  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+    // For the floating-point array type, we need to always allocate a
+    // HeapNumber.
+    if (CpuFeatures::IsSupported(VFP2)) {
+      CpuFeatures::Scope scope(VFP2);
+      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
+      __ vcvt_f64_f32(d0, s0);
+      __ vstr(d0, r2, HeapNumber::kValueOffset);
+
+      __ add(r0, r2, Operand(kHeapObjectTag));
+      __ Ret();
+    } else {
+      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT);
+      // VFP is not available; do a manual single-to-double conversion.
+
+      // r2: floating point value (binary32)
+      // r3: heap number for result
+
+      // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
+      // the slow case from here.
+      __ and_(r0, value, Operand(kBinary32MantissaMask));
+
+      // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
+      // the slow case from here.
+      __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
+      __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+      Label exponent_rebiased;
+      __ teq(r1, Operand(0x00));
+      __ b(eq, &exponent_rebiased);
+
+      __ teq(r1, Operand(0xff));
+      __ mov(r1, Operand(0x7ff), LeaveCC, eq);
+      __ b(eq, &exponent_rebiased);
+
+      // Rebias exponent.
+      __ add(r1,
+             r1,
+             Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
+
+      __ bind(&exponent_rebiased);
+      __ and_(r2, value, Operand(kBinary32SignMask));
+      value = no_reg;
+      __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
+
+      // Shift mantissa.
+      static const int kMantissaShiftForHiWord =
+          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+      static const int kMantissaShiftForLoWord =
+          kBitsPerInt - kMantissaShiftForHiWord;
+
+      __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
+      __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
+
+      __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
+      __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
+
+      __ mov(r0, r3);
+      __ Ret();
+    }
+  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    if (CpuFeatures::IsSupported(VFP2)) {
+      CpuFeatures::Scope scope(VFP2);
+      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
+      __ vstr(d0, r2, HeapNumber::kValueOffset);
+
+      __ add(r0, r2, Operand(kHeapObjectTag));
+      __ Ret();
+    } else {
+      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT);
+
+      __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
+      __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
+      __ mov(r0, r4);
+      __ Ret();
+    }
+
+  } else {
+    // Tag integer as smi and return it.
+    __ mov(r0, Operand(value, LSL, kSmiTagSize));
+    __ Ret();
+  }
+
+  // Slow case, key and receiver still in r0 and r1.
+  __ bind(&slow);
+  __ IncrementCounter(
+      masm->isolate()->counters()->keyed_load_external_array_slow(),
+      1, r2, r3);
+
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- r0     : key
+  //  -- r1     : receiver
+  // -----------------------------------
+
+  __ Push(r1, r0);
+
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+
+  __ bind(&miss_force_generic);
+  Handle<Code> stub =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(stub, RelocInfo::CODE_TARGET);
+}
+
+
 void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     MacroAssembler* masm,
     ElementsKind elements_kind) {
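
The no-VFP float path in the load stub above does a binary32-to-binary64
conversion by hand: rebias the exponent from 127 to 1023, keeping 0 and
0xFF special (zeros pass through unchanged, Inf/NaN map to a full 0x7FF
exponent, and denormal inputs take the exponent==0 path unchanged, exactly
as in the generated code), then left-align the 23 mantissa bits into the
52-bit field split across two words. A compact sketch of the same
transformation:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    const int kBinary32MantissaBits = 23;
    const int kBinary32ExponentBias = 127;
    const int kDoubleExponentBias = 1023;
    const int kMantissaBitsInTopWord = 20;

    uint64_t Float32ToFloat64Bits(uint32_t f) {
      uint32_t mantissa = f & ((1u << kBinary32MantissaBits) - 1);
      uint32_t exponent = (f >> kBinary32MantissaBits) & 0xFF;
      uint32_t sign = f & 0x80000000u;

      if (exponent == 0xFF) {
        exponent = 0x7FF;  // Inf/NaN keep a saturated exponent
      } else if (exponent != 0) {
        exponent += kDoubleExponentBias - kBinary32ExponentBias;  // rebias
      }
      // Top 20 mantissa bits join the exponent in the high word; the
      // remaining 3 bits are left-aligned in the low word.
      uint32_t hi = sign | (exponent << kMantissaBitsInTopWord) |
                    (mantissa >> (kBinary32MantissaBits -
                                  kMantissaBitsInTopWord));
      uint32_t lo = mantissa << (32 - (kBinary32MantissaBits -
                                       kMantissaBitsInTopWord));
      return (static_cast<uint64_t>(hi) << 32) | lo;
    }

    int main() {
      float f = 1.5f;
      uint32_t fbits;
      std::memcpy(&fbits, &f, sizeof(fbits));
      uint64_t dbits = Float32ToFloat64Bits(fbits);
      double d;
      std::memcpy(&d, &dbits, sizeof(d));
      assert(d == 1.5);
      return 0;
    }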
@@ -4042,6 +4403,118 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
 }
 
 
+void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- r0    : key
+  //  -- r1    : receiver
+  // -----------------------------------
+  Label miss_force_generic;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic);
+
+  // Get the elements array.
+  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
+  __ AssertFastElements(r2);
+
+  // Check that the key is within bounds.
+  __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
+  __ cmp(r0, Operand(r3));
+  __ b(hs, &miss_force_generic);
+
+  // Load the result and make sure it's not the hole.
+  __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ ldr(r4,
+         MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ cmp(r4, ip);
+  __ b(eq, &miss_force_generic);
+  __ mov(r0, r4);
+  __ Ret();
+
+  __ bind(&miss_force_generic);
+  Handle<Code> stub =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(stub, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- r0    : key
+  //  -- r1    : receiver
+  // -----------------------------------
+  Label miss_force_generic, slow_allocate_heapnumber;
+
+  Register key_reg = r0;
+  Register receiver_reg = r1;
+  Register elements_reg = r2;
+  Register heap_number_reg = r2;
+  Register indexed_double_offset = r3;
+  Register scratch = r4;
+  Register scratch2 = r5;
+  Register scratch3 = r6;
+  Register heap_number_map = r7;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
+
+  // Get the elements array.
+  __ ldr(elements_reg,
+         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+
+  // Check that the key is within bounds.
+  __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+  __ cmp(key_reg, Operand(scratch));
+  __ b(hs, &miss_force_generic);
+
+  // Load the upper word of the double in the fixed array and test for NaN.
+  __ add(indexed_double_offset, elements_reg,
+         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
+  __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
+  __ cmp(scratch, Operand(kHoleNanUpper32));
+  __ b(&miss_force_generic, eq);
+
+  // Non-NaN. Allocate a new heap number and copy the double value into it.
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
+                        heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
+
+  // No need to reload the upper 32 bits of the double; they are already
+  // in scratch.
+  __ str(scratch, FieldMemOperand(heap_number_reg,
+                                  HeapNumber::kExponentOffset));
+  __ ldr(scratch, FieldMemOperand(indexed_double_offset,
+                                  FixedArray::kHeaderSize));
+  __ str(scratch, FieldMemOperand(heap_number_reg,
+                                  HeapNumber::kMantissaOffset));
+
+  __ mov(r0, heap_number_reg);
+  __ Ret();
+
+  __ bind(&slow_allocate_heapnumber);
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&miss_force_generic);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
 void KeyedStoreStubCompiler::GenerateStoreFastElement(
     MacroAssembler* masm,
     bool is_js_array,
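
A note on the key arithmetic used throughout these stubs (the LSR/LSL
scaling above and the "premultiplied by 2" comment earlier): on 32-bit V8 a
smi stores its value shifted left by kSmiTagSize, so the tagged key can be
scaled straight to a byte offset without untagging first. Worked through
for double-sized elements:

    #include <cassert>

    const int kSmiTagSize = 1;      // smi encoding: value << 1 on 32 bits
    const int kDoubleSizeLog2 = 3;  // sizeof(double) == 8

    int main() {
      int key = 5;
      int tagged_key = key << kSmiTagSize;  // what the stub receives: 10
      // The shift used above: LSL by (kDoubleSizeLog2 - kSmiTagSize).
      int byte_offset = tagged_key << (kDoubleSizeLog2 - kSmiTagSize);
      assert(byte_offset == key * 8);  // key * sizeof(double)
      return 0;
    }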
index ccaf290d9eb47818782accc6707b08dd796f9b05..25157be2eb42524c9901b4dbb8050b6cc224bb12 100644 (file)
@@ -1375,11 +1375,6 @@ ExternalReference ExternalReference::page_flags(Page* page) {
 }
 
 
-ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
-  return ExternalReference(entry);
-}
-
-
 // Helper function to compute x^y, where y is known to be an
 // integer. Uses binary decomposition to limit the number of
 // multiplications; see the discussion in "Hacker's Delight" by Henry
index 111c1d95f73b8a971397d1c03da56ae054f338e1..4639374c225daf7c929976891c594e5c7caa4a8e 100644 (file)
@@ -736,8 +736,6 @@ class ExternalReference BASE_EMBEDDED {
 
   static ExternalReference page_flags(Page* page);
 
-  static ExternalReference ForDeoptEntry(Address entry);
-
   Address address() const {return reinterpret_cast<Address>(address_);}
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
index c43b9138bb8c81c7fc1e4a7bdb9c8be6fae18e68..232cb739a1db52769d3ea182658c821ac105033c 100644 (file)
@@ -616,6 +616,14 @@ void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
 // ----------------------------------------------------------------------------
 // Implementation of AstVisitor
 
+bool AstVisitor::CheckStackOverflow() {
+  if (stack_overflow_) return true;
+  StackLimitCheck check(isolate_);
+  if (!check.HasOverflowed()) return false;
+  return (stack_overflow_ = true);
+}
+
+
 void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
   for (int i = 0; i < declarations->length(); i++) {
     Visit(declarations->at(i));
index a0a7a7320649fdff523c509f9f49e7760b4b3618..d299f19a23511a818b01c5e703e6626fc1df42ac 100644 (file)
--- a/src/ast.h
+++ b/src/ast.h
@@ -2492,51 +2492,40 @@ inline ModuleVariable::ModuleVariable(VariableProxy* proxy)
 
 class AstVisitor BASE_EMBEDDED {
  public:
-  AstVisitor() {}
+  AstVisitor() : isolate_(Isolate::Current()), stack_overflow_(false) { }
   virtual ~AstVisitor() { }
 
   // Stack overflow check and dynamic dispatch.
-  virtual void Visit(AstNode* node) = 0;
+  void Visit(AstNode* node) { if (!CheckStackOverflow()) node->Accept(this); }
 
   // Iteration left-to-right.
   virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
   virtual void VisitStatements(ZoneList<Statement*>* statements);
   virtual void VisitExpressions(ZoneList<Expression*>* expressions);
 
+  // Stack overflow tracking support.
+  bool HasStackOverflow() const { return stack_overflow_; }
+  bool CheckStackOverflow();
+
+  // If a stack-overflow exception is encountered when visiting a
+  // node, calling SetStackOverflow will make sure that the visitor
+  // bails out without visiting more nodes.
+  void SetStackOverflow() { stack_overflow_ = true; }
+  void ClearStackOverflow() { stack_overflow_ = false; }
+
   // Individual AST nodes.
 #define DEF_VISIT(type)                         \
   virtual void Visit##type(type* node) = 0;
   AST_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
-};
 
+ protected:
+  Isolate* isolate() { return isolate_; }
 
-#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS()                       \
-public:                                                             \
-  virtual void Visit(AstNode* node) {                               \
-    if (!CheckStackOverflow()) node->Accept(this);                  \
-  }                                                                 \
-                                                                    \
-  void SetStackOverflow() { stack_overflow_ = true; }               \
-  void ClearStackOverflow() { stack_overflow_ = false; }            \
-  bool HasStackOverflow() const { return stack_overflow_; }         \
-                                                                    \
-  bool CheckStackOverflow() {                                       \
-    if (stack_overflow_) return true;                               \
-    StackLimitCheck check(isolate_);                                \
-    if (!check.HasOverflowed()) return false;                       \
-    return (stack_overflow_ = true);                                \
-  }                                                                 \
-                                                                    \
-private:                                                            \
-  void InitializeAstVisitor() {                                     \
-    isolate_ = Isolate::Current();                                  \
-    stack_overflow_ = false;                                        \
-  }                                                                 \
-  Isolate* isolate() { return isolate_; }                           \
-                                                                    \
-  Isolate* isolate_;                                                \
-  bool stack_overflow_
+ private:
+  Isolate* isolate_;
+  bool stack_overflow_;
+};
 
 
 // ----------------------------------------------------------------------------
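
The Visit/CheckStackOverflow pair restored to AstVisitor above implements a
sticky bail-out: once the flag is set, every further Visit is a no-op, so a
deep traversal unwinds without touching more nodes. A trimmed-down model of
the pattern; Node and the visitor here are simplified stand-ins, and the
real check also consults the isolate's stack limit.

    #include <cstdio>

    struct Node;

    struct Visitor {
      bool stack_overflow_;
      Visitor() : stack_overflow_(false) {}
      bool CheckStackOverflow() {
        // The real implementation also asks StackLimitCheck(isolate_) here.
        return stack_overflow_;
      }
      void SetStackOverflow() { stack_overflow_ = true; }
      void Visit(Node* node);
    };

    struct Node {
      void Accept(Visitor* v) { std::puts("visited"); }
    };

    void Visitor::Visit(Node* node) {
      if (!CheckStackOverflow()) node->Accept(this);
    }

    int main() {
      Visitor v;
      Node n;
      v.Visit(&n);           // prints "visited"
      v.SetStackOverflow();
      v.Visit(&n);           // sticky flag: bails out, prints nothing
      return 0;
    }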
index 1ca405337b2cb2a57ff701774b2a6815a6efa10e..a2f752e052fd881984e04b535cfb5a3077f57c3f 100644 (file)
@@ -107,8 +107,6 @@ enum BuiltinExtraArguments {
                                     Code::kNoExtraICState)              \
   V(NotifyLazyDeoptimized,          BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
-  V(NotifyICMiss,                   BUILTIN, UNINITIALIZED,             \
-                                    Code::kNoExtraICState)              \
   V(NotifyOSR,                      BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
                                                                         \
@@ -388,7 +386,6 @@ class Builtins {
   static void Generate_NotifyDeoptimized(MacroAssembler* masm);
   static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
   static void Generate_NotifyOSR(MacroAssembler* masm);
-  static void Generate_NotifyICMiss(MacroAssembler* masm);
   static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
 
   static void Generate_FunctionCall(MacroAssembler* masm);
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
deleted file mode 100644 (file)
index 74bd93f..0000000
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "code-stubs.h"
-#include "hydrogen.h"
-#include "lithium.h"
-
-namespace v8 {
-namespace internal {
-
-
-Handle<Code> HydrogenCodeStub::CodeFromGraph(HGraph* graph) {
-  graph->OrderBlocks();
-  graph->AssignDominators();
-  graph->CollectPhis();
-  graph->InsertRepresentationChanges();
-  graph->EliminateRedundantBoundsChecks();
-  LChunk* chunk = LChunk::NewChunk(graph);
-  ASSERT(chunk != NULL);
-  Handle<Code> stub = chunk->Codegen(Code::COMPILED_STUB);
-  return stub;
-}
-
-
-class CodeStubGraphBuilderBase : public HGraphBuilder {
- public:
-  CodeStubGraphBuilderBase(Isolate* isolate, HydrogenCodeStub* stub)
-      : HGraphBuilder(&info_), info_(stub, isolate) {}
-  virtual bool BuildGraph();
-
- protected:
-  virtual void BuildCodeStub() = 0;
-  HParameter* GetParameter(int parameter) { return parameters_[parameter]; }
-  HydrogenCodeStub* stub() { return info_.code_stub(); }
-
- private:
-  SmartArrayPointer<HParameter*> parameters_;
-  CompilationInfoWithZone info_;
-};
-
-
-bool CodeStubGraphBuilderBase::BuildGraph() {
-  if (FLAG_trace_hydrogen) {
-    PrintF("-----------------------------------------------------------\n");
-    PrintF("Compiling stub using hydrogen\n");
-    HTracer::Instance()->TraceCompilation(&info_);
-  }
-  HBasicBlock* next_block = graph()->CreateBasicBlock();
-  next_block->SetInitialEnvironment(graph()->start_environment());
-  HGoto* jump = new(zone()) HGoto(next_block);
-  graph()->entry_block()->Finish(jump);
-  set_current_block(next_block);
-
-  int major_key = stub()->MajorKey();
-  CodeStubInterfaceDescriptor* descriptor =
-      info_.isolate()->code_stub_interface_descriptor(major_key);
-  if (descriptor->register_param_count_ < 0) {
-    stub()->InitializeInterfaceDescriptor(info_.isolate(), descriptor);
-  }
-  parameters_.Reset(new HParameter*[descriptor->register_param_count_]);
-
-  HGraph* graph = this->graph();
-  Zone* zone = this->zone();
-  for (int i = 0; i < descriptor->register_param_count_; ++i) {
-    HParameter* param = new(zone) HParameter(i);
-    AddInstruction(param);
-    graph->start_environment()->Push(param);
-    parameters_[i] = param;
-  }
-  AddSimulate(BailoutId::StubEntry());
-
-  BuildCodeStub();
-
-  return true;
-}
-
-template <class Stub>
-class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
- public:
-  explicit CodeStubGraphBuilder(Stub* stub)
-      : CodeStubGraphBuilderBase(Isolate::Current(), stub) {}
-
- protected:
-  virtual void BuildCodeStub();
-  Stub* casted_stub() { return static_cast<Stub*>(stub()); }
-};
-
-
-template <>
-void CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
-  Zone* zone = this->zone();
-
-  HInstruction* load = BuildUncheckedMonomorphicElementAccess(
-      GetParameter(0), GetParameter(1), NULL, NULL,
-      casted_stub()->is_js_array(), casted_stub()->elements_kind(), false);
-  AddInstruction(load);
-
-  HReturn* ret = new(zone) HReturn(load);
-  current_block()->Finish(ret);
-}
-
-
-Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
-  CodeStubGraphBuilder<KeyedLoadFastElementStub> builder(this);
-  return CodeFromGraph(builder.CreateGraph());
-}
-
-
-} }  // namespace v8::internal
index c7d4c805d0c87824ecd1cbdb23afc49dfa710f02..276c87ebd00a227f162ed93cf81770ddcce05830 100644 (file)
@@ -48,6 +48,20 @@ bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) {
 }
 
 
+void CodeStub::GenerateCode(MacroAssembler* masm) {
+  // Update the static counter each time a new code stub is generated.
+  masm->isolate()->counters()->code_stubs()->Increment();
+
+  // Nested stubs are not allowed for leaves.
+  AllowStubCallsScope allow_scope(masm, false);
+
+  // Generate the code for the stub.
+  masm->set_generating_stub(true);
+  NoCurrentFrameScope scope(masm);
+  Generate(masm);
+}
+
+
 SmartArrayPointer<const char> CodeStub::GetName() {
   char buffer[100];
   NoAllocationStringAllocator allocator(buffer,
@@ -58,7 +72,8 @@ SmartArrayPointer<const char> CodeStub::GetName() {
 }
 
 
-void CodeStub::RecordCodeGeneration(Code* code, Isolate* isolate) {
+void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
+  Isolate* isolate = masm->isolate();
   SmartArrayPointer<const char> name = GetName();
   PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
   GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
@@ -72,39 +87,6 @@ int CodeStub::GetCodeKind() {
 }
 
 
-Handle<Code> PlatformCodeStub::GenerateCode() {
-  Isolate* isolate = Isolate::Current();
-  Factory* factory = isolate->factory();
-
-  // Generate the new code.
-  MacroAssembler masm(isolate, NULL, 256);
-
-  {
-    // Update the static counter each time a new code stub is generated.
-    isolate->counters()->code_stubs()->Increment();
-
-    // Nested stubs are not allowed for leaves.
-    AllowStubCallsScope allow_scope(&masm, false);
-
-    // Generate the code for the stub.
-    masm.set_generating_stub(true);
-    NoCurrentFrameScope scope(&masm);
-    Generate(&masm);
-  }
-
-  // Create the code object.
-  CodeDesc desc;
-  masm.GetCode(&desc);
-
-  // Copy the generated code into a heap object.
-  Code::Flags flags = Code::ComputeFlags(
-      static_cast<Code::Kind>(GetCodeKind()), GetICState());
-  Handle<Code> new_object = factory->NewCode(
-      desc, flags, masm.CodeObject(), NeedsImmovableCode());
-  return new_object;
-}
-
-
 Handle<Code> CodeStub::GetCode() {
   Isolate* isolate = Isolate::Current();
   Factory* factory = isolate->factory();
@@ -120,10 +102,23 @@ Handle<Code> CodeStub::GetCode() {
   {
     HandleScope scope(isolate);
 
-    Handle<Code> new_object = GenerateCode();
+    // Generate the new code.
+    MacroAssembler masm(isolate, NULL, 256);
+    GenerateCode(&masm);
+
+    // Create the code object.
+    CodeDesc desc;
+    masm.GetCode(&desc);
+
+    // Copy the generated code into a heap object.
+    Code::Flags flags = Code::ComputeFlags(
+        static_cast<Code::Kind>(GetCodeKind()),
+        GetICState());
+    Handle<Code> new_object = factory->NewCode(
+        desc, flags, masm.CodeObject(), NeedsImmovableCode());
     new_object->set_major_key(MajorKey());
     FinishCode(new_object);
-    RecordCodeGeneration(*new_object, isolate);
+    RecordCodeGeneration(*new_object, &masm);
 
 #ifdef ENABLE_DISASSEMBLER
     if (FLAG_print_code_stubs) {
@@ -421,8 +416,36 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
 }
 
 
-void KeyedLoadDictionaryElementStub::Generate(MacroAssembler* masm) {
-  KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
+void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
+  switch (elements_kind_) {
+    case FAST_ELEMENTS:
+    case FAST_HOLEY_ELEMENTS:
+    case FAST_SMI_ELEMENTS:
+    case FAST_HOLEY_SMI_ELEMENTS:
+      KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
+      break;
+    case FAST_DOUBLE_ELEMENTS:
+    case FAST_HOLEY_DOUBLE_ELEMENTS:
+      KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
+      break;
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+    case EXTERNAL_PIXEL_ELEMENTS:
+      KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_);
+      break;
+    case DICTIONARY_ELEMENTS:
+      KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
+      break;
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      UNREACHABLE();
+      break;
+  }
 }
 
 
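
The net effect of the code-stubs.cc hunks above is that stub compilation is again one non-virtual path: GetCode() drives a MacroAssembler directly instead of dispatching to a per-hierarchy GenerateCode(). A minimal sketch of the restored flow, using only calls visible in the hunks (all types are V8-internal; the cache lookup, key/bookkeeping steps, and disassembler branch are elided):

    Handle<Code> CodeStub::GetCode() {
      Isolate* isolate = Isolate::Current();
      MacroAssembler masm(isolate, NULL, 256);  // scratch assembly buffer
      GenerateCode(&masm);                      // non-virtual wrapper; calls Generate()
      CodeDesc desc;
      masm.GetCode(&desc);                      // finalize the instruction stream
      Code::Flags flags = Code::ComputeFlags(
          static_cast<Code::Kind>(GetCodeKind()), GetICState());
      return isolate->factory()->NewCode(       // copy into a heap Code object
          desc, flags, masm.CodeObject(), NeedsImmovableCode());
    }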
index 527abde958843aa2bbaa255949b43c0a4316efb8..ae113f5729a58d26bcdfeffff0ee5011a2570954 100644 (file)
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -162,29 +162,20 @@ class CodeStub BASE_EMBEDDED {
   // Lookup the code in the (possibly custom) cache.
   bool FindCodeInCache(Code** code_out, Isolate* isolate);
 
-  // Returns information for computing the number key.
-  virtual Major MajorKey() = 0;
-  virtual int MinorKey() = 0;
-
  protected:
   static bool CanUseFPRegisters();
 
-  // Generates the assembler code for the stub.
-  virtual Handle<Code> GenerateCode() = 0;
-
-  // BinaryOpStub needs to override this.
-  virtual InlineCacheState GetICState() {
-    return UNINITIALIZED;
-  }
+ private:
+  // Nonvirtual wrapper around the stub-specific Generate function.  Call
+  // this function to set up the macro assembler and generate the code.
+  void GenerateCode(MacroAssembler* masm);
 
-  // Returns whether the code generated for this stub needs to be allocated as
-  // a fixed (non-moveable) code object.
-  virtual bool NeedsImmovableCode() { return false; }
+  // Generates the assembler code for the stub.
+  virtual void Generate(MacroAssembler* masm) = 0;
 
- private:
   // Perform bookkeeping required after code generation when stub code is
   // initially generated.
-  void RecordCodeGeneration(Code* code, Isolate* isolate);
+  void RecordCodeGeneration(Code* code, MacroAssembler* masm);
 
   // Finish the code object after it has been generated.
   virtual void FinishCode(Handle<Code> code) { }
@@ -193,9 +184,18 @@ class CodeStub BASE_EMBEDDED {
   // registering stub in the stub cache.
   virtual void Activate(Code* code) { }
 
+  // Returns information for computing the number key.
+  virtual Major MajorKey() = 0;
+  virtual int MinorKey() = 0;
+
   // BinaryOpStub needs to override this.
   virtual int GetCodeKind();
 
+  // BinaryOpStub needs to override this.
+  virtual InlineCacheState GetICState() {
+    return UNINITIALIZED;
+  }
+
   // Add the code to a specialized cache, specific to an individual
   // stub type. Please note, this method must add the code object to a
   // roots object, otherwise we will remove the code during GC.
@@ -213,6 +213,10 @@ class CodeStub BASE_EMBEDDED {
   SmartArrayPointer<const char> GetName();
   virtual void PrintName(StringStream* stream);
 
+  // Returns whether the code generated for this stub needs to be allocated as
+  // a fixed (non-moveable) code object.
+  virtual bool NeedsImmovableCode() { return false; }
+
   // Computes the key based on major and minor.
   uint32_t GetKey() {
     ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
@@ -228,51 +232,6 @@ class CodeStub BASE_EMBEDDED {
 };
 
 
-class PlatformCodeStub : public CodeStub {
- public:
-  // Retrieve the code for the stub. Generate the code if needed.
-  virtual Handle<Code> GenerateCode();
-
-  virtual int GetCodeKind() { return Code::STUB; }
-
- protected:
-  // Generates the assembler code for the stub.
-  virtual void Generate(MacroAssembler* masm) = 0;
-};
-
-
-struct CodeStubInterfaceDescriptor {
-  CodeStubInterfaceDescriptor()
-      : register_param_count_(-1),
-        register_params_(NULL) { }
-  int register_param_count_;
-  Register* register_params_;
-  Handle<Code> deoptimization_handler_;
-};
-
-
-class HGraph;
-struct Register;
-class HydrogenCodeStub : public CodeStub {
- public:
-  // Retrieve the code for the stub. Generate the code if needed.
-  virtual Handle<Code> GenerateCode() = 0;
-
-  virtual int GetCodeKind() { return Code::COMPILED_STUB; }
-
-  CodeStubInterfaceDescriptor* GetInterfaceDescriptor(Isolate* isolate) {
-    return isolate->code_stub_interface_descriptor(MajorKey());
-  }
-
-  virtual void InitializeInterfaceDescriptor(
-      Isolate* isolate,
-      CodeStubInterfaceDescriptor* descriptor) = 0;
-
- protected:
-  Handle<Code> CodeFromGraph(HGraph* graph);
-};
-
-
 // Helper interface to prepare to/restore after making runtime calls.
 class RuntimeCallHelper {
  public:
@@ -330,7 +289,7 @@ class NopRuntimeCallHelper : public RuntimeCallHelper {
 };
 
 
-class StackCheckStub : public PlatformCodeStub {
+class StackCheckStub : public CodeStub {
  public:
   StackCheckStub() { }
 
@@ -342,7 +301,7 @@ class StackCheckStub : public PlatformCodeStub {
 };
 
 
-class InterruptStub : public PlatformCodeStub {
+class InterruptStub : public CodeStub {
  public:
   InterruptStub() { }
 
@@ -354,7 +313,7 @@ class InterruptStub : public PlatformCodeStub {
 };
 
 
-class ToNumberStub: public PlatformCodeStub {
+class ToNumberStub: public CodeStub {
  public:
   ToNumberStub() { }
 
@@ -366,7 +325,7 @@ class ToNumberStub: public PlatformCodeStub {
 };
 
 
-class FastNewClosureStub : public PlatformCodeStub {
+class FastNewClosureStub : public CodeStub {
  public:
   explicit FastNewClosureStub(LanguageMode language_mode)
     : language_mode_(language_mode) { }
@@ -382,7 +341,7 @@ class FastNewClosureStub : public PlatformCodeStub {
 };
 
 
-class FastNewContextStub : public PlatformCodeStub {
+class FastNewContextStub : public CodeStub {
  public:
   static const int kMaximumSlots = 64;
 
@@ -400,7 +359,7 @@ class FastNewContextStub : public PlatformCodeStub {
 };
 
 
-class FastNewBlockContextStub : public PlatformCodeStub {
+class FastNewBlockContextStub : public CodeStub {
  public:
   static const int kMaximumSlots = 64;
 
@@ -418,7 +377,7 @@ class FastNewBlockContextStub : public PlatformCodeStub {
 };
 
 
-class FastCloneShallowArrayStub : public PlatformCodeStub {
+class FastCloneShallowArrayStub : public CodeStub {
  public:
   // Maximum length of copied elements array.
   static const int kMaximumClonedLength = 8;
@@ -451,7 +410,7 @@ class FastCloneShallowArrayStub : public PlatformCodeStub {
 };
 
 
-class FastCloneShallowObjectStub : public PlatformCodeStub {
+class FastCloneShallowObjectStub : public CodeStub {
  public:
   // Maximum number of properties in copied object.
   static const int kMaximumClonedProperties = 6;
@@ -471,7 +430,7 @@ class FastCloneShallowObjectStub : public PlatformCodeStub {
 };
 
 
-class InstanceofStub: public PlatformCodeStub {
+class InstanceofStub: public CodeStub {
  public:
   enum Flags {
     kNoFlags = 0,
@@ -509,7 +468,7 @@ class InstanceofStub: public PlatformCodeStub {
 };
 
 
-class MathPowStub: public PlatformCodeStub {
+class MathPowStub: public CodeStub {
  public:
   enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK};
 
@@ -525,7 +484,7 @@ class MathPowStub: public PlatformCodeStub {
 };
 
 
-class BinaryOpStub: public PlatformCodeStub {
+class BinaryOpStub: public CodeStub {
  public:
   BinaryOpStub(Token::Value op, OverwriteMode mode)
       : op_(op),
@@ -641,7 +600,7 @@ class BinaryOpStub: public PlatformCodeStub {
 };
 
 
-class ICCompareStub: public PlatformCodeStub {
+class ICCompareStub: public CodeStub {
  public:
   ICCompareStub(Token::Value op,
                 CompareIC::State left,
@@ -707,7 +666,7 @@ class ICCompareStub: public PlatformCodeStub {
 };
 
 
-class CEntryStub : public PlatformCodeStub {
+class CEntryStub : public CodeStub {
  public:
   explicit CEntryStub(int result_size,
                       SaveFPRegsMode save_doubles = kDontSaveFPRegs)
@@ -741,7 +700,7 @@ class CEntryStub : public PlatformCodeStub {
 };
 
 
-class JSEntryStub : public PlatformCodeStub {
+class JSEntryStub : public CodeStub {
  public:
   JSEntryStub() { }
 
@@ -775,7 +734,7 @@ class JSConstructEntryStub : public JSEntryStub {
 };
 
 
-class ArgumentsAccessStub: public PlatformCodeStub {
+class ArgumentsAccessStub: public CodeStub {
  public:
   enum Type {
     READ_ELEMENT,
@@ -802,7 +761,7 @@ class ArgumentsAccessStub: public PlatformCodeStub {
 };
 
 
-class RegExpExecStub: public PlatformCodeStub {
+class RegExpExecStub: public CodeStub {
  public:
   RegExpExecStub() { }
 
@@ -814,7 +773,7 @@ class RegExpExecStub: public PlatformCodeStub {
 };
 
 
-class RegExpConstructResultStub: public PlatformCodeStub {
+class RegExpConstructResultStub: public CodeStub {
  public:
   RegExpConstructResultStub() { }
 
@@ -826,7 +785,7 @@ class RegExpConstructResultStub: public PlatformCodeStub {
 };
 
 
-class CallFunctionStub: public PlatformCodeStub {
+class CallFunctionStub: public CodeStub {
  public:
   CallFunctionStub(int argc, CallFunctionFlags flags)
       : argc_(argc), flags_(flags) { }
@@ -867,7 +826,7 @@ class CallFunctionStub: public PlatformCodeStub {
 };
 
 
-class CallConstructStub: public PlatformCodeStub {
+class CallConstructStub: public CodeStub {
  public:
   explicit CallConstructStub(CallFunctionFlags flags) : flags_(flags) {}
 
@@ -1058,54 +1017,25 @@ class AllowStubCallsScope {
 };
 
 
-class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
+class KeyedLoadElementStub : public CodeStub {
  public:
-  KeyedLoadDictionaryElementStub() {}
+  explicit KeyedLoadElementStub(ElementsKind elements_kind)
+      : elements_kind_(elements_kind)
+  { }
 
   Major MajorKey() { return KeyedLoadElement; }
-  int MinorKey() { return DICTIONARY_ELEMENTS; }
+  int MinorKey() { return elements_kind_; }
 
   void Generate(MacroAssembler* masm);
 
  private:
-  DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub);
-};
-
-
-class KeyedLoadFastElementStub : public HydrogenCodeStub {
- public:
-  KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {
-    bit_field_ = ElementsKindBits::encode(elements_kind) |
-        IsJSArrayBits::encode(is_js_array);
-  }
-
-  Major MajorKey() { return KeyedLoadElement; }
-  int MinorKey() { return bit_field_; }
-
-  bool is_js_array() const {
-    return IsJSArrayBits::decode(bit_field_);
-  }
-
-  ElementsKind elements_kind() const {
-    return ElementsKindBits::decode(bit_field_);
-  }
-
-  virtual Handle<Code> GenerateCode();
-
-  virtual void InitializeInterfaceDescriptor(
-      Isolate* isolate,
-      CodeStubInterfaceDescriptor* descriptor);
-
- private:
-  class IsJSArrayBits: public BitField<bool, 8, 1> {};
-  class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
-  uint32_t bit_field_;
+  ElementsKind elements_kind_;
 
-  DISALLOW_COPY_AND_ASSIGN(KeyedLoadFastElementStub);
+  DISALLOW_COPY_AND_ASSIGN(KeyedLoadElementStub);
 };
 
 
-class KeyedStoreElementStub : public PlatformCodeStub {
+class KeyedStoreElementStub : public CodeStub {
  public:
   KeyedStoreElementStub(bool is_js_array,
                         ElementsKind elements_kind,
@@ -1140,7 +1070,7 @@ class KeyedStoreElementStub : public PlatformCodeStub {
 };
 
 
-class ToBooleanStub: public PlatformCodeStub {
+class ToBooleanStub: public CodeStub {
  public:
   enum Type {
     UNDEFINED,
@@ -1210,7 +1140,7 @@ class ToBooleanStub: public PlatformCodeStub {
 };
 
 
-class ElementsTransitionAndStoreStub : public PlatformCodeStub {
+class ElementsTransitionAndStoreStub : public CodeStub {
  public:
   ElementsTransitionAndStoreStub(ElementsKind from,
                                  ElementsKind to,
@@ -1251,7 +1181,7 @@ class ElementsTransitionAndStoreStub : public PlatformCodeStub {
 };
 
 
-class StoreArrayLiteralElementStub : public PlatformCodeStub {
+class StoreArrayLiteralElementStub : public CodeStub {
  public:
   StoreArrayLiteralElementStub()
         : fp_registers_(CanUseFPRegisters()) { }
@@ -1270,7 +1200,7 @@ class StoreArrayLiteralElementStub : public PlatformCodeStub {
 };
 
 
-class ProfileEntryHookStub : public PlatformCodeStub {
+class ProfileEntryHookStub : public CodeStub {
  public:
   explicit ProfileEntryHookStub() {}
 
index c8bdf687b844aedb5c4e3142687d5be87a9ed5b9..83ac854a079259b1aaceba57a06e5465fd5d61ac 100644 (file)
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -121,21 +121,19 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
   if (print_code) {
     // Print the source code if available.
     FunctionLiteral* function = info->function();
-    if (code->kind() != Code::COMPILED_STUB) {
-      Handle<Script> script = info->script();
-      if (!script->IsUndefined() && !script->source()->IsUndefined()) {
-        PrintF("--- Raw source ---\n");
-        StringInputBuffer stream(String::cast(script->source()));
-        stream.Seek(function->start_position());
-        // fun->end_position() points to the last character in the stream. We
-        // need to compensate by adding one to calculate the length.
-        int source_len =
-            function->end_position() - function->start_position() + 1;
-        for (int i = 0; i < source_len; i++) {
-          if (stream.has_more()) PrintF("%c", stream.GetNext());
-        }
-        PrintF("\n\n");
+    Handle<Script> script = info->script();
+    if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+      PrintF("--- Raw source ---\n");
+      StringInputBuffer stream(String::cast(script->source()));
+      stream.Seek(function->start_position());
+      // fun->end_position() points to the last character in the stream. We
+      // need to compensate by adding one to calculate the length.
+      int source_len =
+          function->end_position() - function->start_position() + 1;
+      for (int i = 0; i < source_len; i++) {
+        if (stream.has_more()) PrintF("%c", stream.GetNext());
       }
+      PrintF("\n\n");
     }
     if (info->IsOptimizing()) {
       if (FLAG_print_unopt_code) {
@@ -147,12 +145,7 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
     } else {
       PrintF("--- Code ---\n");
     }
-    if (info->IsStub()) {
-      CodeStub::Major major_key = info->code_stub()->MajorKey();
-      code->Disassemble(CodeStub::MajorName(major_key, false));
-    } else {
-      code->Disassemble(*function->debug_name()->ToCString());
-    }
+    code->Disassemble(*function->debug_name()->ToCString());
   }
 #endif  // ENABLE_DISASSEMBLER
 }
index ceac829cf80351d1a8d483b020047cd10a0df706..5779aae81bcb92e1b8bff82f68b177bb76bcd871 100644 (file)
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -55,7 +55,7 @@ CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
     : flags_(LanguageModeField::encode(CLASSIC_MODE)),
       script_(script),
       osr_ast_id_(BailoutId::None()) {
-  Initialize(script->GetIsolate(), BASE, zone);
+  Initialize(zone);
 }
 
 
@@ -65,7 +65,7 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
       shared_info_(shared_info),
       script_(Handle<Script>(Script::cast(shared_info->script()))),
       osr_ast_id_(BailoutId::None()) {
-  Initialize(script_->GetIsolate(), BASE, zone);
+  Initialize(zone);
 }
 
 
@@ -76,22 +76,12 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
       script_(Handle<Script>(Script::cast(shared_info_->script()))),
       context_(closure->context()),
       osr_ast_id_(BailoutId::None()) {
-  Initialize(script_->GetIsolate(), BASE, zone);
+  Initialize(zone);
 }
 
 
-CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
-                                 Isolate* isolate, Zone* zone)
-    : flags_(LanguageModeField::encode(CLASSIC_MODE) |
-             IsLazy::encode(true)),
-      osr_ast_id_(BailoutId::None()) {
-  Initialize(isolate, STUB, zone);
-  code_stub_ = stub;
-}
-
-
-void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {
-  isolate_ = isolate;
+void CompilationInfo::Initialize(Zone* zone) {
+  isolate_ = script_->GetIsolate();
   function_ = NULL;
   scope_ = NULL;
   global_scope_ = NULL;
@@ -99,13 +89,8 @@ void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {
   pre_parse_data_ = NULL;
   zone_ = zone;
   deferred_handles_ = NULL;
-  code_stub_ = NULL;
   prologue_offset_ = kPrologueOffsetNotSet;
-  if (mode == STUB) {
-    mode_ = STUB;
-    return;
-  }
-  mode_ = V8::UseCrankshaft() ? mode : NONOPT;
+  mode_ = V8::UseCrankshaft() ? BASE : NONOPT;
   if (script_->type()->value() == Script::TYPE_NATIVE) {
     MarkAsNative();
   }
@@ -122,33 +107,6 @@ CompilationInfo::~CompilationInfo() {
 }
 
 
-int CompilationInfo::num_parameters() const {
-  if (IsStub()) {
-    return 0;
-  } else {
-    return scope()->num_parameters();
-  }
-}
-
-
-int CompilationInfo::num_heap_slots() const {
-  if (IsStub()) {
-    return 0;
-  } else {
-    return scope()->num_heap_slots();
-  }
-}
-
-
-Code::Flags CompilationInfo::flags() const {
-  if (IsStub()) {
-    return Code::ComputeFlags(Code::COMPILED_STUB);
-  } else {
-    return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
-  }
-}
-
-
 // Disable optimization for the rest of the compilation pipeline.
 void CompilationInfo::DisableOptimization() {
   bool is_optimizable_closure =
@@ -359,13 +317,13 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
   if (FLAG_trace_hydrogen) {
     PrintF("-----------------------------------------------------------\n");
     PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
-    HTracer::Instance()->TraceCompilation(info());
+    HTracer::Instance()->TraceCompilation(info()->function());
   }
   Handle<Context> native_context(
       info()->closure()->context()->native_context());
   oracle_ = new(info()->zone()) TypeFeedbackOracle(
       code, native_context, info()->isolate(), info()->zone());
-  graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info(), oracle_);
+  graph_builder_ = new(info()->zone()) HGraphBuilder(info(), oracle_);
 
   Timer t(this, &time_taken_to_create_graph_);
   graph_ = graph_builder_->CreateGraph();
@@ -418,7 +376,7 @@ OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
     Timer timer(this, &time_taken_to_codegen_);
     ASSERT(chunk_ != NULL);
     ASSERT(graph_ != NULL);
-    Handle<Code> optimized_code = chunk_->Codegen(Code::OPTIMIZED_FUNCTION);
+    Handle<Code> optimized_code = chunk_->Codegen();
     if (optimized_code.is_null()) {
       info()->set_bailout_reason("code generation failed");
       return AbortOptimization();
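
The compiler.cc hunks restore the pre-stub shape of the Crankshaft driver: CompilationInfo loses its STUB mode, Initialize() recovers the isolate from the script again, HOptimizedGraphBuilder folds back into HGraphBuilder, and LChunk::Codegen() drops its Code::Kind argument because optimized JS functions are the only Crankshaft output. A sketch of the three-phase pipeline as it reads after the revert (V8-internal types; bailout handling is elided, and the OptimizeGraph() phase and Status names are assumed unchanged by this patch):

    OptimizingCompiler compiler(info);
    if (compiler.CreateGraph() == OptimizingCompiler::SUCCEEDED &&
        compiler.OptimizeGraph() == OptimizingCompiler::SUCCEEDED) {
      compiler.GenerateAndInstallCode();  // chunk_->Codegen() under the hood
    }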
index 6d374d9d9d7823941119836f198a5ad849506dd4..653d5f124d40d79627cec871420daae3dccd1643 100644 (file)
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -38,7 +38,6 @@ namespace internal {
 static const int kPrologueOffsetNotSet = -1;
 
 class ScriptDataImpl;
-class HydrogenCodeStub;
 
 // CompilationInfo encapsulates some information known at compile time.  It
 // is constructed based on the resources available at compile-time.
@@ -47,7 +46,6 @@ class CompilationInfo {
   CompilationInfo(Handle<Script> script, Zone* zone);
   CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
   CompilationInfo(Handle<JSFunction> closure, Zone* zone);
-  CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone);
 
   virtual ~CompilationInfo();
 
@@ -74,14 +72,10 @@ class CompilationInfo {
   Handle<JSFunction> closure() const { return closure_; }
   Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
   Handle<Script> script() const { return script_; }
-  HydrogenCodeStub* code_stub() {return code_stub_; }
   v8::Extension* extension() const { return extension_; }
   ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
   Handle<Context> context() const { return context_; }
   BailoutId osr_ast_id() const { return osr_ast_id_; }
-  int num_parameters() const;
-  int num_heap_slots() const;
-  Code::Flags flags() const;
 
   void MarkAsEval() {
     ASSERT(!is_lazy());
@@ -104,31 +98,9 @@ class CompilationInfo {
   void MarkAsNative() {
     flags_ |= IsNative::encode(true);
   }
-
   bool is_native() const {
     return IsNative::decode(flags_);
   }
-
-  bool is_calling() const {
-    return is_deferred_calling() || is_non_deferred_calling();
-  }
-
-  void MarkAsDeferredCalling() {
-    flags_ |= IsDeferredCalling::encode(true);
-  }
-
-  bool is_deferred_calling() const {
-    return IsDeferredCalling::decode(flags_);
-  }
-
-  void MarkAsNonDeferredCalling() {
-    flags_ |= IsNonDeferredCalling::encode(true);
-  }
-
-  bool is_non_deferred_calling() const {
-    return IsNonDeferredCalling::decode(flags_);
-  }
-
   void SetFunction(FunctionLiteral* literal) {
     ASSERT(function_ == NULL);
     function_ = literal;
@@ -179,7 +151,6 @@ class CompilationInfo {
   // Accessors for the different compilation modes.
   bool IsOptimizing() const { return mode_ == OPTIMIZE; }
   bool IsOptimizable() const { return mode_ == BASE; }
-  bool IsStub() const { return mode_ == STUB; }
   void SetOptimizing(BailoutId osr_ast_id) {
     SetMode(OPTIMIZE);
     osr_ast_id_ = osr_ast_id;
@@ -238,11 +209,10 @@ class CompilationInfo {
   enum Mode {
     BASE,
     OPTIMIZE,
-    NONOPT,
-    STUB
+    NONOPT
   };
 
-  void Initialize(Isolate* isolate, Mode mode, Zone* zone);
+  void Initialize(Zone* zone);
 
   void SetMode(Mode mode) {
     ASSERT(V8::UseCrankshaft());
@@ -268,12 +238,6 @@ class CompilationInfo {
   // If compiling for debugging produce just full code matching the
   // initial mode setting.
   class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
-  // If the compiled code contains calls that require building a frame
-  class IsCalling: public BitField<bool, 9, 1> {};
-  // If the compiled code contains calls that require building a frame
-  class IsDeferredCalling: public BitField<bool, 10, 1> {};
-  // If the compiled code contains calls that require building a frame
-  class IsNonDeferredCalling: public BitField<bool, 11, 1> {};
 
 
   unsigned flags_;
@@ -286,8 +250,6 @@ class CompilationInfo {
   Scope* scope_;
   // The global scope provided as a convenience.
   Scope* global_scope_;
-  // For compiled stubs, the stub object
-  HydrogenCodeStub* code_stub_;
   // The compiled code.
   Handle<Code> code_;
 
@@ -348,10 +310,6 @@ class CompilationInfoWithZone: public CompilationInfo {
       : CompilationInfo(closure, &zone_),
         zone_(closure->GetIsolate()),
         zone_scope_(&zone_, DELETE_ON_EXIT) {}
-  explicit CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
-      : CompilationInfo(stub, isolate, &zone_),
-        zone_(isolate),
-        zone_scope_(&zone_, DELETE_ON_EXIT) {}
 
  private:
   Zone zone_;
@@ -377,7 +335,7 @@ class CompilationHandleScope BASE_EMBEDDED {
 
 
 class HGraph;
-class HOptimizedGraphBuilder;
+class HGraphBuilder;
 class LChunk;
 
 // A helper class that calls the three compilation phases in
@@ -419,7 +377,7 @@ class OptimizingCompiler: public ZoneObject {
  private:
   CompilationInfo* info_;
   TypeFeedbackOracle* oracle_;
-  HOptimizedGraphBuilder* graph_builder_;
+  HGraphBuilder* graph_builder_;
   HGraph* graph_;
   LChunk* chunk_;
   int64_t time_taken_to_create_graph_;
index 357e4b8f9991a3656e134bd1302eaeacf2e1c577..8f1711144e65c9a121a233b08a44d507ea893332 100644 (file)
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -410,24 +410,17 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
            reinterpret_cast<intptr_t>(from),
            fp_to_sp_delta - (2 * kPointerSize));
   }
-  // For COMPILED_STUBs called from builtins, the function pointer
-  // is a SMI indicating an internal frame.
-  if (function->IsSmi()) {
-    function = NULL;
-  }
-  if (function != NULL && function->IsOptimized()) {
-    function->shared()->increment_deopt_count();
-  }
+  function->shared()->increment_deopt_count();
   // Find the optimized code.
   if (type == EAGER) {
     ASSERT(from == NULL);
-    compiled_code_ = function_->code();
+    optimized_code_ = function_->code();
     if (FLAG_trace_deopt && FLAG_code_comments) {
       // Print instruction associated with this bailout.
       const char* last_comment = NULL;
       int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
           | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-      for (RelocIterator it(compiled_code_, mask); !it.done(); it.next()) {
+      for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
         RelocInfo* info = it.rinfo();
         if (info->rmode() == RelocInfo::COMMENT) {
           last_comment = reinterpret_cast<const char*>(info->data());
@@ -443,22 +436,18 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
       }
     }
   } else if (type == LAZY) {
-    compiled_code_ = FindDeoptimizingCodeFromAddress(from);
-    if (compiled_code_ == NULL) {
-      compiled_code_ =
-          static_cast<Code*>(isolate->heap()->FindCodeObject(from));
-    }
-    ASSERT(compiled_code_ != NULL);
+    optimized_code_ = FindDeoptimizingCodeFromAddress(from);
+    ASSERT(optimized_code_ != NULL);
   } else if (type == OSR) {
     // The function has already been optimized and we're transitioning
     // from the unoptimized shared version to the optimized one in the
     // function. The return address (from) points to unoptimized code.
-    compiled_code_ = function_->code();
-    ASSERT(compiled_code_->kind() == Code::OPTIMIZED_FUNCTION);
-    ASSERT(!compiled_code_->contains(from));
+    optimized_code_ = function_->code();
+    ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
+    ASSERT(!optimized_code_->contains(from));
   } else if (type == DEBUGGER) {
-    compiled_code_ = optimized_code;
-    ASSERT(compiled_code_->contains(from));
+    optimized_code_ = optimized_code;
+    ASSERT(optimized_code_->contains(from));
   }
   ASSERT(HEAP->allow_allocation(false));
   unsigned size = ComputeInputFrameSize();
@@ -584,7 +573,7 @@ void Deoptimizer::DoComputeOutputFrames() {
   // Determine basic deoptimization information.  The optimized frame is
   // described by the input data.
   DeoptimizationInputData* input_data =
-      DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
+      DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
   BailoutId node_id = input_data->AstId(bailout_id_);
   ByteArray* translations = input_data->TranslationByteArray();
   unsigned translation_index =
@@ -629,9 +618,6 @@ void Deoptimizer::DoComputeOutputFrames() {
       case Translation::SETTER_STUB_FRAME:
         DoComputeAccessorStubFrame(&iterator, i, true);
         break;
-      case Translation::COMPILED_STUB_FRAME:
-        DoCompiledStubFrame(&iterator, i);
-        break;
       case Translation::BEGIN:
       case Translation::REGISTER:
       case Translation::INT32_REGISTER:
@@ -644,7 +630,6 @@ void Deoptimizer::DoComputeOutputFrames() {
       case Translation::LITERAL:
       case Translation::ARGUMENTS_OBJECT:
       case Translation::DUPLICATE:
-      default:
         UNREACHABLE();
         break;
     }
@@ -824,7 +809,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
     case Translation::CONSTRUCT_STUB_FRAME:
     case Translation::GETTER_STUB_FRAME:
     case Translation::SETTER_STUB_FRAME:
-    case Translation::COMPILED_STUB_FRAME:
     case Translation::DUPLICATE:
       UNREACHABLE();
       return;
@@ -1133,7 +1117,6 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
     case Translation::CONSTRUCT_STUB_FRAME:
     case Translation::GETTER_STUB_FRAME:
     case Translation::SETTER_STUB_FRAME:
-    case Translation::COMPILED_STUB_FRAME:
     case Translation::DUPLICATE:
       UNREACHABLE();  // Malformed input.
        return false;
@@ -1354,9 +1337,8 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
     // environment at the OSR entry. The code for that is built into
     // the DoComputeOsrOutputFrame function for now.
   } else {
-    unsigned stack_slots = compiled_code_->stack_slots();
-    unsigned outgoing_size = compiled_code_->kind() == Code::COMPILED_STUB
-        ? 0 : ComputeOutgoingArgumentSize();
+    unsigned stack_slots = optimized_code_->stack_slots();
+    unsigned outgoing_size = ComputeOutgoingArgumentSize();
     ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
   }
 #endif
@@ -1375,10 +1357,6 @@ unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
 unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
   // The incoming arguments are the values for formal parameters and
   // the receiver. Every slot contains a pointer.
-  if (function->IsSmi()) {
-    ASSERT(Smi::cast(function) == Smi::FromInt(StackFrame::STUB));
-    return 0;
-  }
   unsigned arguments = function->shared()->formal_parameter_count() + 1;
   return arguments * kPointerSize;
 }
@@ -1386,7 +1364,7 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
 
 unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
   DeoptimizationInputData* data = DeoptimizationInputData::cast(
-      compiled_code_->deoptimization_data());
+      optimized_code_->deoptimization_data());
   unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
   return height * kPointerSize;
 }
@@ -1394,7 +1372,7 @@ unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
 
 Object* Deoptimizer::ComputeLiteral(int index) const {
   DeoptimizationInputData* data = DeoptimizationInputData::cast(
-      compiled_code_->deoptimization_data());
+      optimized_code_->deoptimization_data());
   FixedArray* literals = data->LiteralArray();
   return literals->get(index);
 }
@@ -1425,6 +1403,8 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(BailoutType type,
   // cause us to emit relocation information for the external
   // references. This is fine because the deoptimizer's code section
   // isn't meant to be serialized at all.
+  ASSERT(!Serializer::enabled());
+
   ASSERT(type == EAGER || type == LAZY);
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   int entry_count = (type == EAGER)
@@ -1439,6 +1419,7 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(BailoutType type,
   GenerateDeoptimizationEntries(&masm, entry_count, type);
   CodeDesc desc;
   masm.GetCode(&desc);
+  ASSERT(desc.reloc_size == 0);
 
   VirtualMemory* memory = type == EAGER
       ? data->eager_deoptimization_entry_code_
@@ -1700,11 +1681,6 @@ void Translation::BeginJSFrame(BailoutId node_id,
 }
 
 
-void Translation::BeginCompiledStubFrame() {
-  buffer_->Add(COMPILED_STUB_FRAME, zone());
-}
-
-
 void Translation::StoreRegister(Register reg) {
   buffer_->Add(REGISTER, zone());
   buffer_->Add(reg.code(), zone());
@@ -1786,7 +1762,6 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
     case UINT32_STACK_SLOT:
     case DOUBLE_STACK_SLOT:
     case LITERAL:
-    case COMPILED_STUB_FRAME:
       return 1;
     case BEGIN:
     case ARGUMENTS_ADAPTOR_FRAME:
@@ -1817,8 +1792,6 @@ const char* Translation::StringFor(Opcode opcode) {
       return "GETTER_STUB_FRAME";
     case SETTER_STUB_FRAME:
       return "SETTER_STUB_FRAME";
-    case COMPILED_STUB_FRAME:
-      return "COMPILED_STUB_FRAME";
     case REGISTER:
       return "REGISTER";
     case INT32_REGISTER:
@@ -1926,10 +1899,6 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
       int literal_index = iterator->Next();
       return SlotRef(data->LiteralArray()->get(literal_index));
     }
-
-    case Translation::COMPILED_STUB_FRAME:
-      UNREACHABLE();
-      break;
   }
 
   UNREACHABLE();
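
One detail that makes the deoptimizer hunks above mechanical: a translation is a flat byte stream in which every opcode is followed by a fixed operand count, so removing COMPILED_STUB_FRAME only has to touch the opcode enum, NumberOfOperandsFor(), StringFor(), and the switches that decode the stream. A sketch of the decode-loop shape those switches live in (TranslationIterator is V8-internal; its HasNext()/Next() names are assumed from this era of the code):

    while (it.HasNext()) {
      Translation::Opcode op = static_cast<Translation::Opcode>(it.Next());
      int operands = Translation::NumberOfOperandsFor(op);
      for (int i = 0; i < operands; ++i) it.Next();  // consume raw operands
    }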
index dbcdf615b108e6759399bc5f49e95409cc66a4f5..89955b38bde84d67790ef01e5d3c87d7ad9b260a 100644 (file)
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -135,8 +135,6 @@ class Deoptimizer : public Malloced {
 
   int output_count() const { return output_count_; }
 
-  Code::Kind compiled_code_kind() const { return compiled_code_->kind(); }
-
   // Number of created JS frames. Not all created frames are necessarily JS.
   int jsframe_count() const { return jsframe_count_; }
 
@@ -299,9 +297,6 @@ class Deoptimizer : public Malloced {
 
   static size_t GetMaxDeoptTableSize();
 
-  static void EnsureCodeForDeoptimizationEntry(BailoutType type,
-                                               int max_entry_id);
-
  private:
   static const int kMinNumberOfEntries = 64;
   static const int kMaxNumberOfEntries = 16384;
@@ -325,8 +320,6 @@ class Deoptimizer : public Malloced {
   void DoComputeAccessorStubFrame(TranslationIterator* iterator,
                                   int frame_index,
                                   bool is_setter_stub_frame);
-  void DoCompiledStubFrame(TranslationIterator* iterator,
-                           int frame_index);
   void DoTranslateCommand(TranslationIterator* iterator,
                           int frame_index,
                           unsigned output_offset);
@@ -349,6 +342,8 @@ class Deoptimizer : public Malloced {
   void AddArgumentsObjectValue(intptr_t value);
   void AddDoubleValue(intptr_t slot_address, double value);
 
+  static void EnsureCodeForDeoptimizationEntry(BailoutType type,
+                                               int max_entry_id);
   static void GenerateDeoptimizationEntries(
       MacroAssembler* masm, int count, BailoutType type);
 
@@ -365,7 +360,7 @@ class Deoptimizer : public Malloced {
 
   Isolate* isolate_;
   JSFunction* function_;
-  Code* compiled_code_;
+  Code* optimized_code_;
   unsigned bailout_id_;
   BailoutType bailout_type_;
   Address from_;
@@ -535,7 +530,7 @@ class FrameDescription {
   uintptr_t frame_size_;  // Number of bytes.
   JSFunction* function_;
   intptr_t registers_[Register::kNumRegisters];
-  double double_registers_[DoubleRegister::kMaxNumAllocatableRegisters];
+  double double_registers_[DoubleRegister::kNumAllocatableRegisters];
   intptr_t top_;
   intptr_t pc_;
   intptr_t fp_;
@@ -605,7 +600,6 @@ class Translation BASE_EMBEDDED {
     GETTER_STUB_FRAME,
     SETTER_STUB_FRAME,
     ARGUMENTS_ADAPTOR_FRAME,
-    COMPILED_STUB_FRAME,
     REGISTER,
     INT32_REGISTER,
     UINT32_REGISTER,
@@ -636,7 +630,6 @@ class Translation BASE_EMBEDDED {
 
   // Commands.
   void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
-  void BeginCompiledStubFrame();
   void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
   void BeginConstructStubFrame(int literal_id, unsigned height);
   void BeginGetterStubFrame(int literal_id);
index 05d6b9bde29817eae6dc71912dc4c4c69da9e563..9f8b9a820bc7fb65dd169aae2f6f1a24f30fc172 100644 (file)
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -287,12 +287,7 @@ static int DecodeIt(FILE* f,
         Address addr = relocinfo.target_address();
         int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER);
         if (id == Deoptimizer::kNotDeoptimizationEntry) {
-          id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::LAZY);
-          if (id == Deoptimizer::kNotDeoptimizationEntry) {
-            out.AddFormatted("    ;; %s", RelocInfo::RelocModeName(rmode));
-          } else {
-            out.AddFormatted("    ;; lazy deoptimization bailout %d", id);
-          }
+          out.AddFormatted("    ;; %s", RelocInfo::RelocModeName(rmode));
         } else {
           out.AddFormatted("    ;; deoptimization bailout %d", id);
         }
@@ -327,8 +322,7 @@ int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
 
 // Called by Code::CodePrint.
 void Disassembler::Decode(FILE* f, Code* code) {
-  int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION ||
-                     code->kind() == Code::COMPILED_STUB)
+  int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
       ? static_cast<int>(code->safepoint_table_offset())
       : code->instruction_size();
   // If there might be a stack check table, stop before reaching it.
index 475393222b127be4104fa79a804a81e2beb9f0fc..27a526cef1086e039e877b9d6626ce2dc33e5515 100644 (file)
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -235,18 +235,8 @@ inline Object* JavaScriptFrame::function() const {
 }
 
 
-inline CompiledFrame::CompiledFrame(StackFrameIterator* iterator)
-    : JavaScriptFrame(iterator) {
-}
-
-
-inline StubFrame::StubFrame(StackFrameIterator* iterator)
-    : CompiledFrame(iterator) {
-}
-
-
 inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
-    : CompiledFrame(iterator) {
+    : JavaScriptFrame(iterator) {
 }
 
 
index cb9ffba8c913a47d6d7ac09e664ceb1e4de4dd5e..3b60fb59fa230537268533c6813d4c7cbe0575ae 100644 (file)
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -617,7 +617,7 @@ bool StandardFrame::IsExpressionInsideHandler(int n) const {
 }
 
 
-void CompiledFrame::Iterate(ObjectVisitor* v) const {
+void OptimizedFrame::Iterate(ObjectVisitor* v) const {
 #ifdef DEBUG
   // Make sure that optimized frames do not contain any stack handlers.
   StackHandlerIterator it(this, top_handler());
@@ -649,7 +649,7 @@ void CompiledFrame::Iterate(ObjectVisitor* v) const {
 
   // Skip saved double registers.
   if (safepoint_entry.has_doubles()) {
-    parameters_base += DoubleRegister::NumAllocatableRegisters() *
+    parameters_base += DoubleRegister::kNumAllocatableRegisters *
         kDoubleSize / kPointerSize;
   }
 
@@ -681,24 +681,14 @@ void CompiledFrame::Iterate(ObjectVisitor* v) const {
     }
   }
 
-  // Visit the return address in the callee and incoming arguments.
-  IteratePc(v, pc_address(), code);
-}
-
-
-void StubFrame::Iterate(ObjectVisitor* v) const {
-  CompiledFrame::Iterate(v);
-}
-
-
-void OptimizedFrame::Iterate(ObjectVisitor* v) const {
-  CompiledFrame::Iterate(v);
-
   // Visit the context and the function.
   Object** fixed_base = &Memory::Object_at(
       fp() + JavaScriptFrameConstants::kFunctionOffset);
   Object** fixed_limit = &Memory::Object_at(fp());
   v->VisitPointers(fixed_base, fixed_limit);
+
+  // Visit the return address in the callee and incoming arguments.
+  IteratePc(v, pc_address(), code);
 }
 
 
index 6a9570eead57305feb5eec5bde7de33b88f38089..30f7e1f00e552575295b074b08d5bb4396461cbb 100644 (file)
--- a/src/frames.h
+++ b/src/frames.h
@@ -136,7 +136,6 @@ class StackHandler BASE_EMBEDDED {
   V(EXIT,              ExitFrame)             \
   V(JAVA_SCRIPT,       JavaScriptFrame)       \
   V(OPTIMIZED,         OptimizedFrame)        \
-  V(STUB,              StubFrame)             \
   V(INTERNAL,          InternalFrame)         \
   V(CONSTRUCT,         ConstructFrame)        \
   V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
@@ -556,33 +555,7 @@ class JavaScriptFrame: public StandardFrame {
 };
 
 
-class CompiledFrame : public JavaScriptFrame {
- public:
-  virtual Type type() const = 0;
-
-  // GC support.
-  virtual void Iterate(ObjectVisitor* v) const;
-
- protected:
-  inline explicit CompiledFrame(StackFrameIterator* iterator);
-};
-
-
-class StubFrame : public CompiledFrame {
- public:
-  virtual Type type() const { return STUB; }
-
-  // GC support.
-  virtual void Iterate(ObjectVisitor* v) const;
-
- protected:
-  inline explicit StubFrame(StackFrameIterator* iterator);
-
-  friend class StackFrameIterator;
-};
-
-
-class OptimizedFrame : public CompiledFrame {
+class OptimizedFrame : public JavaScriptFrame {
  public:
   virtual Type type() const { return OPTIMIZED; }
 
index faf111fa410a94215b9e9638f8b01e6c29f48437..928da4a76455bcebd63ca00d93818b8ee86be038 100644 (file)
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -398,7 +398,6 @@ void FullCodeGenerator::Initialize() {
                          !Snapshot::HaveASnapshotToStartFrom();
   masm_->set_emit_debug_code(generate_debug_code_);
   masm_->set_predictable_code_size(true);
-  InitializeAstVisitor();
 }
 
 
index c25076a0b13e603940dee80af80bf7a0df205ebf..2f88184806c787b9db4a5f1ed6ec4ced75118990 100644 (file)
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -48,9 +48,7 @@ class JumpPatchSite;
 // debugger to piggyback on.
 class BreakableStatementChecker: public AstVisitor {
  public:
-  BreakableStatementChecker() : is_breakable_(false) {
-    InitializeAstVisitor();
-  }
+  BreakableStatementChecker() : is_breakable_(false) {}
 
   void Check(Statement* stmt);
   void Check(Expression* stmt);
@@ -65,7 +63,6 @@ class BreakableStatementChecker: public AstVisitor {
 
   bool is_breakable_;
 
-  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
   DISALLOW_COPY_AND_ASSIGN(BreakableStatementChecker);
 };
 
@@ -828,7 +825,6 @@ class FullCodeGenerator: public AstVisitor {
 
   friend class NestedStatement;
 
-  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
   DISALLOW_COPY_AND_ASSIGN(FullCodeGenerator);
 };
 
index 3c5f771693ea6eabac14bc48e6bfb49942ce2145..5d2099237b4c4ef24d2f9bcadd7ab70cf9e50325 100644 (file)
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -138,7 +138,6 @@ HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
   ASSERT(HasEnvironment());
   HEnvironment* environment = last_environment();
   ASSERT(ast_id.IsNone() ||
-         ast_id == BailoutId::StubEntry() ||
          environment->closure()->shared()->VerifyBailoutId(ast_id));
 
   int push_count = environment->push_count();
@@ -622,204 +621,33 @@ HConstant* HGraph::GetConstantHole() {
 }
 
 
-HGraph* HGraphBuilder::CreateGraph() {
-  graph_ = new(zone()) HGraph(info_);
-  if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info_);
-  HPhase phase("H_Block building");
-  set_current_block(graph()->entry_block());
-  if (!BuildGraph()) return NULL;
-  return graph_;
-}
-
-
-HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
-  ASSERT(current_block() != NULL);
-  current_block()->AddInstruction(instr);
-  return instr;
-}
-
-
-void HGraphBuilder::AddSimulate(BailoutId id,
-                                RemovableSimulate removable) {
-  ASSERT(current_block() != NULL);
-  current_block()->AddSimulate(id, removable);
-}
-
-
-HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
-    HValue* external_elements,
-    HValue* checked_key,
-    HValue* val,
-    HValue* dependency,
-    ElementsKind elements_kind,
-    bool is_store) {
-  Zone* zone = this->zone();
-  if (is_store) {
-    ASSERT(val != NULL);
-    switch (elements_kind) {
-      case EXTERNAL_PIXEL_ELEMENTS: {
-        val = AddInstruction(new(zone) HClampToUint8(val));
-        break;
-      }
-      case EXTERNAL_BYTE_ELEMENTS:
-      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      case EXTERNAL_SHORT_ELEMENTS:
-      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      case EXTERNAL_INT_ELEMENTS:
-      case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
-        break;
-      }
-      case EXTERNAL_FLOAT_ELEMENTS:
-      case EXTERNAL_DOUBLE_ELEMENTS:
-        break;
-      case FAST_SMI_ELEMENTS:
-      case FAST_ELEMENTS:
-      case FAST_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
-      case DICTIONARY_ELEMENTS:
-      case NON_STRICT_ARGUMENTS_ELEMENTS:
-        UNREACHABLE();
-        break;
-    }
-    return new(zone) HStoreKeyed(external_elements, checked_key,
-                                 val, elements_kind);
-  } else {
-    ASSERT(val == NULL);
-    HLoadKeyed* load =
-       new(zone) HLoadKeyed(
-           external_elements, checked_key, dependency, elements_kind);
-    if (FLAG_opt_safe_uint32_operations &&
-        elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
-      graph()->RecordUint32Instruction(load);
-    }
-    return load;
-  }
-}
-
-
-HInstruction* HGraphBuilder::BuildFastElementAccess(
-    HValue* elements,
-    HValue* checked_key,
-    HValue* val,
-    HValue* load_dependency,
-    ElementsKind elements_kind,
-    bool is_store) {
-  Zone* zone = this->zone();
-  if (is_store) {
-    ASSERT(val != NULL);
-    switch (elements_kind) {
-      case FAST_SMI_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
-        // Smi-only arrays need a smi check.
-        AddInstruction(new(zone) HCheckSmi(val));
-        // Fall through.
-      case FAST_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
-      case FAST_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
-        return new(zone) HStoreKeyed(elements, checked_key, val, elements_kind);
-      default:
-        UNREACHABLE();
-        return NULL;
-    }
-  }
-  // It's an element load (!is_store).
-  return new(zone) HLoadKeyed(elements,
-                              checked_key,
-                              load_dependency,
-                              elements_kind);
-}
-
-
-HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
-    HValue* object,
-    HValue* key,
-    HValue* val,
-    HCheckMaps* mapcheck,
-    bool is_js_array,
-    ElementsKind elements_kind,
-    bool is_store) {
-  Zone* zone = this->zone();
-  // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
-  // on a HElementsTransition instruction. The flag can also be removed if the
-  // map to check has FAST_HOLEY_ELEMENTS, since there can be no further
-  // ElementsKind transitions. Finally, the dependency can be removed for stores
-  // for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
-  // generated store code.
-  if ((elements_kind == FAST_HOLEY_ELEMENTS) ||
-      (elements_kind == FAST_ELEMENTS && is_store)) {
-    if (mapcheck != NULL) {
-      mapcheck->ClearGVNFlag(kDependsOnElementsKind);
-    }
-  }
-  bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
-  bool fast_elements = IsFastObjectElementsKind(elements_kind);
-  HInstruction* elements =
-      AddInstruction(new(zone) HLoadElements(object, mapcheck));
-  if (is_store && (fast_elements || fast_smi_only_elements)) {
-    HCheckMaps* check_cow_map = new(zone) HCheckMaps(
-        elements, Isolate::Current()->factory()->fixed_array_map(), zone);
-    check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
-    AddInstruction(check_cow_map);
-  }
-  HInstruction* length = NULL;
-  HInstruction* checked_key = NULL;
-  if (IsExternalArrayElementsKind(elements_kind)) {
-    length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
-    checked_key = AddInstruction(new(zone) HBoundsCheck(key, length,
-                                                        ALLOW_SMI_KEY));
-    HLoadExternalArrayPointer* external_elements =
-        new(zone) HLoadExternalArrayPointer(elements);
-    AddInstruction(external_elements);
-    return BuildExternalArrayElementAccess(
-        external_elements, checked_key, val, mapcheck,
-        elements_kind, is_store);
-  }
-  ASSERT(fast_smi_only_elements ||
-         fast_elements ||
-         IsFastDoubleElementsKind(elements_kind));
-  if (is_js_array) {
-    length = AddInstruction(new(zone) HJSArrayLength(object, mapcheck,
-                                                     HType::Smi()));
-  } else {
-    length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
-  }
-  checked_key = AddInstruction(new(zone) HBoundsCheck(key, length,
-                                                      ALLOW_SMI_KEY));
-  return BuildFastElementAccess(elements, checked_key, val, mapcheck,
-                                elements_kind, is_store);
-}
-
-
-HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info,
-                                               TypeFeedbackOracle* oracle)
-    : HGraphBuilder(info),
-      function_state_(NULL),
+HGraphBuilder::HGraphBuilder(CompilationInfo* info,
+                             TypeFeedbackOracle* oracle)
+    : function_state_(NULL),
       initial_function_state_(this, info, oracle, NORMAL_RETURN),
       ast_context_(NULL),
       break_scope_(NULL),
+      graph_(NULL),
+      current_block_(NULL),
       inlined_count_(0),
       globals_(10, info->zone()),
+      zone_(info->zone()),
       inline_bailout_(false) {
   // This is not initialized in the initializer list because the
   // constructor for the initial state relies on function_state_ == NULL
   // to know it's the initial state.
   function_state_= &initial_function_state_;
-  InitializeAstVisitor();
 }
 
-
-HBasicBlock* HOptimizedGraphBuilder::CreateJoin(HBasicBlock* first,
-                                                HBasicBlock* second,
-                                                BailoutId join_id) {
+HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first,
+                                       HBasicBlock* second,
+                                       BailoutId join_id) {
   if (first == NULL) {
     return second;
   } else if (second == NULL) {
     return first;
   } else {
-    HBasicBlock* join_block = graph()->CreateBasicBlock();
+    HBasicBlock* join_block = graph_->CreateBasicBlock();
     first->Goto(join_block);
     second->Goto(join_block);
     join_block->SetJoinId(join_id);
@@ -828,9 +656,9 @@ HBasicBlock* HOptimizedGraphBuilder::CreateJoin(HBasicBlock* first,
 }
 
 
-HBasicBlock* HOptimizedGraphBuilder::JoinContinue(IterationStatement* statement,
-                                                  HBasicBlock* exit_block,
-                                                  HBasicBlock* continue_block) {
+HBasicBlock* HGraphBuilder::JoinContinue(IterationStatement* statement,
+                                         HBasicBlock* exit_block,
+                                         HBasicBlock* continue_block) {
   if (continue_block != NULL) {
     if (exit_block != NULL) exit_block->Goto(continue_block);
     continue_block->SetJoinId(statement->ContinueId());
@@ -840,11 +668,11 @@ HBasicBlock* HOptimizedGraphBuilder::JoinContinue(IterationStatement* statement,
 }
 
 
-HBasicBlock* HOptimizedGraphBuilder::CreateLoop(IterationStatement* statement,
-                                                HBasicBlock* loop_entry,
-                                                HBasicBlock* body_exit,
-                                                HBasicBlock* loop_successor,
-                                                HBasicBlock* break_block) {
+HBasicBlock* HGraphBuilder::CreateLoop(IterationStatement* statement,
+                                       HBasicBlock* loop_entry,
+                                       HBasicBlock* body_exit,
+                                       HBasicBlock* loop_successor,
+                                       HBasicBlock* break_block) {
   if (body_exit != NULL) body_exit->Goto(loop_entry);
   loop_entry->PostProcessLoopHeader(statement);
   if (break_block != NULL) {
@@ -875,13 +703,8 @@ HGraph::HGraph(CompilationInfo* info)
       is_recursive_(false),
       use_optimistic_licm_(false),
       type_change_checksum_(0) {
-  if (info->IsStub()) {
-    start_environment_ =
-        new(zone_) HEnvironment(zone_);
-  } else {
-    start_environment_ =
-        new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
-  }
+  start_environment_ =
+      new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
   start_environment_->set_ast_id(BailoutId::FunctionEntry());
   entry_block_ = CreateBasicBlock();
   entry_block_->SetInitialEnvironment(start_environment_);
@@ -3070,7 +2893,7 @@ void HGraph::ComputeMinusZeroChecks() {
 
 // Implementation of utility class to encapsulate the translation state for
 // a (possibly inlined) function.
-FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
+FunctionState::FunctionState(HGraphBuilder* owner,
                              CompilationInfo* info,
                              TypeFeedbackOracle* oracle,
                              InliningKind inlining_kind)
@@ -3119,7 +2942,7 @@ FunctionState::~FunctionState() {
 
 // Implementation of utility classes to represent an expression's context in
 // the AST.
-AstContext::AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind)
+AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
     : owner_(owner),
       kind_(kind),
       outer_(owner->ast_context()),
@@ -3230,7 +3053,7 @@ void ValueContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
 
 void TestContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
   ASSERT(!instr->IsControlInstruction());
-  HOptimizedGraphBuilder* builder = owner();
+  HGraphBuilder* builder = owner();
   builder->AddInstruction(instr);
   // We expect a simulate after every expression with side effects, though
   // this one isn't actually needed (and wouldn't work if it were targeted).
@@ -3261,7 +3084,7 @@ void TestContext::BuildBranch(HValue* value) {
   // connects a branch node to a join node.  We conservatively ensure that
   // property by always adding an empty block on the outgoing edges of this
   // branch.
-  HOptimizedGraphBuilder* builder = owner();
+  HGraphBuilder* builder = owner();
   if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
     builder->Bailout("arguments object value in a test context");
   }
@@ -3278,7 +3101,7 @@ void TestContext::BuildBranch(HValue* value) {
 }
 
 
-// HOptimizedGraphBuilder infrastructure for bailing out and checking bailouts.
+// HGraphBuilder infrastructure for bailing out and checking bailouts.
 #define CHECK_BAILOUT(call)                     \
   do {                                          \
     call;                                       \
@@ -3293,26 +3116,25 @@ void TestContext::BuildBranch(HValue* value) {
   } while (false)
 
 
-void HOptimizedGraphBuilder::Bailout(const char* reason) {
+void HGraphBuilder::Bailout(const char* reason) {
   info()->set_bailout_reason(reason);
   SetStackOverflow();
 }
 
 
-void HOptimizedGraphBuilder::VisitForEffect(Expression* expr) {
+void HGraphBuilder::VisitForEffect(Expression* expr) {
   EffectContext for_effect(this);
   Visit(expr);
 }
 
 
-void HOptimizedGraphBuilder::VisitForValue(Expression* expr,
-                                           ArgumentsAllowedFlag flag) {
+void HGraphBuilder::VisitForValue(Expression* expr, ArgumentsAllowedFlag flag) {
   ValueContext for_value(this, flag);
   Visit(expr);
 }
 
 
-void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
+void HGraphBuilder::VisitForTypeOf(Expression* expr) {
   ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
   for_value.set_for_typeof(true);
   Visit(expr);
@@ -3320,107 +3142,112 @@ void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
 
 
 
-void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
-                                             HBasicBlock* true_block,
-                                             HBasicBlock* false_block) {
+void HGraphBuilder::VisitForControl(Expression* expr,
+                                    HBasicBlock* true_block,
+                                    HBasicBlock* false_block) {
   TestContext for_test(this, expr, oracle(), true_block, false_block);
   Visit(expr);
 }
 
 
-void HOptimizedGraphBuilder::VisitArgument(Expression* expr) {
+void HGraphBuilder::VisitArgument(Expression* expr) {
   CHECK_ALIVE(VisitForValue(expr));
   Push(AddInstruction(new(zone()) HPushArgument(Pop())));
 }
 
 
-void HOptimizedGraphBuilder::VisitArgumentList(
-    ZoneList<Expression*>* arguments) {
+void HGraphBuilder::VisitArgumentList(ZoneList<Expression*>* arguments) {
   for (int i = 0; i < arguments->length(); i++) {
     CHECK_ALIVE(VisitArgument(arguments->at(i)));
   }
 }
 
 
-void HOptimizedGraphBuilder::VisitExpressions(
-    ZoneList<Expression*>* exprs) {
+void HGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs) {
   for (int i = 0; i < exprs->length(); ++i) {
     CHECK_ALIVE(VisitForValue(exprs->at(i)));
   }
 }
 
 
-bool HOptimizedGraphBuilder::BuildGraph() {
-  Scope* scope = info()->scope();
-  if (scope->HasIllegalRedeclaration()) {
-    Bailout("function with illegal redeclaration");
-    return false;
-  }
-  if (scope->calls_eval()) {
-    Bailout("function calls eval");
-    return false;
-  }
-  SetUpScope(scope);
-
-  // Add an edge to the body entry.  This is warty: the graph's start
-  // environment will be used by the Lithium translation as the initial
-  // environment on graph entry, but it has now been mutated by the
-  // Hydrogen translation of the instructions in the start block.  This
-  // environment uses values which have not been defined yet.  These
-  // Hydrogen instructions will then be replayed by the Lithium
-  // translation, so they cannot have an environment effect.  The edge to
-  // the body's entry block (along with some special logic for the start
-  // block in HInstruction::InsertAfter) seals the start block from
-  // getting unwanted instructions inserted.
-  //
-  // TODO(kmillikin): Fix this.  Stop mutating the initial environment.
-  // Make the Hydrogen instructions in the initial block into Hydrogen
-  // values (but not instructions), present in the initial environment and
-  // not replayed by the Lithium translation.
-  HEnvironment* initial_env = environment()->CopyWithoutHistory();
-  HBasicBlock* body_entry = CreateBasicBlock(initial_env);
-  current_block()->Goto(body_entry);
-  body_entry->SetJoinId(BailoutId::FunctionEntry());
-  set_current_block(body_entry);
-
-  // Handle implicit declaration of the function name in named function
-  // expressions before other declarations.
-  if (scope->is_function_scope() && scope->function() != NULL) {
-    VisitVariableDeclaration(scope->function());
-  }
-  VisitDeclarations(scope->declarations());
-  AddSimulate(BailoutId::Declarations());
+HGraph* HGraphBuilder::CreateGraph() {
+  graph_ = new(zone()) HGraph(info());
+  if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info());
 
-  HValue* context = environment()->LookupContext();
-  AddInstruction(
-      new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
+  {
+    HPhase phase("H_Block building");
+    current_block_ = graph()->entry_block();
 
-  VisitStatements(info()->function()->body());
-  if (HasStackOverflow()) return false;
+    Scope* scope = info()->scope();
+    if (scope->HasIllegalRedeclaration()) {
+      Bailout("function with illegal redeclaration");
+      return NULL;
+    }
+    if (scope->calls_eval()) {
+      Bailout("function calls eval");
+      return NULL;
+    }
+    SetUpScope(scope);
+
+    // Add an edge to the body entry.  This is warty: the graph's start
+    // environment will be used by the Lithium translation as the initial
+    // environment on graph entry, but it has now been mutated by the
+    // Hydrogen translation of the instructions in the start block.  This
+    // environment uses values which have not been defined yet.  These
+    // Hydrogen instructions will then be replayed by the Lithium
+    // translation, so they cannot have an environment effect.  The edge to
+    // the body's entry block (along with some special logic for the start
+    // block in HInstruction::InsertAfter) seals the start block from
+    // getting unwanted instructions inserted.
+    //
+    // TODO(kmillikin): Fix this.  Stop mutating the initial environment.
+    // Make the Hydrogen instructions in the initial block into Hydrogen
+    // values (but not instructions), present in the initial environment and
+    // not replayed by the Lithium translation.
+    HEnvironment* initial_env = environment()->CopyWithoutHistory();
+    HBasicBlock* body_entry = CreateBasicBlock(initial_env);
+    current_block()->Goto(body_entry);
+    body_entry->SetJoinId(BailoutId::FunctionEntry());
+    set_current_block(body_entry);
+
+    // Handle implicit declaration of the function name in named function
+    // expressions before other declarations.
+    if (scope->is_function_scope() && scope->function() != NULL) {
+      VisitVariableDeclaration(scope->function());
+    }
+    VisitDeclarations(scope->declarations());
+    AddSimulate(BailoutId::Declarations());
 
-  if (current_block() != NULL) {
-    HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined());
-    current_block()->FinishExit(instr);
-    set_current_block(NULL);
-  }
+    HValue* context = environment()->LookupContext();
+    AddInstruction(
+        new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
 
-  // If the checksum of the number of type info changes is the same as the
-  // last time this function was compiled, then this recompile is likely not
-  // due to missing/inadequate type feedback, but rather too aggressive
-  // optimization. Disable optimistic LICM in that case.
-  Handle<Code> unoptimized_code(info()->shared_info()->code());
-  ASSERT(unoptimized_code->kind() == Code::FUNCTION);
-  Handle<TypeFeedbackInfo> type_info(
-      TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
-  int checksum = type_info->own_type_change_checksum();
-  int composite_checksum = graph()->update_type_change_checksum(checksum);
-  graph()->set_use_optimistic_licm(
-      !type_info->matches_inlined_type_change_checksum(composite_checksum));
-  type_info->set_inlined_type_change_checksum(composite_checksum);
+    VisitStatements(info()->function()->body());
+    if (HasStackOverflow()) return NULL;
 
-  return true;
-}
+    if (current_block() != NULL) {
+      HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined());
+      current_block()->FinishExit(instr);
+      set_current_block(NULL);
+    }
 
+    // If the checksum of the number of type info changes is the same as the
+    // last time this function was compiled, then this recompile is likely not
+    // due to missing/inadequate type feedback, but rather too aggressive
+    // optimization. Disable optimistic LICM in that case.
+    Handle<Code> unoptimized_code(info()->shared_info()->code());
+    ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+    Handle<TypeFeedbackInfo> type_info(
+        TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
+    int checksum = type_info->own_type_change_checksum();
+    int composite_checksum = graph()->update_type_change_checksum(checksum);
+    graph()->set_use_optimistic_licm(
+        !type_info->matches_inlined_type_change_checksum(composite_checksum));
+    type_info->set_inlined_type_change_checksum(composite_checksum);
+  }
+
+  return graph();
+}
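
The checksum handshake just above is compact but easy to misread in diff form. As a standalone sketch of only the decision it makes, with a hypothetical FeedbackInfo type mirroring the three calls in the hunk and the checksum-combining step left abstract:

    struct FeedbackInfo {  // hypothetical stand-in for TypeFeedbackInfo
      int inlined_type_change_checksum;
    };

    // If the composite checksum still matches the one recorded at the last
    // optimized compile, type feedback has not changed, so this recompile is
    // probably the result of over-aggressive optimization rather than bad
    // feedback: disable optimistic LICM for this compile.
    bool ShouldUseOptimisticLicm(FeedbackInfo* info, int composite_checksum) {
      bool unchanged =
          info->inlined_type_change_checksum == composite_checksum;
      info->inlined_type_change_checksum = composite_checksum;  // remember
      return !unchanged;
    }
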
 
 bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
   *bailout_reason = SmartArrayPointer<char>();
@@ -3935,20 +3762,33 @@ void HGraph::DeadCodeElimination() {
 }
 
 
-void HOptimizedGraphBuilder::AddPhi(HPhi* instr) {
+HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
+  ASSERT(current_block() != NULL);
+  current_block()->AddInstruction(instr);
+  return instr;
+}
+
+
+void HGraphBuilder::AddSimulate(BailoutId ast_id, RemovableSimulate removable) {
+  ASSERT(current_block() != NULL);
+  current_block()->AddSimulate(ast_id, removable);
+}
+
+
+void HGraphBuilder::AddPhi(HPhi* instr) {
   ASSERT(current_block() != NULL);
   current_block()->AddPhi(instr);
 }
 
 
-void HOptimizedGraphBuilder::PushAndAdd(HInstruction* instr) {
+void HGraphBuilder::PushAndAdd(HInstruction* instr) {
   Push(instr);
   AddInstruction(instr);
 }
 
 
 template <class Instruction>
-HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
+HInstruction* HGraphBuilder::PreProcessCall(Instruction* call) {
   int count = call->argument_count();
   ZoneList<HValue*> arguments(count, zone());
   for (int i = 0; i < count; ++i) {
@@ -3962,11 +3802,11 @@ HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
 }
 
 
-void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
+void HGraphBuilder::SetUpScope(Scope* scope) {
   HConstant* undefined_constant = new(zone()) HConstant(
       isolate()->factory()->undefined_value(), Representation::Tagged());
   AddInstruction(undefined_constant);
-  graph()->set_undefined_constant(undefined_constant);
+  graph_->set_undefined_constant(undefined_constant);
 
   HArgumentsObject* object = new(zone()) HArgumentsObject;
   AddInstruction(object);
@@ -4005,21 +3845,21 @@ void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
 }
 
 
-void HOptimizedGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
+void HGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
   for (int i = 0; i < statements->length(); i++) {
     CHECK_ALIVE(Visit(statements->at(i)));
   }
 }
 
 
-HBasicBlock* HOptimizedGraphBuilder::CreateBasicBlock(HEnvironment* env) {
+HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
   HBasicBlock* b = graph()->CreateBasicBlock();
   b->SetInitialEnvironment(env);
   return b;
 }
 
 
-HBasicBlock* HOptimizedGraphBuilder::CreateLoopHeaderBlock() {
+HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
   HBasicBlock* header = graph()->CreateBasicBlock();
   HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
   header->SetInitialEnvironment(entry_env);
@@ -4028,7 +3868,7 @@ HBasicBlock* HOptimizedGraphBuilder::CreateLoopHeaderBlock() {
 }
 
 
-void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
+void HGraphBuilder::VisitBlock(Block* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4048,8 +3888,7 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
 }
 
 
-void HOptimizedGraphBuilder::VisitExpressionStatement(
-    ExpressionStatement* stmt) {
+void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4057,14 +3896,14 @@ void HOptimizedGraphBuilder::VisitExpressionStatement(
 }
 
 
-void HOptimizedGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+void HGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
 }
 
 
-void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4103,7 +3942,7 @@ void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) {
 }
 
 
-HBasicBlock* HOptimizedGraphBuilder::BreakAndContinueScope::Get(
+HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get(
     BreakableStatement* stmt,
     BreakType type,
     int* drop_extra) {
@@ -4142,8 +3981,7 @@ HBasicBlock* HOptimizedGraphBuilder::BreakAndContinueScope::Get(
 }
 
 
-void HOptimizedGraphBuilder::VisitContinueStatement(
-    ContinueStatement* stmt) {
+void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4157,7 +3995,7 @@ void HOptimizedGraphBuilder::VisitContinueStatement(
 }
 
 
-void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4171,7 +4009,7 @@ void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
 }
 
 
-void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4243,7 +4081,7 @@ void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
 }
 
 
-void HOptimizedGraphBuilder::VisitWithStatement(WithStatement* stmt) {
+void HGraphBuilder::VisitWithStatement(WithStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4251,7 +4089,7 @@ void HOptimizedGraphBuilder::VisitWithStatement(WithStatement* stmt) {
 }
 
 
-void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4437,12 +4275,12 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
 }
 
 
-bool HOptimizedGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
+bool HGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
   return statement->OsrEntryId() == info()->osr_ast_id();
 }
 
 
-bool HOptimizedGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
+bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
   if (!HasOsrEntryAt(statement)) return false;
 
   HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
@@ -4492,9 +4330,9 @@ bool HOptimizedGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
 }
 
 
-void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt,
-                                           HBasicBlock* loop_entry,
-                                           BreakAndContinueInfo* break_info) {
+void HGraphBuilder::VisitLoopBody(IterationStatement* stmt,
+                                  HBasicBlock* loop_entry,
+                                  BreakAndContinueInfo* break_info) {
   BreakAndContinueScope push(break_info, this);
   AddSimulate(stmt->StackCheckId());
   HValue* context = environment()->LookupContext();
@@ -4507,7 +4345,7 @@ void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt,
 }
 
 
-void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4550,7 +4388,7 @@ void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
 }
 
 
-void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4594,7 +4432,7 @@ void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
 }
 
 
-void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
+void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4646,7 +4484,7 @@ void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
 }
 
 
-void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
+void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4767,7 +4605,7 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
 }
 
 
-void HOptimizedGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+void HGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4775,8 +4613,7 @@ void HOptimizedGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
 }
 
 
-void HOptimizedGraphBuilder::VisitTryFinallyStatement(
-    TryFinallyStatement* stmt) {
+void HGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4784,7 +4621,7 @@ void HOptimizedGraphBuilder::VisitTryFinallyStatement(
 }
 
 
-void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+void HGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4812,7 +4649,7 @@ static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
 }
 
 
-void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4831,7 +4668,7 @@ void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
 }
 
 
-void HOptimizedGraphBuilder::VisitSharedFunctionInfoLiteral(
+void HGraphBuilder::VisitSharedFunctionInfoLiteral(
     SharedFunctionInfoLiteral* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
@@ -4840,7 +4677,7 @@ void HOptimizedGraphBuilder::VisitSharedFunctionInfoLiteral(
 }
 
 
-void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
+void HGraphBuilder::VisitConditional(Conditional* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4878,9 +4715,8 @@ void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
 }
 
 
-HOptimizedGraphBuilder::GlobalPropertyAccess
-    HOptimizedGraphBuilder::LookupGlobalProperty(
-        Variable* var, LookupResult* lookup, bool is_store) {
+HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty(
+    Variable* var, LookupResult* lookup, bool is_store) {
   if (var->is_this() || !info()->has_global_object()) {
     return kUseGeneric;
   }
@@ -4896,7 +4732,7 @@ HOptimizedGraphBuilder::GlobalPropertyAccess
 }
 
 
-HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
+HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
   ASSERT(var->IsContextSlot());
   HValue* context = environment()->LookupContext();
   int length = info()->scope()->ContextChainLength(var->scope());
@@ -4909,7 +4745,7 @@ HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
 }
 
 
-void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4982,7 +4818,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
 }
 
 
-void HOptimizedGraphBuilder::VisitLiteral(Literal* expr) {
+void HGraphBuilder::VisitLiteral(Literal* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4992,7 +4828,7 @@ void HOptimizedGraphBuilder::VisitLiteral(Literal* expr) {
 }
 
 
-void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -5151,7 +4987,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
 }
 
 
-void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -5256,7 +5092,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
 }
 
 
-void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -5390,19 +5226,18 @@ static int ComputeLoadStoreFieldIndex(Handle<Map> type,
 }
 
 
-void HOptimizedGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
-                                                         Handle<Map> map) {
+void HGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
+                                                Handle<Map> map) {
   AddInstruction(new(zone()) HCheckNonSmi(object));
   AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
 }
 
 
-HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
-    HValue* object,
-    Handle<String> name,
-    HValue* value,
-    Handle<Map> map,
-    LookupResult* lookup) {
+HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
+                                                  Handle<String> name,
+                                                  HValue* value,
+                                                  Handle<Map> map,
+                                                  LookupResult* lookup) {
   ASSERT(lookup->IsFound());
   // If the property does not exist yet, we have to check that it wasn't made
   // readonly or turned into a setter by intervening modifications on the
@@ -5454,10 +5289,9 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
 }
 
 
-HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
-    HValue* object,
-    Handle<String> name,
-    HValue* value) {
+HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
+                                                    Handle<String> name,
+                                                    HValue* value) {
   HValue* context = environment()->LookupContext();
   return new(zone()) HStoreNamedGeneric(
                          context,
@@ -5468,12 +5302,11 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
 }
 
 
-HInstruction* HOptimizedGraphBuilder::BuildCallSetter(
-    HValue* object,
-    HValue* value,
-    Handle<Map> map,
-    Handle<JSFunction> setter,
-    Handle<JSObject> holder) {
+HInstruction* HGraphBuilder::BuildCallSetter(HValue* object,
+                                             HValue* value,
+                                             Handle<Map> map,
+                                             Handle<JSFunction> setter,
+                                             Handle<JSObject> holder) {
   AddCheckConstantFunction(holder, object, map);
   AddInstruction(new(zone()) HPushArgument(object));
   AddInstruction(new(zone()) HPushArgument(value));
@@ -5481,11 +5314,10 @@ HInstruction* HOptimizedGraphBuilder::BuildCallSetter(
 }
 
 
-HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
-    HValue* object,
-    Handle<String> name,
-    HValue* value,
-    Handle<Map> map) {
+HInstruction* HGraphBuilder::BuildStoreNamedMonomorphic(HValue* object,
+                                                        Handle<String> name,
+                                                        HValue* value,
+                                                        Handle<Map> map) {
   // Handle a store to a known field.
   LookupResult lookup(isolate());
   if (ComputeLoadStoreField(map, name, &lookup, true)) {
@@ -5498,11 +5330,10 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
 }
 
 
-void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
-    Property* expr,
-    HValue* object,
-    SmallMapList* types,
-    Handle<String> name) {
+void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
+                                                    HValue* object,
+                                                    SmallMapList* types,
+                                                    Handle<String> name) {
   int count = 0;
   int previous_field_offset = 0;
   bool previous_field_is_in_object = false;
@@ -5554,12 +5385,11 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
 }
 
 
-void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
-    Assignment* expr,
-    HValue* object,
-    HValue* value,
-    SmallMapList* types,
-    Handle<String> name) {
+void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
+                                                     HValue* object,
+                                                     HValue* value,
+                                                     SmallMapList* types,
+                                                     Handle<String> name) {
   // TODO(ager): We should recognize when the prototype chains for different
   // maps are identical. In that case we can avoid repeatedly generating the
   // same prototype map checks.
@@ -5631,7 +5461,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
 }
 
 
-void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
+void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
   Property* prop = expr->target()->AsProperty();
   ASSERT(prop != NULL);
   expr->RecordTypeFeedback(oracle(), zone());
@@ -5714,11 +5544,10 @@ void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
 // Because not every expression has a position and there is no common
 // superclass of Assignment and CountOperation, we cannot just pass the
 // owning expression instead of position and ast_id separately.
-void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
-    Variable* var,
-    HValue* value,
-    int position,
-    BailoutId ast_id) {
+void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
+                                                   HValue* value,
+                                                   int position,
+                                                   BailoutId ast_id) {
   LookupResult lookup(isolate());
   GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
   if (type == kUseCell) {
@@ -5749,7 +5578,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
 }
 
 
-void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
+void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
   Expression* target = expr->target();
   VariableProxy* proxy = target->AsVariableProxy();
   Property* prop = target->AsProperty();
@@ -5946,7 +5775,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
 }
 
 
-void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
+void HGraphBuilder::VisitAssignment(Assignment* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -6073,7 +5902,7 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
 }
 
 
-void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
+void HGraphBuilder::VisitThrow(Throw* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -6094,10 +5923,9 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
 }
 
 
-HLoadNamedField* HOptimizedGraphBuilder::BuildLoadNamedField(
-    HValue* object,
-    Handle<Map> map,
-    LookupResult* lookup) {
+HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
+                                                    Handle<Map> map,
+                                                    LookupResult* lookup) {
   int index = lookup->GetLocalFieldIndexFromMap(*map);
   if (index < 0) {
     // Negative property indices are in-object properties, indexed
@@ -6112,10 +5940,9 @@ HLoadNamedField* HOptimizedGraphBuilder::BuildLoadNamedField(
 }
 
 
-HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
-    HValue* object,
-    Handle<String> name,
-    Property* expr) {
+HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* object,
+                                                   Handle<String> name,
+                                                   Property* expr) {
   if (expr->IsUninitialized() && !FLAG_always_opt) {
     AddInstruction(new(zone()) HSoftDeoptimize);
     current_block()->MarkAsDeoptimizing();
@@ -6125,22 +5952,20 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
 }
 
 
-HInstruction* HOptimizedGraphBuilder::BuildCallGetter(
-    HValue* object,
-    Handle<Map> map,
-    Handle<JSFunction> getter,
-    Handle<JSObject> holder) {
+HInstruction* HGraphBuilder::BuildCallGetter(HValue* object,
+                                             Handle<Map> map,
+                                             Handle<JSFunction> getter,
+                                             Handle<JSObject> holder) {
   AddCheckConstantFunction(holder, object, map);
   AddInstruction(new(zone()) HPushArgument(object));
   return new(zone()) HCallConstantFunction(getter, 1);
 }
 
 
-HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
-    HValue* object,
-    Handle<String> name,
-    Property* expr,
-    Handle<Map> map) {
+HInstruction* HGraphBuilder::BuildLoadNamedMonomorphic(HValue* object,
+                                                       Handle<String> name,
+                                                       Property* expr,
+                                                       Handle<Map> map) {
   // Handle a load from a known field.
   ASSERT(!map->is_dictionary_map());
   LookupResult lookup(isolate());
@@ -6174,34 +5999,174 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
 }
 
 
-HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
-                                                            HValue* key) {
+HInstruction* HGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
+                                                   HValue* key) {
   HValue* context = environment()->LookupContext();
   return new(zone()) HLoadKeyedGeneric(context, object, key);
 }
 
 
-HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
-    HValue* object,
-    HValue* key,
+HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
+    HValue* external_elements,
+    HValue* checked_key,
     HValue* val,
     HValue* dependency,
-    Handle<Map> map,
+    ElementsKind elements_kind,
     bool is_store) {
+  if (is_store) {
+    ASSERT(val != NULL);
+    switch (elements_kind) {
+      case EXTERNAL_PIXEL_ELEMENTS: {
+        val = AddInstruction(new(zone()) HClampToUint8(val));
+        break;
+      }
+      case EXTERNAL_BYTE_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+        break;
+      }
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+        break;
+      case FAST_SMI_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+    return new(zone()) HStoreKeyed(external_elements,
+                                   checked_key,
+                                   val,
+                                   elements_kind);
+  } else {
+    ASSERT(val == NULL);
+    HLoadKeyed* load =
+        new(zone()) HLoadKeyed(
+            external_elements, checked_key, dependency, elements_kind);
+    if (FLAG_opt_safe_uint32_operations &&
+        elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+      graph()->RecordUint32Instruction(load);
+    }
+    return load;
+  }
+}
+
+
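
In the EXTERNAL_PIXEL_ELEMENTS store case above, HClampToUint8 implements pixel-array semantics: out-of-range values saturate instead of wrapping. A standalone sketch of the integer case (V8's instruction additionally handles doubles, rounding ties to even):

    #include <cassert>
    #include <cstdint>

    uint8_t ClampToUint8(int value) {  // saturating, not modular
      if (value < 0) return 0;
      if (value > 255) return 255;
      return static_cast<uint8_t>(value);
    }

    int main() {
      assert(ClampToUint8(-5) == 0);     // negatives clamp to 0
      assert(ClampToUint8(300) == 255);  // overflow clamps to 255
      assert(ClampToUint8(42) == 42);    // in-range values pass through
    }
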
+HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
+                                                    HValue* checked_key,
+                                                    HValue* val,
+                                                    HValue* load_dependency,
+                                                    ElementsKind elements_kind,
+                                                    bool is_store) {
+  if (is_store) {
+    ASSERT(val != NULL);
+    switch (elements_kind) {
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+        // Smi-only arrays need a smi check.
+        AddInstruction(new(zone()) HCheckSmi(val));
+        // Fall through.
+      case FAST_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+        return new(zone()) HStoreKeyed(
+            elements, checked_key, val, elements_kind);
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+  // It's an element load (!is_store).
+  return new(zone()) HLoadKeyed(elements,
+                                checked_key,
+                                load_dependency,
+                                elements_kind);
+}
+
+
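
The HCheckSmi in the smi-only store path above guards the FAST_SMI_ELEMENTS invariant: the backing store may hold nothing but small integers (smis), so a non-smi value must deopt rather than be stored. For reference, a simplified sketch of the 32-bit smi tagging scheme that invariant is built on (not V8's actual headers):

    #include <cassert>
    #include <cstdint>

    const intptr_t kSmiTagMask = 1;  // low bit 0 => smi, 1 => heap pointer

    bool IsSmi(intptr_t word) { return (word & kSmiTagMask) == 0; }
    intptr_t ToSmi(int32_t value) { return static_cast<intptr_t>(value) * 2; }
    int32_t SmiValue(intptr_t word) { return static_cast<int32_t>(word / 2); }

    int main() {
      intptr_t w = ToSmi(-7);
      assert(IsSmi(w) && SmiValue(w) == -7);
    }
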
+HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
+                                                           HValue* key,
+                                                           HValue* val,
+                                                           HValue* dependency,
+                                                           Handle<Map> map,
+                                                           bool is_store) {
   HCheckMaps* mapcheck = new(zone()) HCheckMaps(object, map,
                                                 zone(), dependency);
   AddInstruction(mapcheck);
   if (dependency) {
     mapcheck->ClearGVNFlag(kDependsOnElementsKind);
   }
-  return BuildUncheckedMonomorphicElementAccess(
-      object, key, val,
-      mapcheck, map->instance_type() == JS_ARRAY_TYPE,
-      map->elements_kind(), is_store);
+  return BuildUncheckedMonomorphicElementAccess(object, key, val,
+                                                mapcheck, map, is_store);
+}
+
+
+HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
+    HValue* object,
+    HValue* key,
+    HValue* val,
+    HCheckMaps* mapcheck,
+    Handle<Map> map,
+    bool is_store) {
+  // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
+  // on a HElementsTransition instruction. The flag can also be removed if the
+  // map to check has FAST_HOLEY_ELEMENTS, since there can be no further
+  // ElementsKind transitions. Finally, the dependency can be removed for
+  // stores to FAST_ELEMENTS, since a transition to HOLEY elements won't
+  // change the generated store code.
+  if ((map->elements_kind() == FAST_HOLEY_ELEMENTS) ||
+      (map->elements_kind() == FAST_ELEMENTS && is_store)) {
+    mapcheck->ClearGVNFlag(kDependsOnElementsKind);
+  }
+  bool fast_smi_only_elements = map->has_fast_smi_elements();
+  bool fast_elements = map->has_fast_object_elements();
+  HInstruction* elements =
+      AddInstruction(new(zone()) HLoadElements(object, mapcheck));
+  if (is_store && (fast_elements || fast_smi_only_elements)) {
+    HCheckMaps* check_cow_map = new(zone()) HCheckMaps(
+        elements, isolate()->factory()->fixed_array_map(), zone());
+    check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
+    AddInstruction(check_cow_map);
+  }
+  HInstruction* length = NULL;
+  HInstruction* checked_key = NULL;
+  if (map->has_external_array_elements()) {
+    length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
+    checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
+                                                          ALLOW_SMI_KEY));
+    HLoadExternalArrayPointer* external_elements =
+        new(zone()) HLoadExternalArrayPointer(elements);
+    AddInstruction(external_elements);
+    return BuildExternalArrayElementAccess(
+        external_elements, checked_key, val, mapcheck,
+        map->elements_kind(), is_store);
+  }
+  ASSERT(fast_smi_only_elements ||
+         fast_elements ||
+         map->has_fast_double_elements());
+  if (map->instance_type() == JS_ARRAY_TYPE) {
+    length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck,
+                                                       HType::Smi()));
+  } else {
+    length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
+  }
+  checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
+                                                        ALLOW_SMI_KEY));
+  return BuildFastElementAccess(elements, checked_key, val, mapcheck,
+                                map->elements_kind(), is_store);
 }
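
The three prose rules at the top of BuildUncheckedMonomorphicElementAccess condense to a small predicate. Restated as a standalone sketch, with the enum trimmed to the two kinds the condition actually tests:

    enum ElementsKind { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };  // trimmed

    // May the map check drop its kDependsOnElementsKind GVN flag?
    // FAST_HOLEY_ELEMENTS admits no further ElementsKind transitions, and
    // for FAST_ELEMENTS stores a transition to holey elements would not
    // change the generated store code.
    bool CanOmitElementsKindDependency(ElementsKind kind, bool is_store) {
      return kind == FAST_HOLEY_ELEMENTS ||
             (kind == FAST_ELEMENTS && is_store);
    }
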
 
 
-HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
+HInstruction* HGraphBuilder::TryBuildConsolidatedElementLoad(
     HValue* object,
     HValue* key,
     HValue* val,
@@ -6249,23 +6214,19 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
   HCheckMaps* check_maps = new(zone()) HCheckMaps(object, maps, zone());
   AddInstruction(check_maps);
   HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
-      object, key, val, check_maps,
-      most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
-      most_general_consolidated_map->elements_kind(),
-      false);
+      object, key, val, check_maps, most_general_consolidated_map, false);
   return instr;
 }
 
 
-HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
-    HValue* object,
-    HValue* key,
-    HValue* val,
-    Expression* prop,
-    BailoutId ast_id,
-    int position,
-    bool is_store,
-    bool* has_side_effects) {
+HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
+                                                      HValue* key,
+                                                      HValue* val,
+                                                      Expression* prop,
+                                                      BailoutId ast_id,
+                                                      int position,
+                                                      bool is_store,
+                                                      bool* has_side_effects) {
   *has_side_effects = false;
   AddInstruction(new(zone()) HCheckNonSmi(object));
   SmallMapList* maps = prop->GetReceiverTypes();
@@ -6450,8 +6411,8 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
         }
       } else {  // External array elements.
         access = AddInstruction(BuildExternalArrayElementAccess(
-            external_elements, checked_key, val,
-            elements_kind_branch, elements_kind, is_store));
+            external_elements, checked_key, val, elements_kind_branch,
+            elements_kind, is_store));
       }
       *has_side_effects |= access->HasObservableSideEffects();
       if (position != RelocInfo::kNoPosition) access->set_position(position);
@@ -6471,15 +6432,14 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
 }
 
 
-HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
-    HValue* obj,
-    HValue* key,
-    HValue* val,
-    Expression* expr,
-    BailoutId ast_id,
-    int position,
-    bool is_store,
-    bool* has_side_effects) {
+HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
+                                                HValue* key,
+                                                HValue* val,
+                                                Expression* expr,
+                                                BailoutId ast_id,
+                                                int position,
+                                                bool is_store,
+                                                bool* has_side_effects) {
   ASSERT(!expr->IsPropertyName());
   HInstruction* instr = NULL;
   if (expr->IsMonomorphic()) {
@@ -6509,10 +6469,9 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
 }
 
 
-HInstruction* HOptimizedGraphBuilder::BuildStoreKeyedGeneric(
-    HValue* object,
-    HValue* key,
-    HValue* value) {
+HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
+                                                    HValue* key,
+                                                    HValue* value) {
   HValue* context = environment()->LookupContext();
   return new(zone()) HStoreKeyedGeneric(
                          context,
@@ -6523,7 +6482,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreKeyedGeneric(
 }
 
 
-void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
+void HGraphBuilder::EnsureArgumentsArePushedForAccess() {
   // Outermost function already has arguments on the stack.
   if (function_state()->outer() == NULL) return;
 
@@ -6551,7 +6510,7 @@ void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
 }
 
 
-bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
+bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
   VariableProxy* proxy = expr->obj()->AsVariableProxy();
   if (proxy == NULL) return false;
   if (!proxy->var()->IsStackAllocated()) return false;
@@ -6610,7 +6569,7 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
 }
 
 
-void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
+void HGraphBuilder::VisitProperty(Property* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -6701,8 +6660,8 @@ void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
 }
 
 
-void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
-                                                   Handle<Map> receiver_map) {
+void HGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
+                                          Handle<Map> receiver_map) {
   if (!holder.is_null()) {
     AddInstruction(new(zone()) HCheckPrototypeMaps(
         Handle<JSObject>(JSObject::cast(receiver_map->prototype())), holder));
@@ -6710,10 +6669,9 @@ void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
 }
 
 
-void HOptimizedGraphBuilder::AddCheckConstantFunction(
-    Handle<JSObject> holder,
-    HValue* receiver,
-    Handle<Map> receiver_map) {
+void HGraphBuilder::AddCheckConstantFunction(Handle<JSObject> holder,
+                                             HValue* receiver,
+                                             Handle<Map> receiver_map) {
   // Constant functions have the nice property that the map will change if they
   // are overwritten.  Therefore it is enough to check the map of the holder and
   // its prototypes.
@@ -6755,11 +6713,10 @@ static int CompareHotness(void const* a, void const* b) {
 }
 
 
-void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
-    Call* expr,
-    HValue* receiver,
-    SmallMapList* types,
-    Handle<String> name) {
+void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
+                                               HValue* receiver,
+                                               SmallMapList* types,
+                                               Handle<String> name) {
   // TODO(ager): We should recognize when the prototype chains for different
   // maps are identical. In that case we can avoid repeatedly generating the
   // same prototype map checks.
@@ -6861,9 +6818,9 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
 }
 
 
-void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
-                                         Handle<JSFunction> caller,
-                                         const char* reason) {
+void HGraphBuilder::TraceInline(Handle<JSFunction> target,
+                                Handle<JSFunction> caller,
+                                const char* reason) {
   if (FLAG_trace_inlining) {
     SmartArrayPointer<char> target_name =
         target->shared()->DebugName()->ToCString();
@@ -6882,7 +6839,7 @@ void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
 static const int kNotInlinable = 1000000000;
 
 
-int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
+int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
   if (!FLAG_use_inlining) return kNotInlinable;
 
   // Precondition: call is monomorphic and we have found a target with the
@@ -6913,13 +6870,13 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
 }
 
 
-bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
-                                       Handle<JSFunction> target,
-                                       int arguments_count,
-                                       HValue* implicit_return_value,
-                                       BailoutId ast_id,
-                                       BailoutId return_id,
-                                       InliningKind inlining_kind) {
+bool HGraphBuilder::TryInline(CallKind call_kind,
+                              Handle<JSFunction> target,
+                              int arguments_count,
+                              HValue* implicit_return_value,
+                              BailoutId ast_id,
+                              BailoutId return_id,
+                              InliningKind inlining_kind) {
   int nodes_added = InliningAstSize(target);
   if (nodes_added == kNotInlinable) return false;
 
@@ -7228,7 +7185,7 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
 }
 
 
-bool HOptimizedGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
+bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
   // The function call we are inlining is a method call if the call
   // is a property call.
   CallKind call_kind = (expr->expression()->AsProperty() == NULL)
@@ -7245,8 +7202,8 @@ bool HOptimizedGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
 }
 
 
-bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
-                                                HValue* implicit_return_value) {
+bool HGraphBuilder::TryInlineConstruct(CallNew* expr,
+                                       HValue* implicit_return_value) {
   return TryInline(CALL_AS_FUNCTION,
                    expr->target(),
                    expr->arguments()->length(),
@@ -7257,8 +7214,8 @@ bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
 }
 
 
-bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
-                                             Property* prop) {
+bool HGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
+                                    Property* prop) {
   return TryInline(CALL_AS_METHOD,
                    getter,
                    0,
@@ -7269,9 +7226,9 @@ bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
 }
 
 
-bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
-                                             Assignment* assignment,
-                                             HValue* implicit_return_value) {
+bool HGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
+                                    Assignment* assignment,
+                                    HValue* implicit_return_value) {
   return TryInline(CALL_AS_METHOD,
                    setter,
                    1,
@@ -7282,8 +7239,7 @@ bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
 }
 
 
-bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
-                                                          bool drop_extra) {
+bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
   if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
   BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
   switch (id) {
@@ -7317,11 +7273,10 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
 }
 
 
-bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
-    Call* expr,
-    HValue* receiver,
-    Handle<Map> receiver_map,
-    CheckType check_type) {
+bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
+                                               HValue* receiver,
+                                               Handle<Map> receiver_map,
+                                               CheckType check_type) {
   ASSERT(check_type != RECEIVER_MAP_CHECK || !receiver_map.is_null());
   // Try to inline calls like Math.* as operations in the calling function.
   if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
@@ -7451,7 +7406,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
 }
 
 
-bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
+bool HGraphBuilder::TryCallApply(Call* expr) {
   Expression* callee = expr->expression();
   Property* prop = callee->AsProperty();
   ASSERT(prop != NULL);
@@ -7579,7 +7534,7 @@ static Map* CheckSameElementsFamily(SmallMapList* types) {
 }
 
 
-void HOptimizedGraphBuilder::VisitCall(Call* expr) {
+void HGraphBuilder::VisitCall(Call* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -7809,7 +7764,7 @@ static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
 }
 
 
-void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
+void HGraphBuilder::VisitCallNew(CallNew* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -7873,21 +7828,20 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
 
 // Support for generating inlined runtime functions.
 
-// Lookup table for generators for runtime calls that are generated inline.
-// Elements of the table are member pointers to functions of
-// HOptimizedGraphBuilder.
+// Lookup table for generators for runtime calls that are generated inline.
+// Elements of the table are member pointers to functions of HGraphBuilder.
 #define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize)  \
-    &HOptimizedGraphBuilder::Generate##Name,
+    &HGraphBuilder::Generate##Name,
 
-const HOptimizedGraphBuilder::InlineFunctionGenerator
-    HOptimizedGraphBuilder::kInlineFunctionGenerators[] = {
+const HGraphBuilder::InlineFunctionGenerator
+    HGraphBuilder::kInlineFunctionGenerators[] = {
         INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
         INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
 };
 #undef INLINE_FUNCTION_GENERATOR_ADDRESS
 
 
-void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -7925,7 +7879,7 @@ void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
 }
 
 
-void HOptimizedGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -7941,7 +7895,7 @@ void HOptimizedGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
   }
 }
 
-void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
+void HGraphBuilder::VisitDelete(UnaryOperation* expr) {
   Property* prop = expr->expression()->AsProperty();
   VariableProxy* proxy = expr->expression()->AsVariableProxy();
   if (prop != NULL) {
@@ -7976,13 +7930,13 @@ void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
 }
 
 
-void HOptimizedGraphBuilder::VisitVoid(UnaryOperation* expr) {
+void HGraphBuilder::VisitVoid(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForEffect(expr->expression()));
   return ast_context()->ReturnValue(graph()->GetConstantUndefined());
 }
 
 
-void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) {
+void HGraphBuilder::VisitTypeof(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForTypeOf(expr->expression()));
   HValue* value = Pop();
   HValue* context = environment()->LookupContext();
@@ -7991,22 +7945,22 @@ void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) {
 }
 
 
-void HOptimizedGraphBuilder::VisitAdd(UnaryOperation* expr) {
+void HGraphBuilder::VisitAdd(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForValue(expr->expression()));
   HValue* value = Pop();
   HValue* context = environment()->LookupContext();
   HInstruction* instr =
-      new(zone()) HMul(context, value, graph()->GetConstant1());
+      new(zone()) HMul(context, value, graph_->GetConstant1());
   return ast_context()->ReturnInstruction(instr, expr->id());
 }
 
 
-void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
+void HGraphBuilder::VisitSub(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForValue(expr->expression()));
   HValue* value = Pop();
   HValue* context = environment()->LookupContext();
   HInstruction* instr =
-      new(zone()) HMul(context, value, graph()->GetConstantMinus1());
+      new(zone()) HMul(context, value, graph_->GetConstantMinus1());
   TypeInfo info = oracle()->UnaryType(expr);
   Representation rep = ToRepresentation(info);
   if (info.IsUninitialized()) {
@@ -8019,7 +7973,7 @@ void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
 }
 
 
-void HOptimizedGraphBuilder::VisitBitNot(UnaryOperation* expr) {
+void HGraphBuilder::VisitBitNot(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForValue(expr->expression()));
   HValue* value = Pop();
   TypeInfo info = oracle()->UnaryType(expr);
@@ -8032,7 +7986,7 @@ void HOptimizedGraphBuilder::VisitBitNot(UnaryOperation* expr) {
 }
 
 
-void HOptimizedGraphBuilder::VisitNot(UnaryOperation* expr) {
+void HGraphBuilder::VisitNot(UnaryOperation* expr) {
   if (ast_context()->IsTest()) {
     TestContext* context = TestContext::cast(ast_context());
     VisitForControl(expr->expression(),
@@ -8076,9 +8030,8 @@ void HOptimizedGraphBuilder::VisitNot(UnaryOperation* expr) {
 }
 
 
-HInstruction* HOptimizedGraphBuilder::BuildIncrement(
-    bool returns_original_input,
-    CountOperation* expr) {
+HInstruction* HGraphBuilder::BuildIncrement(bool returns_original_input,
+                                            CountOperation* expr) {
   // The input to the count operation is on top of the expression stack.
   TypeInfo info = oracle()->IncrementType(expr);
   Representation rep = ToRepresentation(info);
@@ -8100,8 +8053,8 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
   // to simulate the expression stack after this instruction.
   // Any later failures deopt to the load of the input or earlier.
   HConstant* delta = (expr->op() == Token::INC)
-      ? graph()->GetConstant1()
-      : graph()->GetConstantMinus1();
+      ? graph_->GetConstant1()
+      : graph_->GetConstantMinus1();
   HValue* context = environment()->LookupContext();
   HInstruction* instr = new(zone()) HAdd(context, Top(), delta);
   // We can't insert a simulate here, because it would break deoptimization,
@@ -8114,7 +8067,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
 }
 
 
-void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
+void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -8198,7 +8151,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
 
     if (prop->key()->IsPropertyName()) {
       // Named property.
-      if (returns_original_input) Push(graph()->GetConstantUndefined());
+      if (returns_original_input) Push(graph_->GetConstantUndefined());
 
       CHECK_ALIVE(VisitForValue(prop->obj()));
       HValue* object = Top();
@@ -8259,7 +8212,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
 
     } else {
       // Keyed property.
-      if (returns_original_input) Push(graph()->GetConstantUndefined());
+      if (returns_original_input) Push(graph_->GetConstantUndefined());
 
       CHECK_ALIVE(VisitForValue(prop->obj()));
       CHECK_ALIVE(VisitForValue(prop->key()));
@@ -8299,10 +8252,9 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
 }
 
 
-HStringCharCodeAt* HOptimizedGraphBuilder::BuildStringCharCodeAt(
-    HValue* context,
-    HValue* string,
-    HValue* index) {
+HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* context,
+                                                        HValue* string,
+                                                        HValue* index) {
   AddInstruction(new(zone()) HCheckNonSmi(string));
   AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
   HStringLength* length = new(zone()) HStringLength(string);
@@ -8330,10 +8282,10 @@ static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
 // directions that can be replaced by one rotate right instruction or not.
 // Returns the operand and the shift amount for the rotate instruction in the
 // former case.
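+// For example, (x << 24) | (x >> 8) on a 32-bit value, where the logical
+// shift amounts sum to 32, is matched and emitted as a rotate right by 8.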
-bool HOptimizedGraphBuilder::MatchRotateRight(HValue* left,
-                                              HValue* right,
-                                              HValue** operand,
-                                              HValue** shift_amount) {
+bool HGraphBuilder::MatchRotateRight(HValue* left,
+                                     HValue* right,
+                                     HValue** operand,
+                                     HValue** shift_amount) {
   HShl* shl;
   HShr* shr;
   if (left->IsShl() && right->IsShr()) {
@@ -8368,10 +8320,9 @@ bool CanBeZero(HValue *right) {
 }
 
 
-HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
-    BinaryOperation* expr,
-    HValue* left,
-    HValue* right) {
+HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
+                                                  HValue* left,
+                                                  HValue* right) {
   HValue* context = environment()->LookupContext();
   TypeInfo left_info, right_info, result_info, combined_info;
   oracle()->BinaryType(expr, &left_info, &right_info, &result_info);
@@ -8464,7 +8415,7 @@ static bool IsClassOfTest(CompareOperation* expr) {
 }
 
 
-void HOptimizedGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -8480,7 +8431,7 @@ void HOptimizedGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
 }
 
 
-void HOptimizedGraphBuilder::VisitComma(BinaryOperation* expr) {
+void HGraphBuilder::VisitComma(BinaryOperation* expr) {
   CHECK_ALIVE(VisitForEffect(expr->left()));
   // Visit the right subexpression in the same AST context as the entire
   // expression.
@@ -8488,7 +8439,7 @@ void HOptimizedGraphBuilder::VisitComma(BinaryOperation* expr) {
 }
 
 
-void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
+void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
   bool is_logical_and = expr->op() == Token::AND;
   if (ast_context()->IsTest()) {
     TestContext* context = TestContext::cast(ast_context());
@@ -8578,7 +8529,7 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
 }
 
 
-void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
+void HGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
   CHECK_ALIVE(VisitForValue(expr->left()));
   CHECK_ALIVE(VisitForValue(expr->right()));
   HValue* right = Pop();
@@ -8589,7 +8540,7 @@ void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
 }
 
 
-Representation HOptimizedGraphBuilder::ToRepresentation(TypeInfo info) {
+Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
   if (info.IsUninitialized()) return Representation::None();
   if (info.IsSmi()) return Representation::Integer32();
   if (info.IsInteger32()) return Representation::Integer32();
@@ -8599,9 +8550,9 @@ Representation HOptimizedGraphBuilder::ToRepresentation(TypeInfo info) {
 }
 
 
-void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
-                                                        HTypeof* typeof_expr,
-                                                        Handle<String> check) {
+void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
+                                               HTypeof* typeof_expr,
+                                               Handle<String> check) {
   // Note: The HTypeof itself is removed during canonicalization, if possible.
   HValue* value = typeof_expr->value();
   HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
@@ -8671,7 +8622,7 @@ static bool IsLiteralCompareBool(HValue* left,
 }
 
 
-void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -8824,9 +8775,9 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
 }
 
 
-void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
-                                                     HValue* value,
-                                                     NilValue nil) {
+void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
+                                            HValue* value,
+                                            NilValue nil) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -8838,7 +8789,7 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
 }
 
 
-HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
+HInstruction* HGraphBuilder::BuildThisFunction() {
   // If we share optimized code between different closures, the
   // this-function is not a constant, except inside an inlined body.
   if (function_state()->outer() != NULL) {
@@ -8851,7 +8802,7 @@ HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
 }
 
 
-void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
+void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -8860,8 +8811,7 @@ void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
 }
 
 
-void HOptimizedGraphBuilder::VisitDeclarations(
-    ZoneList<Declaration*>* declarations) {
+void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
   ASSERT(globals_.is_empty());
   AstVisitor::VisitDeclarations(declarations);
   if (!globals_.is_empty()) {
@@ -8879,8 +8829,7 @@ void HOptimizedGraphBuilder::VisitDeclarations(
 }
 
 
-void HOptimizedGraphBuilder::VisitVariableDeclaration(
-    VariableDeclaration* declaration) {
+void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) {
   VariableProxy* proxy = declaration->proxy();
   VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
@@ -8917,8 +8866,7 @@ void HOptimizedGraphBuilder::VisitVariableDeclaration(
 }
 
 
-void HOptimizedGraphBuilder::VisitFunctionDeclaration(
-    FunctionDeclaration* declaration) {
+void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
@@ -8956,52 +8904,49 @@ void HOptimizedGraphBuilder::VisitFunctionDeclaration(
 }
 
 
-void HOptimizedGraphBuilder::VisitModuleDeclaration(
-    ModuleDeclaration* declaration) {
+void HGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* declaration) {
   UNREACHABLE();
 }
 
 
-void HOptimizedGraphBuilder::VisitImportDeclaration(
-    ImportDeclaration* declaration) {
+void HGraphBuilder::VisitImportDeclaration(ImportDeclaration* declaration) {
   UNREACHABLE();
 }
 
 
-void HOptimizedGraphBuilder::VisitExportDeclaration(
-    ExportDeclaration* declaration) {
+void HGraphBuilder::VisitExportDeclaration(ExportDeclaration* declaration) {
   UNREACHABLE();
 }
 
 
-void HOptimizedGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
+void HGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
   UNREACHABLE();
 }
 
 
-void HOptimizedGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
+void HGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
   UNREACHABLE();
 }
 
 
-void HOptimizedGraphBuilder::VisitModulePath(ModulePath* module) {
+void HGraphBuilder::VisitModulePath(ModulePath* module) {
   UNREACHABLE();
 }
 
 
-void HOptimizedGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
+void HGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
   UNREACHABLE();
 }
 
 
-void HOptimizedGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
+void HGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
   UNREACHABLE();
 }
 
 
 // Generators for inline runtime functions.
 // Support for types.
-void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
+void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9010,7 +8955,7 @@ void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
+void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9022,7 +8967,7 @@ void HOptimizedGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
+void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9032,7 +8977,7 @@ void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
+void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9042,7 +8987,7 @@ void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) {
+void HGraphBuilder::GenerateIsArray(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9052,7 +8997,7 @@ void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
+void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9062,7 +9007,7 @@ void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) {
+void HGraphBuilder::GenerateIsObject(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9071,12 +9016,12 @@ void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
+void HGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
   return Bailout("inlined runtime function: IsNonNegativeSmi");
 }
 
 
-void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
+void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9086,7 +9031,7 @@ void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
+void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
     CallRuntime* call) {
   return Bailout(
       "inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
@@ -9094,7 +9039,7 @@ void HOptimizedGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
 
 
 // Support for construct call checks.
-void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
+void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 0);
   if (function_state()->outer() != NULL) {
     // We are generating graph for inlined function.
@@ -9110,7 +9055,7 @@ void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
 
 
 // Support for arguments.length and arguments[?].
-void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
+void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
   // Our implementation of arguments (based on this stack frame or an
   // adapter below it) does not work for inlined functions.  This runtime
   // function is blacklisted by AstNode::IsInlineable.
@@ -9123,7 +9068,7 @@ void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
+void HGraphBuilder::GenerateArguments(CallRuntime* call) {
   // Our implementation of arguments (based on this stack frame or an
   // adapter below it) does not work for inlined functions.  This runtime
   // function is blacklisted by AstNode::IsInlineable.
@@ -9143,14 +9088,14 @@ void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
 
 
 // Support for accessing the class and value fields of an object.
-void HOptimizedGraphBuilder::GenerateClassOf(CallRuntime* call) {
+void HGraphBuilder::GenerateClassOf(CallRuntime* call) {
   // The special form detected by IsClassOfTest is handled before we get here
   // and does not cause a bailout.
   return Bailout("inlined runtime function: ClassOf");
 }
 
 
-void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
+void HGraphBuilder::GenerateValueOf(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9159,7 +9104,7 @@ void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
+void HGraphBuilder::GenerateDateField(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
   ASSERT_NE(NULL, call->arguments()->at(1)->AsLiteral());
   Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->handle()));
@@ -9170,7 +9115,7 @@ void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
+void HGraphBuilder::GenerateOneByteSeqStringSetChar(
     CallRuntime* call) {
   ASSERT(call->arguments()->length() == 3);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -9185,7 +9130,7 @@ void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
 }
 
 
-void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
+void HGraphBuilder::GenerateTwoByteSeqStringSetChar(
     CallRuntime* call) {
   ASSERT(call->arguments()->length() == 3);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -9203,7 +9148,7 @@ void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
 }
 
 
-void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
+void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9246,7 +9191,7 @@ void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
 
 
 // Fast support for charCodeAt(n).
-void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
+void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9259,7 +9204,7 @@ void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
 
 
 // Fast support for string.charAt(n) and string[n].
-void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
+void HGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* char_code = Pop();
@@ -9271,7 +9216,7 @@ void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
 
 
 // Fast support for string.charAt(n) and string[n].
-void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
+void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9287,7 +9232,7 @@ void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
 
 
 // Fast support for object equality testing.
-void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
+void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9299,14 +9244,14 @@ void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateLog(CallRuntime* call) {
+void HGraphBuilder::GenerateLog(CallRuntime* call) {
   // %_Log is ignored in optimized code.
   return ast_context()->ReturnValue(graph()->GetConstantUndefined());
 }
 
 
 // Fast support for Math.random().
-void HOptimizedGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
+void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
   HValue* context = environment()->LookupContext();
   HGlobalObject* global_object = new(zone()) HGlobalObject(context);
   AddInstruction(global_object);
@@ -9316,7 +9261,7 @@ void HOptimizedGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
 
 
 // Fast support for StringAdd.
-void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
+void HGraphBuilder::GenerateStringAdd(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9327,7 +9272,7 @@ void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
 
 
 // Fast support for SubString.
-void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
+void HGraphBuilder::GenerateSubString(CallRuntime* call) {
   ASSERT_EQ(3, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9338,7 +9283,7 @@ void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
 
 
 // Fast support for StringCompare.
-void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
+void HGraphBuilder::GenerateStringCompare(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9350,7 +9295,7 @@ void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
 
 
 // Support for direct calls from JavaScript to native RegExp code.
-void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
+void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
   ASSERT_EQ(4, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9361,7 +9306,7 @@ void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
 
 
 // Construct a RegExp exec result with two in-object properties.
-void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
+void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
   ASSERT_EQ(3, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9373,13 +9318,13 @@ void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
 
 
 // Support for fast native caches.
-void HOptimizedGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
+void HGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
   return Bailout("inlined runtime function: GetFromCache");
 }
 
 
 // Fast support for number to string.
-void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
+void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9391,7 +9336,7 @@ void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
 
 
 // Fast call for custom callbacks.
-void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
+void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
   // 1 ~ The function to call is not itself an argument to the call.
   int arg_count = call->arguments()->length() - 1;
   ASSERT(arg_count >= 1);  // There's always at least a receiver.
@@ -9435,7 +9380,7 @@ void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
 
 
 // Fast call to math functions.
-void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
+void HGraphBuilder::GenerateMathPow(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9446,7 +9391,7 @@ void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateMathSin(CallRuntime* call) {
+void HGraphBuilder::GenerateMathSin(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9458,7 +9403,7 @@ void HOptimizedGraphBuilder::GenerateMathSin(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateMathCos(CallRuntime* call) {
+void HGraphBuilder::GenerateMathCos(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9470,7 +9415,7 @@ void HOptimizedGraphBuilder::GenerateMathCos(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateMathTan(CallRuntime* call) {
+void HGraphBuilder::GenerateMathTan(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9482,7 +9427,7 @@ void HOptimizedGraphBuilder::GenerateMathTan(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) {
+void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9494,18 +9439,18 @@ void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
+void HGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
   return Bailout("inlined runtime function: MathSqrt");
 }
 
 
 // Check whether two RegExps are equivalent.
-void HOptimizedGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
+void HGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
   return Bailout("inlined runtime function: IsRegExpEquivalent");
 }
 
 
-void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
+void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9514,7 +9459,7 @@ void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
 }
 
 
-void HOptimizedGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
+void HGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
   return Bailout("inlined runtime function: FastAsciiArrayJoin");
 }
 
@@ -9544,23 +9489,6 @@ HEnvironment::HEnvironment(HEnvironment* outer,
 }
 
 
-HEnvironment::HEnvironment(Zone* zone)
-    : values_(0, zone),
-      assigned_variables_(0, zone),
-      frame_type_(STUB),
-      parameter_count_(0),
-      specials_count_(0),
-      local_count_(0),
-      outer_(NULL),
-      entry_(NULL),
-      pop_count_(0),
-      push_count_(0),
-      ast_id_(BailoutId::None()),
-      zone_(zone) {
-  Initialize(0, 0, 0);
-}
-
-
 HEnvironment::HEnvironment(const HEnvironment* other, Zone* zone)
     : values_(0, zone),
       assigned_variables_(0, zone),
@@ -9828,17 +9756,11 @@ void HEnvironment::PrintToStd() {
 }
 
 
-void HTracer::TraceCompilation(CompilationInfo* info) {
+void HTracer::TraceCompilation(FunctionLiteral* function) {
   Tag tag(this, "compilation");
-  if (info->IsOptimizing()) {
-    Handle<String> name = info->function()->debug_name();
-    PrintStringProperty("name", *name->ToCString());
-    PrintStringProperty("method", *name->ToCString());
-  } else {
-    CodeStub::Major major_key = info->code_stub()->MajorKey();
-    PrintStringProperty("name", CodeStub::MajorName(major_key, false));
-    PrintStringProperty("method", "stub");
-  }
+  Handle<String> name = function->debug_name();
+  PrintStringProperty("name", *name->ToCString());
+  PrintStringProperty("method", *name->ToCString());
   PrintLongProperty("date", static_cast<int64_t>(OS::TimeCurrentMillis()));
 }
 
index 0837bf9634e33cc088a7a8dea90e479161c65902..98b05d147a290d67e10cbe57daae8bc2f73b02ea 100644 (file)
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -429,8 +429,7 @@ enum FrameType {
   JS_CONSTRUCT,
   JS_GETTER,
   JS_SETTER,
-  ARGUMENTS_ADAPTOR,
-  STUB
+  ARGUMENTS_ADAPTOR
 };
 
 
@@ -441,8 +440,6 @@ class HEnvironment: public ZoneObject {
                Handle<JSFunction> closure,
                Zone* zone);
 
-  explicit HEnvironment(Zone* zone);
-
   HEnvironment* arguments_environment() {
     return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this;
   }
@@ -639,7 +636,7 @@ class HInferRepresentation BASE_EMBEDDED {
 };
 
 
-class HOptimizedGraphBuilder;
+class HGraphBuilder;
 
 enum ArgumentsAllowedFlag {
   ARGUMENTS_NOT_ALLOWED,
@@ -675,10 +672,10 @@ class AstContext {
   bool is_for_typeof() { return for_typeof_; }
 
  protected:
-  AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind);
+  AstContext(HGraphBuilder* owner, Expression::Context kind);
   virtual ~AstContext();
 
-  HOptimizedGraphBuilder* owner() const { return owner_; }
+  HGraphBuilder* owner() const { return owner_; }
 
   inline Zone* zone() const;
 
@@ -689,7 +686,7 @@ class AstContext {
 #endif
 
  private:
-  HOptimizedGraphBuilder* owner_;
+  HGraphBuilder* owner_;
   Expression::Context kind_;
   AstContext* outer_;
   bool for_typeof_;
@@ -698,7 +695,7 @@ class AstContext {
 
 class EffectContext: public AstContext {
  public:
-  explicit EffectContext(HOptimizedGraphBuilder* owner)
+  explicit EffectContext(HGraphBuilder* owner)
       : AstContext(owner, Expression::kEffect) {
   }
   virtual ~EffectContext();
@@ -711,7 +708,7 @@ class EffectContext: public AstContext {
 
 class ValueContext: public AstContext {
  public:
-  ValueContext(HOptimizedGraphBuilder* owner, ArgumentsAllowedFlag flag)
+  explicit ValueContext(HGraphBuilder* owner, ArgumentsAllowedFlag flag)
       : AstContext(owner, Expression::kValue), flag_(flag) {
   }
   virtual ~ValueContext();
@@ -729,7 +726,7 @@ class ValueContext: public AstContext {
 
 class TestContext: public AstContext {
  public:
-  TestContext(HOptimizedGraphBuilder* owner,
+  TestContext(HGraphBuilder* owner,
               Expression* condition,
               TypeFeedbackOracle* oracle,
               HBasicBlock* if_true,
@@ -769,7 +766,7 @@ class TestContext: public AstContext {
 
 class FunctionState {
  public:
-  FunctionState(HOptimizedGraphBuilder* owner,
+  FunctionState(HGraphBuilder* owner,
                 CompilationInfo* info,
                 TypeFeedbackOracle* oracle,
                 InliningKind inlining_kind);
@@ -799,7 +796,7 @@ class FunctionState {
   bool arguments_pushed() { return arguments_elements() != NULL; }
 
  private:
-  HOptimizedGraphBuilder* owner_;
+  HGraphBuilder* owner_;
 
   CompilationInfo* compilation_info_;
   TypeFeedbackOracle* oracle_;
@@ -831,65 +828,7 @@ class FunctionState {
 };
 
 
-class HGraphBuilder {
- public:
-  explicit HGraphBuilder(CompilationInfo* info)
-      : info_(info), graph_(NULL), current_block_(NULL) {}
-  virtual ~HGraphBuilder() {}
-
-  HBasicBlock* current_block() const { return current_block_; }
-  void set_current_block(HBasicBlock* block) { current_block_ = block; }
-  HEnvironment* environment() const {
-    return current_block()->last_environment();
-  }
-  Zone* zone() const { return info_->zone(); }
-  HGraph* graph() { return graph_; }
-
-  HGraph* CreateGraph();
-
-  // Adding instructions.
-  HInstruction* AddInstruction(HInstruction* instr);
-  void AddSimulate(BailoutId id,
-                   RemovableSimulate removable = FIXED_SIMULATE);
-
- protected:
-  virtual bool BuildGraph() = 0;
-
-  // Building common constructs
-  HInstruction* BuildExternalArrayElementAccess(
-      HValue* external_elements,
-      HValue* checked_key,
-      HValue* val,
-      HValue* dependency,
-      ElementsKind elements_kind,
-      bool is_store);
-
-  HInstruction* BuildFastElementAccess(
-      HValue* elements,
-      HValue* checked_key,
-      HValue* val,
-      HValue* dependency,
-      ElementsKind elements_kind,
-      bool is_store);
-
-  HInstruction* BuildUncheckedMonomorphicElementAccess(
-      HValue* object,
-      HValue* key,
-      HValue* val,
-      HCheckMaps* mapcheck,
-      bool is_js_array,
-      ElementsKind elements_kind,
-      bool is_store);
-
- private:
-  HGraphBuilder();
-  CompilationInfo* info_;
-  HGraph* graph_;
-  HBasicBlock* current_block_;
-};
-
-
-class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
+class HGraphBuilder: public AstVisitor {
  public:
   enum BreakType { BREAK, CONTINUE };
   enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH };
@@ -925,8 +864,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
   // structures mirroring BreakableStatement nesting.
   class BreakAndContinueScope BASE_EMBEDDED {
    public:
-    BreakAndContinueScope(BreakAndContinueInfo* info,
-                          HOptimizedGraphBuilder* owner)
+    BreakAndContinueScope(BreakAndContinueInfo* info, HGraphBuilder* owner)
         : info_(info), owner_(owner), next_(owner->break_scope()) {
       owner->set_break_scope(this);
     }
@@ -934,7 +872,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
     ~BreakAndContinueScope() { owner_->set_break_scope(next_); }
 
     BreakAndContinueInfo* info() { return info_; }
-    HOptimizedGraphBuilder* owner() { return owner_; }
+    HGraphBuilder* owner() { return owner_; }
     BreakAndContinueScope* next() { return next_; }
 
     // Search the break stack for a break or continue target.
@@ -942,20 +880,32 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
 
    private:
     BreakAndContinueInfo* info_;
-    HOptimizedGraphBuilder* owner_;
+    HGraphBuilder* owner_;
     BreakAndContinueScope* next_;
   };
 
-  HOptimizedGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
+  HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
 
-  virtual bool BuildGraph();
+  HGraph* CreateGraph();
 
   // Simple accessors.
+  HGraph* graph() const { return graph_; }
   BreakAndContinueScope* break_scope() const { return break_scope_; }
   void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; }
 
+  HBasicBlock* current_block() const { return current_block_; }
+  void set_current_block(HBasicBlock* block) { current_block_ = block; }
+  HEnvironment* environment() const {
+    return current_block()->last_environment();
+  }
+
   bool inline_bailout() { return inline_bailout_; }
 
+  // Adding instructions.
+  HInstruction* AddInstruction(HInstruction* instr);
+  void AddSimulate(BailoutId ast_id,
+                   RemovableSimulate removable = FIXED_SIMULATE);
+
   // Bailout environment manipulation.
   void Push(HValue* value) { environment()->Push(value); }
   HValue* Pop() { return environment()->Pop(); }
@@ -978,12 +928,9 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
   void operator delete(void* pointer, Zone* zone) { }
   void operator delete(void* pointer) { }
 
-  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
  private:
   // Type of a member function that generates inline code for a native function.
-  typedef void (HOptimizedGraphBuilder::*InlineFunctionGenerator)
-      (CallRuntime* call);
+  typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
 
   // Forward declarations for inner scope classes.
   class SubgraphScope;
@@ -1192,14 +1139,25 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
                                      HValue* right);
   HInstruction* BuildIncrement(bool returns_original_input,
                                CountOperation* expr);
-  HInstruction* BuildLoadKeyedGeneric(HValue* object,
-                                      HValue* key);
+  HInstruction* BuildFastElementAccess(HValue* elements,
+                                       HValue* checked_key,
+                                       HValue* val,
+                                       HValue* dependency,
+                                       ElementsKind elements_kind,
+                                       bool is_store);
 
   HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
                                                 HValue* key,
                                                 HValue* val,
                                                 SmallMapList* maps);
 
+  HInstruction* BuildUncheckedMonomorphicElementAccess(HValue* object,
+                                                       HValue* key,
+                                                       HValue* val,
+                                                       HCheckMaps* mapcheck,
+                                                       Handle<Map> map,
+                                                       bool is_store);
+
   HInstruction* BuildMonomorphicElementAccess(HValue* object,
                                               HValue* key,
                                               HValue* val,
@@ -1239,6 +1197,14 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
                                           Handle<String> name,
                                           Property* expr,
                                           Handle<Map> map);
+  HInstruction* BuildLoadKeyedGeneric(HValue* object, HValue* key);
+  HInstruction* BuildExternalArrayElementAccess(
+      HValue* external_elements,
+      HValue* checked_key,
+      HValue* val,
+      HValue* dependency,
+      ElementsKind elements_kind,
+      bool is_store);
 
   void AddCheckMapsWithTransitions(HValue* object,
                                    Handle<Map> map);
@@ -1280,6 +1246,8 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
                         HValue** operand,
                         HValue** shift_amount);
 
+  Zone* zone() const { return zone_; }
+
   // The translation state of the currently-being-translated function.
   FunctionState* function_state_;
 
@@ -1293,16 +1261,20 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
   // A stack of breakable statements entered.
   BreakAndContinueScope* break_scope_;
 
+  HGraph* graph_;
+  HBasicBlock* current_block_;
+
   int inlined_count_;
   ZoneList<Handle<Object> > globals_;
 
+  Zone* zone_;
+
   bool inline_bailout_;
 
   friend class FunctionState;  // Pushes and pops the state stack.
   friend class AstContext;  // Pushes and pops the AST context stack.
-  friend class KeyedLoadFastElementStub;
 
-  DISALLOW_COPY_AND_ASSIGN(HOptimizedGraphBuilder);
+  DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
 };
 
 
@@ -1475,7 +1447,7 @@ class HPhase BASE_EMBEDDED {
 
 class HTracer: public Malloced {
  public:
-  void TraceCompilation(CompilationInfo* info);
+  void TraceCompilation(FunctionLiteral* function);
   void TraceHydrogen(const char* name, HGraph* graph);
   void TraceLithium(const char* name, LChunk* chunk);
   void TraceLiveRanges(const char* name, LAllocator* allocator);
index f82defc5d595af088ad86df20878f3bfa57348ca..8cccaa5a744ccaaf11dd862ab2d45e55f16d9521 100644 (file)
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -55,33 +55,6 @@ uint64_t CpuFeatures::supported_ = 0;
 uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
 
 
-int IntelDoubleRegister::NumAllocatableRegisters() {
-  if (CpuFeatures::IsSupported(SSE2)) {
-    return XMMRegister::kNumAllocatableRegisters;
-  } else {
-    return X87TopOfStackRegister::kNumAllocatableRegisters;
-  }
-}
-
-
-int IntelDoubleRegister::NumRegisters() {
-  if (CpuFeatures::IsSupported(SSE2)) {
-    return XMMRegister::kNumRegisters;
-  } else {
-    return X87TopOfStackRegister::kNumRegisters;
-  }
-}
-
-
-const char* IntelDoubleRegister::AllocationIndexToString(int index) {
-  if (CpuFeatures::IsSupported(SSE2)) {
-    return XMMRegister::AllocationIndexToString(index);
-  } else {
-    return X87TopOfStackRegister::AllocationIndexToString(index);
-  }
-}
-
-
 // The Probe method needs executable memory, so it uses Heap::CreateCode.
 // Allocation failure is silent and leads to safe default.
 void CpuFeatures::Probe() {
@@ -2226,8 +2199,7 @@ void Assembler::prefetch(const Operand& src, int level) {
   EnsureSpace ensure_space(this);
   EMIT(0x0F);
   EMIT(0x18);
-  // Emit hint number in Reg position of ModR/M.
-  XMMRegister code = XMMRegister::from_code(level);
+  XMMRegister code = { level };  // Emit hint number in Reg position of ModR/M.
   emit_sse_operand(code, src);
 }
 
index 232a85e23a9c78afa7937b948f22c289bb22354a..b1f421ec86730388f2e8d72c1e8b6c1e5b980c2a 100644 (file)
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -65,10 +65,7 @@ namespace internal {
 // and best performance in optimized code.
 //
 struct Register {
-  static const int kMaxNumAllocatableRegisters = 6;
-  static int NumAllocatableRegisters() {
-    return kMaxNumAllocatableRegisters;
-  }
+  static const int kNumAllocatableRegisters = 6;
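+  // The six allocatable registers are eax, ecx, edx, ebx, esi and edi;
+  // esp and ebp are excluded from allocation.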
   static const int kNumRegisters = 8;
 
   static inline const char* AllocationIndexToString(int index);
@@ -122,7 +119,7 @@ const Register no_reg = { kRegister_no_reg_Code };
 
 
 inline const char* Register::AllocationIndexToString(int index) {
-  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
   // This is the mapping of allocation indices to registers.
   const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
   return kNames[index];
@@ -136,69 +133,22 @@ inline int Register::ToAllocationIndex(Register reg) {
 
 
 inline Register Register::FromAllocationIndex(int index)  {
-  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
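+  // Indices 4 and up skip over esp (code 4) and ebp (code 5), which are
+  // never allocated.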
   return (index >= 4) ? from_code(index + 2) : from_code(index);
 }
 
 
-struct IntelDoubleRegister {
-  static const int kMaxNumAllocatableRegisters = 7;
-  static int NumAllocatableRegisters();
-  static int NumRegisters();
-  static const char* AllocationIndexToString(int index);
-
-  static int ToAllocationIndex(IntelDoubleRegister reg) {
-    ASSERT(reg.code() != 0);
-    return reg.code() - 1;
-  }
-
-  static IntelDoubleRegister FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < NumAllocatableRegisters());
-    return from_code(index + 1);
-  }
-
-  static IntelDoubleRegister from_code(int code) {
-    IntelDoubleRegister result = { code };
-    return result;
-  }
-
-  bool is_valid() const {
-    return 0 <= code_ && code_ < NumRegisters();
-  }
-  int code() const {
-    ASSERT(is_valid());
-    return code_;
-  }
-
-  int code_;
-};
-
-
-const IntelDoubleRegister double_register_0 = { 0 };
-const IntelDoubleRegister double_register_1 = { 1 };
-const IntelDoubleRegister double_register_2 = { 2 };
-const IntelDoubleRegister double_register_3 = { 3 };
-const IntelDoubleRegister double_register_4 = { 4 };
-const IntelDoubleRegister double_register_5 = { 5 };
-const IntelDoubleRegister double_register_6 = { 6 };
-const IntelDoubleRegister double_register_7 = { 7 };
-
-
-struct XMMRegister : IntelDoubleRegister {
+struct XMMRegister {
   static const int kNumAllocatableRegisters = 7;
   static const int kNumRegisters = 8;
 
-  static XMMRegister from_code(int code) {
-    STATIC_ASSERT(sizeof(XMMRegister) == sizeof(IntelDoubleRegister));
-    XMMRegister result;
-    result.code_ = code;
-    return result;
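+  // Allocation indices exclude xmm0 (index 0 maps to xmm1), keeping xmm0
+  // free for use as a scratch register.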
+  static int ToAllocationIndex(XMMRegister reg) {
+    ASSERT(reg.code() != 0);
+    return reg.code() - 1;
   }
 
-  bool is(XMMRegister reg) const { return code_ == reg.code_; }
-
   static XMMRegister FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < NumAllocatableRegisters());
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
     return from_code(index + 1);
   }
 
@@ -215,46 +165,34 @@ struct XMMRegister : IntelDoubleRegister {
     };
     return names[index];
   }
-};
-
 
-#define xmm0 (static_cast<const XMMRegister&>(double_register_0))
-#define xmm1 (static_cast<const XMMRegister&>(double_register_1))
-#define xmm2 (static_cast<const XMMRegister&>(double_register_2))
-#define xmm3 (static_cast<const XMMRegister&>(double_register_3))
-#define xmm4 (static_cast<const XMMRegister&>(double_register_4))
-#define xmm5 (static_cast<const XMMRegister&>(double_register_5))
-#define xmm6 (static_cast<const XMMRegister&>(double_register_6))
-#define xmm7 (static_cast<const XMMRegister&>(double_register_7))
-
-
-struct X87TopOfStackRegister : IntelDoubleRegister {
-  static const int kNumAllocatableRegisters = 1;
-  static const int kNumRegisters = 1;
-
-  bool is(X87TopOfStackRegister reg) const {
-    return code_ == reg.code_;
+  static XMMRegister from_code(int code) {
+    XMMRegister r = { code };
+    return r;
   }
 
-  static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
-    const char* const names[] = {
-      "st0",
-    };
-    return names[index];
+  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+  bool is(XMMRegister reg) const { return code_ == reg.code_; }
+  int code() const {
+    ASSERT(is_valid());
+    return code_;
   }
 
-  static int ToAllocationIndex(X87TopOfStackRegister reg) {
-    ASSERT(reg.code() == 0);
-    return 0;
-  }
+  int code_;
 };
 
-#define x87tos \
-  static_cast<const X87TopOfStackRegister&>(double_register_0)
+
+const XMMRegister xmm0 = { 0 };
+const XMMRegister xmm1 = { 1 };
+const XMMRegister xmm2 = { 2 };
+const XMMRegister xmm3 = { 3 };
+const XMMRegister xmm4 = { 4 };
+const XMMRegister xmm5 = { 5 };
+const XMMRegister xmm6 = { 6 };
+const XMMRegister xmm7 = { 7 };
 
 
-typedef IntelDoubleRegister DoubleRegister;
+typedef XMMRegister DoubleRegister;
 
 
 enum Condition {
index cadff4986f158ceddfd7c33f440725998700a994..01785bb53e25400eda7668ffd94fb3e12088a374 100644 (file)
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -574,25 +574,6 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
 
 
-void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
-  // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    // Preserve registers across notification; this is important for compiled
-    // stubs that tail call the runtime on deopts passing their parameters in
-    // registers.
-    __ pushad();
-    __ CallRuntime(Runtime::kNotifyICMiss, 0);
-    __ popad();
-    // Tear down internal frame.
-  }
-
-  __ pop(MemOperand(esp, 0));  // Ignore state offset
-  __ ret(0);  // Return to IC Miss stub, continuation still on stack.
-}
-
-
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {
index 23dfc24b6e9d3f12897b23ad33fe11751896b7ff..da8e2ae4576796285ee2fcbd769beeb3a3f05e39 100644 (file)
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
 namespace v8 {
 namespace internal {
 
-
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, ecx };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      isolate->builtins()->KeyedLoadIC_Miss();
-}
-
-
 #define __ ACCESS_MASM(masm)
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
@@ -2438,7 +2426,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
 
     __ bind(&loaded);
   } else {  // UNTAGGED.
-    CpuFeatures::Scope scope(SSE2);
     if (CpuFeatures::IsSupported(SSE4_1)) {
       CpuFeatures::Scope sse4_scope(SSE4_1);
       __ pextrd(edx, xmm1, 0x1);  // copy xmm1[63..32] to edx.
@@ -2511,7 +2498,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
     __ fstp(0);
     __ ret(kPointerSize);
   } else {  // UNTAGGED.
-    CpuFeatures::Scope scope(SSE2);
     __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ Ret();
   }
@@ -2524,7 +2510,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   if (tagged) {
     __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
   } else {  // UNTAGGED.
-    CpuFeatures::Scope scope(SSE2);
     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
     __ sub(esp, Immediate(kDoubleSize));
     __ movdbl(Operand(esp, 0), xmm1);
@@ -2539,7 +2524,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   if (tagged) {
     __ ret(kPointerSize);
   } else {  // UNTAGGED.
-    CpuFeatures::Scope scope(SSE2);
     __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ Ret();
 
@@ -2572,7 +2556,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
         ExternalReference(RuntimeFunction(), masm->isolate());
     __ TailCallExternalReference(runtime, 1, 1);
   } else {  // UNTAGGED.
-    CpuFeatures::Scope scope(SSE2);
     __ bind(&runtime_call_clear_stack);
     __ bind(&runtime_call);
     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
@@ -4825,17 +4808,10 @@ void CodeStub::GenerateStubsAheadOfTime() {
 
 
 void CodeStub::GenerateFPStubs() {
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CEntryStub save_doubles(1, kSaveFPRegs);
-    // Stubs might already be in the snapshot, detect that and don't regenerate,
-    // which would lead to code stub initialization state being messed up.
-    Code* save_doubles_code;
-    if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
-      save_doubles_code = *(save_doubles.GetCode());
-    }
-    save_doubles_code->set_is_pregenerated(true);
-    save_doubles_code->GetIsolate()->set_fp_stubs_generated(true);
-  }
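+  // Pregenerate the FP-saving CEntryStub, mark it as pregenerated, and
+  // record on the isolate that FP stubs now exist.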
+  CEntryStub save_doubles(1, kSaveFPRegs);
+  Handle<Code> code = save_doubles.GetCode();
+  code->set_is_pregenerated(true);
+  code->GetIsolate()->set_fp_stubs_generated(true);
 }
 
 
index 4f8c81f043f90a4f2cb7312b562c30ea42c025d9..29c16e1304ba17f5520dfcbe67dcd23de197a38f 100644 (file)
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -38,7 +38,7 @@ namespace internal {
 
 // Compute a transcendental math function natively, or call the
 // TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
+class TranscendentalCacheStub: public CodeStub {
  public:
   enum ArgumentType {
     TAGGED = 0,
@@ -61,7 +61,7 @@ class TranscendentalCacheStub: public PlatformCodeStub {
 };
 
 
-class StoreBufferOverflowStub: public PlatformCodeStub {
+class StoreBufferOverflowStub: public CodeStub {
  public:
   explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
       : save_doubles_(save_fp) { }
@@ -80,7 +80,7 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
 };
 
 
-class UnaryOpStub: public PlatformCodeStub {
+class UnaryOpStub: public CodeStub {
  public:
   UnaryOpStub(Token::Value op,
               UnaryOverwriteMode mode,
@@ -225,7 +225,7 @@ enum StringAddFlags {
 };
 
 
-class StringAddStub: public PlatformCodeStub {
+class StringAddStub: public CodeStub {
  public:
   explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
 
@@ -247,7 +247,7 @@ class StringAddStub: public PlatformCodeStub {
 };
 
 
-class SubStringStub: public PlatformCodeStub {
+class SubStringStub: public CodeStub {
  public:
   SubStringStub() {}
 
@@ -259,7 +259,7 @@ class SubStringStub: public PlatformCodeStub {
 };
 
 
-class StringCompareStub: public PlatformCodeStub {
+class StringCompareStub: public CodeStub {
  public:
   StringCompareStub() { }
 
@@ -295,7 +295,7 @@ class StringCompareStub: public PlatformCodeStub {
 };
 
 
-class NumberToStringStub: public PlatformCodeStub {
+class NumberToStringStub: public CodeStub {
  public:
   NumberToStringStub() { }
 
@@ -320,7 +320,7 @@ class NumberToStringStub: public PlatformCodeStub {
 };
 
 
-class StringDictionaryLookupStub: public PlatformCodeStub {
+class StringDictionaryLookupStub: public CodeStub {
  public:
   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
 
@@ -382,7 +382,7 @@ class StringDictionaryLookupStub: public PlatformCodeStub {
 };
 
 
-class RecordWriteStub: public PlatformCodeStub {
+class RecordWriteStub: public CodeStub {
  public:
   RecordWriteStub(Register object,
                   Register value,
@@ -585,7 +585,7 @@ class RecordWriteStub: public PlatformCodeStub {
     Register GetRegThatIsNotEcxOr(Register r1,
                                   Register r2,
                                   Register r3) {
-      for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
         Register candidate = Register::FromAllocationIndex(i);
         if (candidate.is(ecx)) continue;
         if (candidate.is(r1)) continue;
index 8fd7316355135d893caafb8c26beba576ea35b9d..c9ecaccd2daf318ffd3fbcaafca193a0e2965c6c 100644 (file)
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -295,7 +295,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
 
 void Deoptimizer::DoComputeOsrOutputFrame() {
   DeoptimizationInputData* data = DeoptimizationInputData::cast(
-      compiled_code_->deoptimization_data());
+      optimized_code_->deoptimization_data());
   unsigned ast_id = data->OsrAstId()->value();
   // TODO(kasperl): This should not be the bailout_id_. It should be
   // the ast id. Confusing.
@@ -332,7 +332,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
   unsigned input_frame_size = input_->GetFrameSize();
   ASSERT(fixed_size + height_in_bytes == input_frame_size);
 
-  unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
+  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
   unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
   unsigned outgoing_size = outgoing_height * kPointerSize;
   unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -443,7 +443,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
 
     unsigned pc_offset = data->OsrPcOffset()->value();
     uint32_t pc = reinterpret_cast<uint32_t>(
-        compiled_code_->entry() + pc_offset);
+        optimized_code_->entry() + pc_offset);
     output_[0]->SetPc(pc);
   }
   Code* continuation =
@@ -557,70 +557,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
 }
 
 
-void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
-                                      int frame_index) {
-  //
-  //               FROM                                  TO             <-ebp
-  //    |          ....           |          |          ....           |
-  //    +-------------------------+          +-------------------------+
-  //    | JSFunction continuation |          | JSFunction continuation |
-  //    +-------------------------+          +-------------------------+<-esp
-  // |  |   saved frame (ebp)     |
-  // |  +=========================+<-ebp
-  // |  |   JSFunction context    |
-  // v  +-------------------------+
-  //    |   COMPILED_STUB marker  |          ebp = saved frame
-  //    +-------------------------+          esi = JSFunction context
-  //    |                         |
-  //    | ...                     |
-  //    |                         |
-  //    +-------------------------+<-esp
-  //
-  //
-  int output_frame_size = 1 * kPointerSize;
-  FrameDescription* output_frame =
-      new(output_frame_size) FrameDescription(output_frame_size, 0);
-  Code* notify_miss =
-      isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
-  output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
-  output_frame->SetContinuation(
-      reinterpret_cast<uint32_t>(notify_miss->entry()));
-
-  ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
-  int major_key = compiled_code_->major_key();
-  CodeStubInterfaceDescriptor* descriptor =
-      isolate_->code_stub_interface_descriptor(major_key);
-  Handle<Code> miss_ic(descriptor->deoptimization_handler_);
-  output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
-  unsigned input_frame_size = input_->GetFrameSize();
-  intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
-  output_frame->SetFrameSlot(0, value);
-  value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
-  output_frame->SetRegister(ebp.code(), value);
-  output_frame->SetFp(value);
-  value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
-  output_frame->SetRegister(esi.code(), value);
-
-  Translation::Opcode opcode =
-      static_cast<Translation::Opcode>(iterator->Next());
-  ASSERT(opcode == Translation::REGISTER);
-  USE(opcode);
-  int input_reg = iterator->Next();
-  intptr_t input_value = input_->GetRegister(input_reg);
-  output_frame->SetRegister(edx.code(), input_value);
-
-  int32_t next = iterator->Next();
-  opcode = static_cast<Translation::Opcode>(next);
-  ASSERT(opcode == Translation::REGISTER);
-  input_reg = iterator->Next();
-  input_value = input_->GetRegister(input_reg);
-  output_frame->SetRegister(ecx.code(), input_value);
-
-  ASSERT(frame_index == 0);
-  output_[frame_index] = output_frame;
-}
-
-
 void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
                                               int frame_index) {
   Builtins* builtins = isolate_->builtins();
@@ -1049,7 +985,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
   }
   input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
   input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
     input_->SetDoubleRegister(i, 0.0);
   }
 
@@ -1064,6 +1000,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
 
 void Deoptimizer::EntryGenerator::Generate() {
   GeneratePrologue();
+  CpuFeatures::Scope scope(SSE2);
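+  // The entry code below saves and restores double registers with movdbl,
+  // so SSE2 support is assumed throughout.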
 
   Isolate* isolate = masm()->isolate();
 
@@ -1073,13 +1010,10 @@ void Deoptimizer::EntryGenerator::Generate() {
   const int kDoubleRegsSize = kDoubleSize *
                               XMMRegister::kNumAllocatableRegisters;
   __ sub(esp, Immediate(kDoubleRegsSize));
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope scope(SSE2);
-    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
-      XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
-      int offset = i * kDoubleSize;
-      __ movdbl(Operand(esp, offset), xmm_reg);
-    }
+  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+    XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+    int offset = i * kDoubleSize;
+    __ movdbl(Operand(esp, offset), xmm_reg);
   }
 
   __ pushad();
@@ -1127,18 +1061,14 @@ void Deoptimizer::EntryGenerator::Generate() {
     __ pop(Operand(ebx, offset));
   }
 
+  // Fill in the double input registers.
   int double_regs_offset = FrameDescription::double_registers_offset();
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope scope(SSE2);
-    // Fill in the double input registers.
-    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
-      int dst_offset = i * kDoubleSize + double_regs_offset;
-      int src_offset = i * kDoubleSize;
-      __ movdbl(xmm0, Operand(esp, src_offset));
-      __ movdbl(Operand(ebx, dst_offset), xmm0);
-    }
+  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+    int dst_offset = i * kDoubleSize + double_regs_offset;
+    int src_offset = i * kDoubleSize;
+    __ movdbl(xmm0, Operand(esp, src_offset));
+    __ movdbl(Operand(ebx, dst_offset), xmm0);
   }
-  __ fninit();
 
   // Remove the bailout id and the double registers from the stack.
   if (type() == EAGER) {
@@ -1156,13 +1086,10 @@ void Deoptimizer::EntryGenerator::Generate() {
   // limit and copy the contents of the activation frame to the input
   // frame description.
   __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
-  Label pop_loop_header;
-  __ jmp(&pop_loop_header);
   Label pop_loop;
   __ bind(&pop_loop);
   __ pop(Operand(edx, 0));
   __ add(edx, Immediate(sizeof(uint32_t)));
-  __ bind(&pop_loop_header);
   __ cmp(ecx, esp);
   __ j(not_equal, &pop_loop);
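Deleting the pop_loop_header jump and bind undoes a loop rotation: the rotated form tests ecx against esp before the first pop (a while loop), while the restored form pops first (a do-while), which is safe as long as the copied frame is never empty. The restored shape, restated in C++:

#include <cstdint>

// Body-first (`do ... while`) copy, matching the restored assembly.
void CopyFrameWords(uint32_t* dst, uint32_t* sp, uint32_t* limit) {
  do {
    *dst++ = *sp++;       // __ pop(Operand(edx, 0)); __ add(edx, 4)
  } while (sp != limit);  // __ cmp(ecx, esp); __ j(not_equal, &pop_loop)
}
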
 
@@ -1200,39 +1127,31 @@ void Deoptimizer::EntryGenerator::Generate() {
   }
 
   // Replace the current frame with the output frames.
-  Label outer_push_loop, inner_push_loop,
-      outer_loop_header, inner_loop_header;
+  Label outer_push_loop, inner_push_loop;
   // Outer loop state: eax = current FrameDescription**, edx = one past the
   // last FrameDescription**.
   __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
   __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
   __ lea(edx, Operand(eax, edx, times_4, 0));
-  __ jmp(&outer_loop_header);
   __ bind(&outer_push_loop);
   // Inner loop state: ebx = current FrameDescription*, ecx = loop index.
   __ mov(ebx, Operand(eax, 0));
   __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
-  __ jmp(&inner_loop_header);
   __ bind(&inner_push_loop);
   __ sub(ecx, Immediate(sizeof(uint32_t)));
   __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
-  __ bind(&inner_loop_header);
   __ test(ecx, ecx);
   __ j(not_zero, &inner_push_loop);
   __ add(eax, Immediate(kPointerSize));
-  __ bind(&outer_loop_header);
   __ cmp(eax, edx);
   __ j(below, &outer_push_loop);
 
   // In case of OSR, we have to restore the XMM registers.
   if (type() == OSR) {
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatures::Scope scope(SSE2);
-      for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
-        XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
-        int src_offset = i * kDoubleSize + double_regs_offset;
-        __ movdbl(xmm_reg, Operand(ebx, src_offset));
-      }
+    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+      XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+      int src_offset = i * kDoubleSize + double_regs_offset;
+      __ movdbl(xmm_reg, Operand(ebx, src_offset));
     }
   }
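The outer/inner push loops walk an array of FrameDescription pointers (eax up to edx) and push each frame's contents from the highest offset down, so earlier frames end up deeper on the machine stack. An equivalent traversal, sketched with hypothetical types:

#include <cstdint>
#include <cstring>
#include <vector>

struct Frame { size_t size; const unsigned char* content; };  // hypothetical

void PushOutputFrames(Frame** output, int count, std::vector<uint32_t>* stack) {
  for (Frame** f = output; f != output + count; ++f) {   // outer loop
    for (size_t off = (*f)->size; off > 0; off -= 4) {   // inner loop, ecx
      uint32_t word;
      std::memcpy(&word, (*f)->content + off - 4, sizeof(word));
      stack->push_back(word);                            // __ push(...)
    }
  }
}
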
 
index 0bc8aef6ebc5bc27e40bf088a24f5cd9ba8e1e49..e03f73323d40968466de4162346693092d4dc77e 100644 (file)
@@ -30,7 +30,6 @@
 #if defined(V8_TARGET_ARCH_IA32)
 
 #include "ia32/lithium-codegen-ia32.h"
-#include "ic.h"
 #include "code-stubs.h"
 #include "deoptimizer.h"
 #include "stub-cache.h"
@@ -71,6 +70,7 @@ bool LCodeGen::GenerateCode() {
   HPhase phase("Z_Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
+  CpuFeatures::Scope scope(SSE2);
 
   CodeStub::GenerateFPStubs();
 
@@ -79,15 +79,13 @@ bool LCodeGen::GenerateCode() {
   // the frame (that is done in GeneratePrologue).
   FrameScope frame_scope(masm_, StackFrame::MANUAL);
 
-  dynamic_frame_alignment_ = info()->IsOptimizing() &&
-      ((chunk()->num_double_slots() > 2 &&
-        !chunk()->graph()->is_recursive()) ||
-       !info()->osr_ast_id().IsNone());
+  dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 &&
+                              !chunk()->graph()->is_recursive()) ||
+                             !info()->osr_ast_id().IsNone();
 
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
-      GenerateJumpTable() &&
       GenerateSafepointTable();
 }
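Each Generate* phase returns !is_aborted(), so this && chain stops at the first phase that called Abort() and skips the rest; the revert simply drops the jump-table phase from the chain. The convention, restated as a sketch:

// Every phase follows the same contract: emit code, call Abort("reason")
// on failure, and return !is_aborted(). The driver then short-circuits:
bool PhaseA();  // e.g. GeneratePrologue
bool PhaseB();  // e.g. GenerateBody
bool PhaseC();  // e.g. GenerateSafepointTable
bool GenerateAll() { return PhaseA() && PhaseB() && PhaseC(); }
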
 
@@ -97,9 +95,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
   code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
-  if (!info()->IsStub()) {
-    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
-  }
+  Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
 }
 
 
@@ -130,126 +126,113 @@ void LCodeGen::Comment(const char* format, ...) {
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
-  if (info()->IsOptimizing()) {
-    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
 #ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
-      __ int3();
-    }
+  if (strlen(FLAG_stop_at) > 0 &&
+      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+    __ int3();
+  }
 #endif
 
-    // Strict mode functions and builtins need to replace the receiver
-    // with undefined when called as functions (without an explicit
-    // receiver object). ecx is zero for method calls and non-zero for
-    // function calls.
-    if (!info_->is_classic_mode() || info_->is_native()) {
-      Label ok;
-      __ test(ecx, Operand(ecx));
-      __ j(zero, &ok, Label::kNear);
-      // +1 for return address.
-      int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
-      __ mov(Operand(esp, receiver_offset),
-             Immediate(isolate()->factory()->undefined_value()));
-      __ bind(&ok);
-    }
+  // Strict mode functions and builtins need to replace the receiver
+  // with undefined when called as functions (without an explicit
+  // receiver object). ecx is zero for method calls and non-zero for
+  // function calls.
+  if (!info_->is_classic_mode() || info_->is_native()) {
+    Label ok;
+    __ test(ecx, Operand(ecx));
+    __ j(zero, &ok, Label::kNear);
+    // +1 for return address.
+    int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+    __ mov(Operand(esp, receiver_offset),
+           Immediate(isolate()->factory()->undefined_value()));
+    __ bind(&ok);
+  }
 
-    if (dynamic_frame_alignment_) {
-      // Move state of dynamic frame alignment into edx.
-      __ mov(edx, Immediate(kNoAlignmentPadding));
-
-      Label do_not_pad, align_loop;
-      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
-      // Align esp + 4 to a multiple of 2 * kPointerSize.
-      __ test(esp, Immediate(kPointerSize));
-      __ j(not_zero, &do_not_pad, Label::kNear);
-      __ push(Immediate(0));
-      __ mov(ebx, esp);
-      __ mov(edx, Immediate(kAlignmentPaddingPushed));
-      // Copy arguments, receiver, and return address.
-      __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
-      __ bind(&align_loop);
-      __ mov(eax, Operand(ebx, 1 * kPointerSize));
-      __ mov(Operand(ebx, 0), eax);
-      __ add(Operand(ebx), Immediate(kPointerSize));
-      __ dec(ecx);
-      __ j(not_zero, &align_loop, Label::kNear);
-      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
-      __ bind(&do_not_pad);
-    }
+
+  if (dynamic_frame_alignment_) {
+    // Move state of dynamic frame alignment into edx.
+    __ mov(edx, Immediate(kNoAlignmentPadding));
+
+    Label do_not_pad, align_loop;
+    STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+    // Align esp + 4 to a multiple of 2 * kPointerSize.
+    __ test(esp, Immediate(kPointerSize));
+    __ j(not_zero, &do_not_pad, Label::kNear);
+    __ push(Immediate(0));
+    __ mov(ebx, esp);
+    __ mov(edx, Immediate(kAlignmentPaddingPushed));
+    // Copy arguments, receiver, and return address.
+    __ mov(ecx, Immediate(scope()->num_parameters() + 2));
+
+    __ bind(&align_loop);
+    __ mov(eax, Operand(ebx, 1 * kPointerSize));
+    __ mov(Operand(ebx, 0), eax);
+    __ add(Operand(ebx), Immediate(kPointerSize));
+    __ dec(ecx);
+    __ j(not_zero, &align_loop, Label::kNear);
+    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+    __ bind(&do_not_pad);
   }
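The alignment test reads: when esp & kPointerSize is non-zero, esp + 4 is already a multiple of 2 * kPointerSize and padding is skipped; otherwise one zero word is pushed and the arguments, receiver, and return address are slid down over it, with the zap value marking the vacated slot. The arithmetic, checkable on ia32 (kPointerSize == 4 assumed):

#include <cassert>
#include <cstdint>

// True when a one-word pad must be pushed so (esp + 4) % 8 == 0 afterwards.
bool NeedsAlignmentPadding(uintptr_t esp) {
  return (esp & 4) == 0;  // mirrors __ test(esp, Immediate(kPointerSize))
}

int main() {
  assert(!NeedsAlignmentPadding(0x1004));  // 0x1004 + 4 == 0x1008, aligned
  assert(NeedsAlignmentPadding(0x1000));   // after push: 0x0ffc + 4 == 0x1000
}
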
 
   info()->set_prologue_offset(masm_->pc_offset());
-  if (NeedsEagerFrame()) {
-    ASSERT(!frame_is_built_);
-    frame_is_built_ = true;
-    __ push(ebp);  // Caller's frame pointer.
-    __ mov(ebp, esp);
-    __ push(esi);  // Callee's context.
-    if (info()->IsStub()) {
-      __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
-    } else {
-      __ push(edi);  // Callee's JS function.
-    }
-  }
+  __ push(ebp);  // Caller's frame pointer.
+  __ mov(ebp, esp);
+  __ push(esi);  // Callee's context.
+  __ push(edi);  // Callee's JS function.
 
-  if (info()->IsOptimizing() &&
-      dynamic_frame_alignment_ &&
-      FLAG_debug_code) {
+  if (dynamic_frame_alignment_ && FLAG_debug_code) {
     __ test(esp, Immediate(kPointerSize));
     __ Assert(zero, "frame is expected to be aligned");
   }
 
   // Reserve space for the stack slots needed by the code.
   int slots = GetStackSlotCount();
-  ASSERT(slots != 0 || !info()->IsOptimizing());
-  if (slots > 0) {
-    if (slots == 1) {
-      if (dynamic_frame_alignment_) {
-        __ push(edx);
-      } else {
-        __ push(Immediate(kNoAlignmentPadding));
-      }
+  ASSERT_GE(slots, 1);
+  if (slots == 1) {
+    if (dynamic_frame_alignment_) {
+      __ push(edx);
     } else {
-      if (FLAG_debug_code) {
-        __ mov(Operand(eax), Immediate(slots));
-        Label loop;
-        __ bind(&loop);
-        __ push(Immediate(kSlotsZapValue));
-        __ dec(eax);
-        __ j(not_zero, &loop);
-      } else {
-        __ sub(Operand(esp), Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
-        // On Windows, you may not access the stack more than one page below
-        // the most recently mapped page. To make the allocated area randomly
-        // accessible, we write to each page in turn (the value is irrelevant).
-        const int kPageSize = 4 * KB;
-        for (int offset = slots * kPointerSize - kPageSize;
-             offset > 0;
-             offset -= kPageSize) {
-          __ mov(Operand(esp, offset), eax);
-        }
-#endif
+      __ push(Immediate(kNoAlignmentPadding));
+    }
+  } else {
+    if (FLAG_debug_code) {
+      __ mov(Operand(eax), Immediate(slots));
+      Label loop;
+      __ bind(&loop);
+      __ push(Immediate(kSlotsZapValue));
+      __ dec(eax);
+      __ j(not_zero, &loop);
+    } else {
+      __ sub(Operand(esp), Immediate(slots * kPointerSize));
+  #ifdef _MSC_VER
+      // On Windows, you may not access the stack more than one page below
+      // the most recently mapped page. To make the allocated area randomly
+      // accessible, we write to each page in turn (the value is irrelevant).
+      const int kPageSize = 4 * KB;
+      for (int offset = slots * kPointerSize - kPageSize;
+           offset > 0;
+           offset -= kPageSize) {
+        __ mov(Operand(esp, offset), eax);
       }
+  #endif
+    }
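The _MSC_VER block is a stack probe: Windows commits stack memory one guard page at a time, so after a large esp adjustment each 4 KB page must be touched from the top of the new area downwards before arbitrary offsets are used. The probe pattern, restated:

const int kPageSize = 4 * 1024;

// Touch one word per page, nearest the old esp first, so the guard-page
// mechanism commits the pages in order; the written value is irrelevant.
void ProbeStackPages(volatile char* new_esp, int frame_bytes) {
  for (int offset = frame_bytes - kPageSize; offset > 0; offset -= kPageSize) {
    new_esp[offset] = 0;  // __ mov(Operand(esp, offset), eax)
  }
}
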
 
-      // Store dynamic frame alignment state in the first local.
-      if (dynamic_frame_alignment_) {
-        __ mov(Operand(ebp,
-                       JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
-               edx);
-      } else {
-        __ mov(Operand(ebp,
-                       JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
-               Immediate(kNoAlignmentPadding));
-      }
+    // Store dynamic frame alignment state in the first local.
+    if (dynamic_frame_alignment_) {
+      __ mov(Operand(ebp,
+                     JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
+             edx);
+    } else {
+      __ mov(Operand(ebp,
+                     JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
+             Immediate(kNoAlignmentPadding));
     }
   }
 
   // Possibly allocate a local context.
-  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is still in edi.
@@ -289,7 +272,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   // Trace the call.
-  if (FLAG_trace && info()->IsOptimizing()) {
+  if (FLAG_trace) {
     // We have not executed any compiled code yet, so esi still holds the
     // incoming context.
     __ CallRuntime(Runtime::kTraceEnter, 0);
@@ -343,102 +326,16 @@ bool LCodeGen::GenerateBody() {
 }
 
 
-bool LCodeGen::GenerateJumpTable() {
-  Label needs_frame_not_call;
-  Label needs_frame_is_call;
-  for (int i = 0; i < jump_table_.length(); i++) {
-    __ bind(&jump_table_[i].label);
-    Address entry = jump_table_[i].address;
-    if (jump_table_[i].needs_frame) {
-      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
-      if (jump_table_[i].is_lazy_deopt) {
-        if (needs_frame_is_call.is_bound()) {
-          __ jmp(&needs_frame_is_call);
-        } else {
-          __ bind(&needs_frame_is_call);
-          __ push(esi);
-          // This variant of deopt can only be used with stubs. Since we don't
-          // have a function pointer to install in the stack frame that we're
-          // building, install a special marker there instead.
-          ASSERT(info()->IsStub());
-          __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
-          // Push a PC inside the function so that the deopt code can find where
-          // the deopt comes from. It doesn't have to be the precise return
-          // address of a "calling" LAZY deopt, it only has to be somewhere
-          // inside the code body.
-          Label push_approx_pc;
-          __ call(&push_approx_pc);
-          __ bind(&push_approx_pc);
-          // Push the continuation which was stashed where the ebp should
-          // be. Replace it with the saved ebp.
-          __ push(MemOperand(esp, 3 * kPointerSize));
-          __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
-          __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
-          __ ret(0);  // Call the continuation without clobbering registers.
-        }
-      } else {
-        if (needs_frame_not_call.is_bound()) {
-          __ jmp(&needs_frame_not_call);
-        } else {
-          __ bind(&needs_frame_not_call);
-          __ push(esi);
-          // This variant of deopt can only be used with stubs. Since we don't
-          // have a function pointer to install in the stack frame that we're
-          // building, install a special marker there instead.
-          ASSERT(info()->IsStub());
-          __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
-          // Push the continuation which was stashed where the ebp should
-          // be. Replace it with the saved ebp.
-          __ push(MemOperand(esp, 2 * kPointerSize));
-          __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
-          __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
-          __ ret(0);  // Call the continuation without clobbering registers.
-        }
-      }
-    } else {
-      if (jump_table_[i].is_lazy_deopt) {
-        __ call(entry, RelocInfo::RUNTIME_ENTRY);
-      } else {
-        __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
-      }
-    }
-  }
-  return !is_aborted();
-}
-
-
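The deleted GenerateJumpTable routed out-of-line deoptimization jumps through shared trampolines: consecutive deopt sites with the same target address, frame requirement, and laziness reuse one label, and a trampoline that needs a frame pushes the STUB marker before transferring control. The dedup step (it also appears in the DeoptimizeIf hunk further down), as a self-contained sketch:

#include <cstdint>
#include <vector>

struct JumpTableEntry {   // mirrors the removed struct in the header hunk below
  uintptr_t address;      // deopt entry to reach
  bool needs_frame;       // trampoline must build a STUB-marker frame first
  bool is_lazy_deopt;     // call (lazy) vs. jmp (eager) into the entry
};

// Append only when the new site cannot share the previous trampoline; the
// emitter then branches to the label of table->back().
void AddDeoptJump(std::vector<JumpTableEntry>* table, uintptr_t entry,
                  bool needs_frame, bool is_lazy) {
  if (table->empty() || table->back().address != entry ||
      table->back().needs_frame != needs_frame ||
      table->back().is_lazy_deopt != is_lazy) {
    table->push_back({entry, needs_frame, is_lazy});
  }
}
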
 bool LCodeGen::GenerateDeferredCode() {
   ASSERT(is_generating());
   if (deferred_.length() > 0) {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
-      if (NeedsDeferredFrame()) {
-        Comment(";;; Deferred build frame",
-                code->instruction_index(),
-                code->instr()->Mnemonic());
-        ASSERT(!frame_is_built_);
-        ASSERT(info()->IsStub());
-        frame_is_built_ = true;
-        // Build the frame in such a way that esi isn't trashed.
-        __ push(ebp);  // Caller's frame pointer.
-        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
-        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
-        __ lea(ebp, Operand(esp, 2 * kPointerSize));
-      }
       Comment(";;; Deferred code @%d: %s.",
               code->instruction_index(),
               code->instr()->Mnemonic());
       code->Generate();
-      if (NeedsDeferredFrame()) {
-        Comment(";;; Deferred destroy frame",
-                code->instruction_index(),
-                code->instr()->Mnemonic());
-        ASSERT(frame_is_built_);
-        frame_is_built_ = false;
-        __ mov(esp, ebp);
-        __ pop(ebp);
-      }
       __ jmp(code->exit());
     }
   }
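Deferred code keeps the fast path straight-line: the main body branches to code->entry() for its slow case, the deferred bodies are emitted after the main body, and each one ends with a jump back to code->exit(). The shape of the helper class, simplified (Label is a placeholder here):

struct Label {};  // placeholder for the assembler's Label type

class DeferredCode {  // simplified sketch of LDeferredCode
 public:
  Label* entry() { return &entry_; }  // fast path: __ j(cond, code->entry())
  Label* exit() { return &exit_; }    // deferred body: __ jmp(code->exit())
  virtual void Generate() = 0;        // emits the out-of-line slow case
  virtual ~DeferredCode() {}
 private:
  Label entry_, exit_;
};
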
@@ -452,15 +349,6 @@ bool LCodeGen::GenerateDeferredCode() {
 
 bool LCodeGen::GenerateSafepointTable() {
   ASSERT(is_done());
-  if (!info()->IsStub()) {
-    // For lazy deoptimization we need space to patch a call after every call.
-    // Ensure there is always space for such patching, even if the code ends
-    // in a call.
-    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
-    while (masm()->pc_offset() < target_offset) {
-      masm()->nop();
-    }
-  }
   safepoints_.Emit(masm(), GetStackSlotCount());
   return !is_aborted();
 }
@@ -476,11 +364,6 @@ XMMRegister LCodeGen::ToDoubleRegister(int index) const {
 }
 
 
-bool LCodeGen::IsX87TopOfStack(LOperand* op) const {
-  return op->IsDoubleRegister();
-}
-
-
 Register LCodeGen::ToRegister(LOperand* op) const {
   ASSERT(op->IsRegister());
   return ToRegister(op->index());
@@ -566,9 +449,7 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
                    translation,
                    arguments_index,
                    arguments_count);
-  bool has_closure_id = !info()->closure().is_null() &&
-      *info()->closure() != *environment->closure();
-  int closure_id = has_closure_id
+  int closure_id = *info()->closure() != *environment->closure()
       ? DefineDeoptimizationLiteral(environment->closure())
       : Translation::kSelfLiteralId;
   switch (environment->frame_type()) {
@@ -591,11 +472,6 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
     case ARGUMENTS_ADAPTOR:
       translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
       break;
-    case STUB:
-      translation->BeginCompiledStubFrame();
-      break;
-    default:
-      UNREACHABLE();
   }
 
   // Inlined frames which push their arguments cause the index to be
@@ -730,8 +606,6 @@ void LCodeGen::CallRuntime(const Runtime::Function* fun,
   __ CallRuntime(fun, argc);
 
   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-
-  ASSERT(info()->is_calling());
 }
 
 
@@ -756,8 +630,6 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
   __ CallRuntimeSaveDoubles(id);
   RecordSafepointWithRegisters(
       instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
-
-  ASSERT(info()->is_calling());
 }
 
 
@@ -803,11 +675,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
-  ASSERT(info()->IsOptimizing() || info()->IsStub());
-  Deoptimizer::BailoutType bailout_type = frame_is_built_
-      ? Deoptimizer::EAGER
-      : Deoptimizer::LAZY;
-  Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
+  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
   if (entry == NULL) {
     Abort("bailout was not prepared");
     return;
@@ -841,44 +709,19 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
     __ popfd();
   }
 
-  ASSERT(info()->IsStub() || frame_is_built_);
-  bool lazy_deopt_needed = info()->IsStub();
   if (cc == no_condition) {
     if (FLAG_trap_on_deopt) __ int3();
-    if (lazy_deopt_needed) {
-      __ call(entry, RelocInfo::RUNTIME_ENTRY);
-    } else {
-      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
-    }
+    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
-    Label done;
     if (FLAG_trap_on_deopt) {
+      Label done;
       __ j(NegateCondition(cc), &done, Label::kNear);
       __ int3();
-    }
-    if (!lazy_deopt_needed && frame_is_built_) {
-      if (FLAG_trap_on_deopt) {
-        __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
-      } else {
-        __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
-      }
+      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+      __ bind(&done);
     } else {
-      // We often have several deopts to the same entry; reuse the last
-      // jump entry if this is the case.
-      if (jump_table_.is_empty() ||
-          jump_table_.last().address != entry ||
-          jump_table_.last().needs_frame != !frame_is_built_ ||
-          jump_table_.last().is_lazy_deopt != lazy_deopt_needed) {
-        JumpTableEntry table_entry(entry, !frame_is_built_, lazy_deopt_needed);
-        jump_table_.Add(table_entry, zone());
-      }
-      if (FLAG_trap_on_deopt) {
-        __ jmp(&jump_table_.last().label);
-      } else {
-        __ j(cc, &jump_table_.last().label);
-      }
+      __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
     }
-    __ bind(&done);
   }
 }
 
@@ -1616,8 +1459,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
     int32_t lower = static_cast<int32_t>(int_val);
     int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
     if (CpuFeatures::IsSupported(SSE4_1)) {
-      CpuFeatures::Scope scope1(SSE2);
-      CpuFeatures::Scope scope2(SSE4_1);
+      CpuFeatures::Scope scope(SSE4_1);
       if (lower != 0) {
         __ Set(temp, Immediate(lower));
         __ movd(res, Operand(temp));
@@ -1629,7 +1471,6 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
         __ pinsrd(res, Operand(temp), 1);
       }
     } else {
-      CpuFeatures::Scope scope(SSE2);
       __ Set(temp, Immediate(upper));
       __ movd(res, Operand(temp));
       __ psllq(res, 32);
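DoConstantD materializes a double by splitting its IEEE-754 bit pattern into 32-bit halves: with SSE4.1, each non-zero half is inserted with movd/pinsrd; with plain SSE2, the upper half is moved in and shifted high with psllq before the lower half is merged. The split itself is checkable (1.0 has upper half 0x3FF00000 and lower half 0, so only one insert is needed):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  double v = 1.0;
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));        // BitCast<uint64_t>(v)
  uint32_t lower = static_cast<uint32_t>(bits);
  uint32_t upper = static_cast<uint32_t>(bits >> 32);
  assert(lower == 0 && upper == 0x3FF00000u);  // 1.0 == 0x3FF0000000000000
}
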
@@ -1792,7 +1633,6 @@ void LCodeGen::DoAddI(LAddI* instr) {
 
 
 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
-  CpuFeatures::Scope scope(SSE2);
   LOperand* left = instr->left();
   LOperand* right = instr->right();
   ASSERT(left->Equals(instr->result()));
@@ -1854,7 +1694,6 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
 
 
 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
-  CpuFeatures::Scope scope(SSE2);
   XMMRegister left = ToDoubleRegister(instr->left());
   XMMRegister right = ToDoubleRegister(instr->right());
   XMMRegister result = ToDoubleRegister(instr->result());
@@ -1865,8 +1704,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
       __ addsd(left, right);
       break;
     case Token::SUB:
-      __ subsd(left, right);
-      break;
+       __ subsd(left, right);
+       break;
     case Token::MUL:
       __ mulsd(left, right);
       break;
@@ -1939,7 +1778,6 @@ void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
 void LCodeGen::DoBranch(LBranch* instr) {
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
-  CpuFeatures::Scope scope(SSE2);
 
   Representation r = instr->hydrogen()->value()->representation();
   if (r.IsInteger32()) {
@@ -2099,7 +1937,6 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
   int false_block = chunk_->LookupDestination(instr->false_block_id());
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   Condition cc = TokenToCondition(instr->op(), instr->is_double());
-  CpuFeatures::Scope scope(SSE2);
 
   if (left->IsConstantOperand() && right->IsConstantOperand()) {
     // We can statically evaluate the comparison.
@@ -2609,7 +2446,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
 
 
 void LCodeGen::DoReturn(LReturn* instr) {
-  if (FLAG_trace && info()->IsOptimizing()) {
+  if (FLAG_trace) {
     // Preserve the return value on the stack and rely on the runtime call
     // to return the value in the same register.  We're leaving the code
     // managed by the register allocator and tearing down the frame, it's
@@ -2623,10 +2460,8 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ mov(edx, Operand(ebp,
       JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
   }
-  if (NeedsEagerFrame()) {
-    __ mov(esp, ebp);
-    __ pop(ebp);
-  }
+  __ mov(esp, ebp);
+  __ pop(ebp);
   if (dynamic_frame_alignment_) {
     Label no_padding;
     __ cmp(edx, Immediate(kNoAlignmentPadding));
@@ -2639,12 +2474,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
     __ bind(&no_padding);
   }
-  if (info()->IsStub()) {
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-    __ Ret();
-  } else {
-    __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
-  }
+  __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
 }
 
 
@@ -3020,23 +2850,11 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
       0,
       instr->additional_index()));
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatures::Scope scope(SSE2);
-      XMMRegister result(ToDoubleRegister(instr->result()));
-      __ movss(result, operand);
-      __ cvtss2sd(result, result);
-    } else {
-      __ fld_s(operand);
-      HandleX87FPReturnValue(instr);
-    }
+    XMMRegister result(ToDoubleRegister(instr->result()));
+    __ movss(result, operand);
+    __ cvtss2sd(result, result);
   } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatures::Scope scope(SSE2);
-      __ movdbl(ToDoubleRegister(instr->result()), operand);
-    } else {
-      __ fld_d(operand);
-      HandleX87FPReturnValue(instr);
-    }
+    __ movdbl(ToDoubleRegister(instr->result()), operand);
   } else {
     Register result(ToRegister(instr->result()));
     switch (elements_kind) {
@@ -3080,30 +2898,9 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
 }
 
 
-void LCodeGen::HandleX87FPReturnValue(LInstruction* instr) {
-  if (IsX87TopOfStack(instr->result())) {
-    // Return value is already on stack. If the value has no uses, then
-    // pop it off the FP stack. Otherwise, make sure that there are enough
-    // copies of the value on the stack to feed all of the usages, e.g.
-    // when the following instruction uses the return value in multiple
-    // inputs.
-    int count = instr->hydrogen_value()->UseCount();
-    if (count == 0) {
-      __ fstp(0);
-    } else {
-      count--;
-      ASSERT(count <= 7);
-      while (count-- > 0) {
-        __ fld(0);
-      }
-    }
-  } else {
-    __ fstp_d(ToOperand(instr->result()));
-  }
-}
-
-
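The removed helper hand-balances the x87 register stack: the loaded value sits in st(0), a value with no uses is popped with fstp(0), and each use beyond the first gets a duplicate pushed with fld(0); the ASSERT(count <= 7) reflects the 8-slot depth of the x87 stack. The counting rule, restated:

#include <cassert>

// Number of fld(0) duplicates needed for `use_count` consumers of st(0);
// zero consumers means fstp(0) instead (discard the value).
int ExtraX87Copies(int use_count) {
  return use_count == 0 ? 0 : use_count - 1;
}

int main() {
  assert(ExtraX87Copies(0) == 0);  // popped with fstp(0), nothing duplicated
  assert(ExtraX87Copies(3) == 2);  // st(0) plus two fld(0) copies feed 3 uses
}
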
 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+  XMMRegister result = ToDoubleRegister(instr->result());
+
   if (instr->hydrogen()->RequiresHoleCheck()) {
     int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
         sizeof(kHoleNanLower32);
@@ -3124,14 +2921,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
       FAST_DOUBLE_ELEMENTS,
       FixedDoubleArray::kHeaderSize - kHeapObjectTag,
       instr->additional_index());
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope scope(SSE2);
-    XMMRegister result = ToDoubleRegister(instr->result());
-    __ movdbl(result, double_load_operand);
-  } else {
-    __ fld_d(double_load_operand);
-    HandleX87FPReturnValue(instr);
-  }
+  __ movdbl(result, double_load_operand);
 }
 
 
@@ -3547,7 +3337,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
   ASSERT(instr->value()->Equals(instr->result()));
   Representation r = instr->hydrogen()->value()->representation();
 
-  CpuFeatures::Scope scope(SSE2);
   if (r.IsDouble()) {
     XMMRegister  scratch = xmm0;
     XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3569,7 +3358,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
-  CpuFeatures::Scope scope(SSE2);
   XMMRegister xmm_scratch = xmm0;
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3634,7 +3422,6 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
 }
 
 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
-  CpuFeatures::Scope scope(SSE2);
   XMMRegister xmm_scratch = xmm0;
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3680,7 +3467,6 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
-  CpuFeatures::Scope scope(SSE2);
   XMMRegister input_reg = ToDoubleRegister(instr->value());
   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
   __ sqrtsd(input_reg, input_reg);
@@ -3688,7 +3474,6 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
-  CpuFeatures::Scope scope(SSE2);
   XMMRegister xmm_scratch = xmm0;
   XMMRegister input_reg = ToDoubleRegister(instr->value());
   Register scratch = ToRegister(instr->temp());
@@ -3765,7 +3550,6 @@ void LCodeGen::DoRandom(LRandom* instr) {
 
   DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
 
-  CpuFeatures::Scope scope(SSE2);
   // Having marked this instruction as a call we can use any
   // registers.
   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
@@ -3833,7 +3617,6 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
 
 
 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
-  CpuFeatures::Scope scope(SSE2);
   ASSERT(instr->value()->Equals(instr->result()));
   XMMRegister input_reg = ToDoubleRegister(instr->value());
   Label positive, done, zero;
@@ -3865,7 +3648,6 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoMathExp(LMathExp* instr) {
-  CpuFeatures::Scope scope(SSE2);
   XMMRegister input = ToDoubleRegister(instr->value());
   XMMRegister result = ToDoubleRegister(instr->result());
   Register temp1 = ToRegister(instr->temp1());
@@ -4134,11 +3916,6 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
     }
     DeoptimizeIf(below_equal, instr->environment());
   } else {
-    if (instr->hydrogen()->index()->representation().IsTagged() &&
-        !instr->hydrogen()->index()->type().IsSmi()) {
-      __ test(ToRegister(instr->index()), Immediate(kSmiTagMask));
-      DeoptimizeIf(not_zero, instr->environment());
-    }
     __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
     DeoptimizeIf(above_equal, instr->environment());
   }
@@ -4161,11 +3938,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
       0,
       instr->additional_index()));
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-    CpuFeatures::Scope scope(SSE2);
     __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
     __ movss(operand, xmm0);
   } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-    CpuFeatures::Scope scope(SSE2);
     __ movdbl(operand, ToDoubleRegister(instr->value()));
   } else {
     Register value = ToRegister(instr->value());
@@ -4201,7 +3976,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
 
 
 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
-  CpuFeatures::Scope scope(SSE2);
   XMMRegister value = ToDoubleRegister(instr->value());
 
   if (instr->NeedsCanonicalization()) {
@@ -4452,21 +4226,15 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
 
 
 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope scope(SSE2);
-    LOperand* input = instr->value();
-    ASSERT(input->IsRegister() || input->IsStackSlot());
-    LOperand* output = instr->result();
-    ASSERT(output->IsDoubleRegister());
-    __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
-  } else {
-    UNREACHABLE();
-  }
+  LOperand* input = instr->value();
+  ASSERT(input->IsRegister() || input->IsStackSlot());
+  LOperand* output = instr->result();
+  ASSERT(output->IsDoubleRegister());
+  __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
 }
 
 
 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
-  CpuFeatures::Scope scope(SSE2);
   LOperand* input = instr->value();
   LOperand* output = instr->result();
   LOperand* temp = instr->temp();
@@ -4544,21 +4312,9 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
     // the value in there. If that fails, call the runtime system.
     __ SmiUntag(reg);
     __ xor_(reg, 0x80000000);
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatures::Scope feature_scope(SSE2);
-      __ cvtsi2sd(xmm0, Operand(reg));
-    } else {
-      __ push(reg);
-      __ fild_s(Operand(esp, 0));
-      __ pop(reg);
-    }
+    __ cvtsi2sd(xmm0, Operand(reg));
   } else {
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatures::Scope feature_scope(SSE2);
-      __ LoadUint32(xmm0, reg, xmm1);
-    } else {
-      UNREACHABLE();
-    }
+    __ LoadUint32(xmm0, reg, xmm1);
   }
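The SmiUntag/xor pair above recovers an int32 whose smi tagging overflowed: tagging shifts left by one, so a value in [2^30, 2^31) loses its top bit into the sign position, and an arithmetic shift right followed by xor with 0x80000000 restores it. Checkable in plain C++ (assuming arithmetic >> on signed values, as sar does):

#include <cassert>
#include <cstdint>

// __ SmiUntag(reg);            sar by the 1-bit smi tag
// __ xor_(reg, 0x80000000);    flip the sign bit back
int32_t RecoverFromOverflowedSmi(int32_t tagged) {
  return static_cast<int32_t>((tagged >> 1) ^ 0x80000000u);
}

int main() {
  int32_t x = 0x40000000;  // 2^30: one bit too wide for a 31-bit smi payload
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(x) << 1);
  assert(RecoverFromOverflowedSmi(tagged) == x);
}
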
 
   if (FLAG_inline_new) {
@@ -4587,12 +4343,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
   // Done. Put the value in xmm0 into the value of the allocated heap
   // number.
   __ bind(&done);
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope feature_scope(SSE2);
-    __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
-  } else {
-    __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
-  }
+  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
   __ StoreToSafepointRegisterSlot(reg, reg);
 }
 
@@ -4608,6 +4359,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
     LNumberTagD* instr_;
   };
 
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
   Register reg = ToRegister(instr->result());
   Register tmp = ToRegister(instr->temp());
 
@@ -4618,16 +4370,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
     __ jmp(deferred->entry());
   }
   __ bind(deferred->exit());
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope scope(SSE2);
-    XMMRegister input_reg = ToDoubleRegister(instr->value());
-    __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
-  } else {
-    if (!IsX87TopOfStack(instr->value())) {
-    __ fld_d(ToOperand(instr->value()));
-    }
-    __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
-  }
+  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
 }
 
 
@@ -4771,7 +4514,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
       __ mov(input_reg, Operand(esp, 0));  // Low word of answer is the result.
       __ add(Operand(esp), Immediate(kDoubleSize));
     } else {
-      CpuFeatures::Scope scope(SSE2);
       XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
       __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
       __ cvttsd2si(input_reg, Operand(xmm0));
@@ -4785,8 +4527,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
       DeoptimizeIf(not_equal, instr->environment());
       DeoptimizeIf(parity_even, instr->environment());  // NaN.
     }
-  } else if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope scope(SSE2);
+  } else {
     // Deoptimize if we don't have a heap number.
     __ RecordComment("Deferred TaggedToI: not a heap number");
     DeoptimizeIf(not_equal, instr->environment());
@@ -4808,8 +4549,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
       __ RecordComment("Deferred TaggedToI: minus zero");
       DeoptimizeIf(not_zero, instr->environment());
     }
-  } else {
-    UNREACHABLE();
   }
   __ bind(&done);
 }
@@ -4852,24 +4591,19 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   LOperand* result = instr->result();
   ASSERT(result->IsDoubleRegister());
 
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope scope(SSE2);
-    Register input_reg = ToRegister(input);
-    XMMRegister result_reg = ToDoubleRegister(result);
-
-    bool deoptimize_on_minus_zero =
-        instr->hydrogen()->deoptimize_on_minus_zero();
-    Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
-
-    EmitNumberUntagD(input_reg,
-                     temp_reg,
-                     result_reg,
-                     instr->hydrogen()->deoptimize_on_undefined(),
-                     deoptimize_on_minus_zero,
-                     instr->environment());
-  } else {
-    UNIMPLEMENTED();
-  }
+  Register input_reg = ToRegister(input);
+  XMMRegister result_reg = ToDoubleRegister(result);
+
+  bool deoptimize_on_minus_zero =
+      instr->hydrogen()->deoptimize_on_minus_zero();
+  Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
+
+  EmitNumberUntagD(input_reg,
+                   temp_reg,
+                   result_reg,
+                   instr->hydrogen()->deoptimize_on_undefined(),
+                   deoptimize_on_minus_zero,
+                   instr->environment());
 }
 
 
@@ -4878,7 +4612,6 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   ASSERT(input->IsDoubleRegister());
   LOperand* result = instr->result();
   ASSERT(result->IsRegister());
-  CpuFeatures::Scope scope(SSE2);
 
   XMMRegister input_reg = ToDoubleRegister(input);
   Register result_reg = ToRegister(result);
@@ -5068,10 +4801,10 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
 void LCodeGen::DoCheckMapCommon(Register reg,
                                 Handle<Map> map,
                                 CompareMapMode mode,
-                                LInstruction* instr) {
+                                LEnvironment* env) {
   Label success;
   __ CompareMap(reg, map, &success, mode);
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, env);
   __ bind(&success);
 }
 
@@ -5089,13 +4822,12 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
     __ j(equal, &success);
   }
   Handle<Map> map = map_set->last();
-  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
+  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
   __ bind(&success);
 }
 
 
 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
-  CpuFeatures::Scope scope(SSE2);
   XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
   Register result_reg = ToRegister(instr->result());
   __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
@@ -5110,8 +4842,6 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
 
 
 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
-  CpuFeatures::Scope scope(SSE2);
-
   ASSERT(instr->unclamped()->Equals(instr->result()));
   Register input_reg = ToRegister(instr->unclamped());
   Label is_smi, done, heap_number;
@@ -5158,7 +4888,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
     DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
-                     ALLOW_ELEMENT_TRANSITION_MAPS, instr);
+                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
 
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
@@ -5168,7 +4898,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
 
   // Check the holder map.
   DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
-                   ALLOW_ELEMENT_TRANSITION_MAPS, instr);
+                   ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
 }
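DoCheckPrototypeMaps deoptimizes unless every object from the receiver's prototype up to the holder still carries the map recorded at compile time: one DoCheckMapCommon per chain link, then one for the holder itself. A hypothetical runtime restatement of the same walk:

struct Obj { const void* map; const Obj* prototype; };  // hypothetical types
const void* ExpectedMapFor(const Obj*);  // the compile-time recorded map

bool PrototypeChainIntact(const Obj* current, const Obj* holder) {
  while (current != holder) {
    if (current->map != ExpectedMapFor(current)) return false;  // deopt
    current = current->prototype;
  }
  return holder->map == ExpectedMapFor(holder);  // final holder check
}
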
 
 
@@ -5705,15 +5435,13 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
 
 
 void LCodeGen::EnsureSpaceForLazyDeopt() {
-  if (!info()->IsStub()) {
-    // Ensure that we have enough space after the previous lazy-bailout
-    // instruction for patching the code here.
-    int current_pc = masm()->pc_offset();
-    int patch_size = Deoptimizer::patch_size();
-    if (current_pc < last_lazy_deopt_pc_ + patch_size) {
-      int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
-      __ Nop(padding_size);
-    }
+  // Ensure that we have enough space after the previous lazy-bailout
+  // instruction for patching the code here.
+  int current_pc = masm()->pc_offset();
+  int patch_size = Deoptimizer::patch_size();
+  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+    __ Nop(padding_size);
   }
   last_lazy_deopt_pc_ = masm()->pc_offset();
 }
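EnsureSpaceForLazyDeopt pads with nops so that two consecutive lazy-bailout sites never overlap: patching a lazy deopt overwrites patch_size() bytes at the previous site, so the next site must begin at least that far along. The computation, with an illustrative size (on ia32 the patch is assumed to be a near call):

// Nop bytes needed before the next lazy-deopt site may be emitted.
int PaddingBytes(int current_pc, int last_pc, int patch_size) {
  int padding = last_pc + patch_size - current_pc;
  return padding > 0 ? padding : 0;  // emitted as __ Nop(padding)
}
// e.g. last_pc = 100, patch_size = 5 (assumed): current_pc 102 -> 3 nops.
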
index 63d15f4d62f7eae12171e753a6b77e7805762bef..44ddaffcd41ab038c00db70110761f8d9bb5331e 100644 (file)
@@ -55,7 +55,6 @@ class LCodeGen BASE_EMBEDDED {
         current_instruction_(-1),
         instructions_(chunk->instructions()),
         deoptimizations_(4, info->zone()),
-        jump_table_(4, info->zone()),
         deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
@@ -65,7 +64,6 @@ class LCodeGen BASE_EMBEDDED {
         dynamic_frame_alignment_(false),
         osr_pc_offset_(-1),
         last_lazy_deopt_pc_(0),
-        frame_is_built_(false),
         safepoints_(info->zone()),
         resolver_(this),
         expected_safepoint_kind_(Safepoint::kSimple) {
@@ -80,20 +78,10 @@ class LCodeGen BASE_EMBEDDED {
   Heap* heap() const { return isolate()->heap(); }
   Zone* zone() const { return zone_; }
 
-  bool NeedsEagerFrame() const {
-    return GetStackSlotCount() > 0 ||
-        info()->is_non_deferred_calling() ||
-        !info()->IsStub();
-  }
-  bool NeedsDeferredFrame() const {
-    return !NeedsEagerFrame() && info()->is_deferred_calling();
-  }
-
   // Support for converting LOperands to assembler types.
   Operand ToOperand(LOperand* op) const;
   Register ToRegister(LOperand* op) const;
   XMMRegister ToDoubleRegister(LOperand* op) const;
-  bool IsX87TopOfStack(LOperand* op) const;
 
   bool IsInteger32(LConstantOperand* op) const;
   Immediate ToInteger32Immediate(LOperand* op) const {
@@ -102,9 +90,6 @@ class LCodeGen BASE_EMBEDDED {
 
   Handle<Object> ToHandle(LConstantOperand* op) const;
 
-  // A utility for instructions that return floating point values on X87.
-  void HandleX87FPReturnValue(LInstruction* instr);
-
   // The operand denoting the second word (the one with a higher address) of
   // a double stack slot.
   Operand HighOperand(LOperand* op);
@@ -137,7 +122,7 @@ class LCodeGen BASE_EMBEDDED {
                                        Label* map_check);
 
   void DoCheckMapCommon(Register reg, Handle<Map> map,
-                        CompareMapMode mode, LInstruction* instr);
+                        CompareMapMode mode, LEnvironment* env);
 
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
@@ -187,7 +172,7 @@ class LCodeGen BASE_EMBEDDED {
                        Register temporary2);
 
   int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
-  int GetParameterCount() const { return info()->num_parameters(); }
+  int GetParameterCount() const { return scope()->num_parameters(); }
 
   void Abort(const char* reason);
   void Comment(const char* format, ...);
@@ -199,7 +184,9 @@ class LCodeGen BASE_EMBEDDED {
   bool GeneratePrologue();
   bool GenerateBody();
   bool GenerateDeferredCode();
-  bool GenerateJumpTable();
+  // Pad the reloc info to ensure that we have enough space to patch during
+  // deoptimization.
+  bool GenerateRelocPadding();
   bool GenerateSafepointTable();
 
   enum SafepointMode {
@@ -369,23 +356,10 @@ class LCodeGen BASE_EMBEDDED {
   MacroAssembler* const masm_;
   CompilationInfo* const info_;
 
-  struct JumpTableEntry {
-    inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
-        : label(),
-          address(entry),
-          needs_frame(frame),
-          is_lazy_deopt(is_lazy) { }
-    Label label;
-    Address address;
-    bool needs_frame;
-    bool is_lazy_deopt;
-  };
-
   int current_block_;
   int current_instruction_;
   const ZoneList<LInstruction*>* instructions_;
   ZoneList<LEnvironment*> deoptimizations_;
-  ZoneList<JumpTableEntry> jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;
@@ -395,7 +369,6 @@ class LCodeGen BASE_EMBEDDED {
   bool dynamic_frame_alignment_;
   int osr_pc_offset_;
   int last_lazy_deopt_pc_;
-  bool frame_is_built_;
 
   // Builder that keeps track of safepoints in the code. The table
   // itself is emitted at the end of the generated code.
@@ -413,7 +386,6 @@ class LCodeGen BASE_EMBEDDED {
       ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
       codegen_->masm_->PushSafepointRegisters();
       codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
-      ASSERT(codegen_->info()->is_calling());
     }
 
     ~PushSafepointRegistersScope() {
index 7cb15e6eb5ac8cef169f51613af2c7f0feac876c..6428916fefa5119662d5583b10855a5a75276af7 100644 (file)
@@ -191,7 +191,7 @@ int LGapResolver::CountSourceUses(LOperand* operand) {
 
 Register LGapResolver::GetFreeRegisterNot(Register reg) {
   int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
-  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
+  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
     if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
       return Register::FromAllocationIndex(i);
     }
@@ -204,7 +204,7 @@ bool LGapResolver::HasBeenReset() {
   if (!moves_.is_empty()) return false;
   if (spilled_register_ >= 0) return false;
 
-  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
+  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
     if (source_uses_[i] != 0) return false;
     if (destination_uses_[i] != 0) return false;
   }
@@ -256,7 +256,7 @@ Register LGapResolver::EnsureTempRegister() {
 
   // 3. Prefer to spill a register that is not used in any remaining move
   // because it will not need to be restored until the end.
-  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
+  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
     if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
       Register scratch = Register::FromAllocationIndex(i);
       __ push(scratch);
@@ -324,38 +324,29 @@ void LGapResolver::EmitMove(int index) {
     }
 
   } else if (source->IsDoubleRegister()) {
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatures::Scope scope(SSE2);
-      XMMRegister src = cgen_->ToDoubleRegister(source);
-      if (destination->IsDoubleRegister()) {
-        XMMRegister dst = cgen_->ToDoubleRegister(destination);
-        __ movaps(dst, src);
-      } else {
-        ASSERT(destination->IsDoubleStackSlot());
-        Operand dst = cgen_->ToOperand(destination);
-        __ movdbl(dst, src);
-      }
+    XMMRegister src = cgen_->ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      XMMRegister dst = cgen_->ToDoubleRegister(destination);
+      __ movaps(dst, src);
     } else {
-      UNREACHABLE();
+      ASSERT(destination->IsDoubleStackSlot());
+      Operand dst = cgen_->ToOperand(destination);
+      __ movdbl(dst, src);
     }
   } else if (source->IsDoubleStackSlot()) {
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatures::Scope scope(SSE2);
-      ASSERT(destination->IsDoubleRegister() ||
-             destination->IsDoubleStackSlot());
-      Operand src = cgen_->ToOperand(source);
-      if (destination->IsDoubleRegister()) {
-        XMMRegister dst = cgen_->ToDoubleRegister(destination);
-        __ movdbl(dst, src);
-      } else {
-        // We rely on having xmm0 available as a fixed scratch register.
-        Operand dst = cgen_->ToOperand(destination);
-        __ movdbl(xmm0, src);
-        __ movdbl(dst, xmm0);
-      }
+    ASSERT(destination->IsDoubleRegister() ||
+           destination->IsDoubleStackSlot());
+    Operand src = cgen_->ToOperand(source);
+    if (destination->IsDoubleRegister()) {
+      XMMRegister dst = cgen_->ToDoubleRegister(destination);
+      __ movdbl(dst, src);
     } else {
-      UNREACHABLE();
+      // We rely on having xmm0 available as a fixed scratch register.
+      Operand dst = cgen_->ToOperand(destination);
+      __ movdbl(xmm0, src);
+      __ movdbl(dst, xmm0);
     }
+
   } else {
     UNREACHABLE();
   }
@@ -419,7 +410,6 @@ void LGapResolver::EmitSwap(int index) {
       __ mov(src, tmp0);
     }
   } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
-    CpuFeatures::Scope scope(SSE2);
     // XMM register-register swap. We rely on having xmm0
     // available as a fixed scratch register.
     XMMRegister src = cgen_->ToDoubleRegister(source);
index 3a58f585c31910fc058698d31a7cf89e18ff8d23..0c81d72ee3d728d0edc6ac5efa070f26fedee81e 100644 (file)
@@ -97,8 +97,8 @@ class LGapResolver BASE_EMBEDDED {
   ZoneList<LMoveOperands> moves_;
 
   // Source and destination use counts for the general purpose registers.
-  int source_uses_[Register::kMaxNumAllocatableRegisters];
-  int destination_uses_[Register::kMaxNumAllocatableRegisters];
+  int source_uses_[Register::kNumAllocatableRegisters];
+  int destination_uses_[Register::kNumAllocatableRegisters];
 
   // If we had to spill on demand, the currently spilled register's
   // allocation index.
index 6e075937977f83461a7ab19be17c061a4be23a49..d7ac7a8b149ffa07d343bf2f265ae94f0cc87c05 100644 (file)
@@ -44,10 +44,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
 #undef DEFINE_COMPILE
 
 LOsrEntry::LOsrEntry() {
-  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
+  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
     register_spills_[i] = NULL;
   }
-  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
     double_register_spills_[i] = NULL;
   }
 }
@@ -460,11 +460,9 @@ LPlatformChunk* LChunkBuilder::Build() {
   status_ = BUILDING;
 
   // Reserve the first spill slot for the state of dynamic alignment.
-  if (info()->IsOptimizing()) {
-    int alignment_state_index = chunk_->GetNextSpillIndex(false);
-    ASSERT_EQ(alignment_state_index, 0);
-    USE(alignment_state_index);
-  }
+  int alignment_state_index = chunk_->GetNextSpillIndex(false);
+  ASSERT_EQ(alignment_state_index, 0);
+  USE(alignment_state_index);
 
   const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
   for (int i = 0; i < blocks->length(); i++) {
@@ -496,12 +494,6 @@ LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
 }
 
 
-LUnallocated* LChunkBuilder::ToUnallocated(X87TopOfStackRegister reg) {
-  return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
-      X87TopOfStackRegister::ToAllocationIndex(reg));
-}
-
-
 LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
   return Use(value, ToUnallocated(fixed_register));
 }
@@ -634,13 +626,6 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
 }
 
 
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineX87TOS(
-    LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr, ToUnallocated(x87tos));
-}
-
-
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
   int argument_index_accumulator = 0;
@@ -653,8 +638,6 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
-  info()->MarkAsNonDeferredCalling();
-
 #ifdef DEBUG
   instr->VerifyCall();
 #endif
@@ -1715,12 +1698,8 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
 LInstruction* LChunkBuilder::DoChange(HChange* instr) {
   Representation from = instr->from();
   Representation to = instr->to();
-  // Only mark conversions that might need to allocate as calling rather than
-  // all changes. This makes simple, non-allocating conversion not have to force
-  // building a stack frame.
   if (from.IsTagged()) {
     if (to.IsDouble()) {
-      info()->MarkAsDeferredCalling();
       LOperand* value = UseRegister(instr->value());
       // Temp register only necessary for minus zero check.
       LOperand* temp = instr->deoptimize_on_minus_zero()
@@ -1745,10 +1724,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
     }
   } else if (from.IsDouble()) {
     if (to.IsTagged()) {
-      info()->MarkAsDeferredCalling();
-      LOperand* value = CpuFeatures::IsSupported(SSE2)
-          ? UseRegisterAtStart(instr->value())
-          : UseAtStart(instr->value());
+      LOperand* value = UseRegister(instr->value());
       LOperand* temp = TempRegister();
 
       // Make sure that temp and result_temp are different registers.
@@ -1766,7 +1742,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
           DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
     }
   } else if (from.IsInteger32()) {
-    info()->MarkAsDeferredCalling();
     if (to.IsTagged()) {
       HValue* val = instr->value();
       LOperand* value = UseRegister(val);
@@ -2283,17 +2258,8 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
 
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
-  LParameter* result = new(zone()) LParameter;
-  if (info()->IsOptimizing()) {
-    int spill_index = chunk()->GetParameterStackSlot(instr->index());
-    return DefineAsSpilled(result, spill_index);
-  } else {
-    ASSERT(info()->IsStub());
-    CodeStubInterfaceDescriptor* descriptor =
-        info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
-    Register reg = descriptor->register_params_[instr->index()];
-    return DefineFixed(result, reg);
-  }
+  int spill_index = chunk()->GetParameterStackSlot(instr->index());
+  return DefineAsSpilled(new(zone()) LParameter, spill_index);
 }
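The removed branch captures what this revert undoes: an optimized function's parameter i lives in a caller-pushed stack slot (GetParameterStackSlot), whereas a Crankshaft stub received it in a fixed register named by its CodeStubInterfaceDescriptor's register_params_. The policy, restated as a hypothetical sketch:

enum class ParamLocation { kStackSlot, kFixedRegister };

// Hypothetical restatement of the removed decision in DoParameter.
ParamLocation LocationForParameter(bool compiling_stub) {
  // Optimized JS code reads caller-pushed arguments from the stack; stubs
  // take theirs in registers assigned by the interface descriptor.
  return compiling_stub ? ParamLocation::kFixedRegister
                        : ParamLocation::kStackSlot;
}
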
 
 
@@ -2394,7 +2360,6 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
 
 
 LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
-  info()->MarkAsDeferredCalling();
   if (instr->is_function_entry()) {
     LOperand* context = UseFixed(instr->context(), esi);
     return MarkAsCall(new(zone()) LStackCheck(context), instr);
index f4056c10d9115b8652b713f8a66c13ba526b6030..cf85374267d46ab654ace6e4650f0048c64004af 100644 (file)
@@ -250,11 +250,7 @@ class LInstruction: public ZoneObject {
   void MarkAsCall() { is_call_ = true; }
 
   // Interface to the register allocator and iterators.
-  bool ClobbersTemps() const { return is_call_; }
-  bool ClobbersRegisters() const { return is_call_; }
-  virtual bool ClobbersDoubleRegisters() const {
-    return is_call_ || !CpuFeatures::IsSupported(SSE2);
-  }
+  bool IsMarkedAsCall() const { return is_call_; }
 
   virtual bool HasResult() const = 0;
   virtual LOperand* result() = 0;
@@ -360,7 +356,6 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
 class LInstructionGap: public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-  virtual bool ClobbersDoubleRegisters() const { return false; }
 
   DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
 };
@@ -1443,6 +1438,7 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
     inputs_[0] = elements;
     inputs_[1] = key;
   }
+
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   ElementsKind elements_kind() const {
@@ -1452,18 +1448,11 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
     return hydrogen()->is_external();
   }
 
-  virtual bool ClobbersDoubleRegisters() const {
-    return !IsDoubleOrFloatElementsKind(hydrogen()->elements_kind());
-  }
-
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
 
   virtual void PrintDataTo(StringStream* stream);
   uint32_t additional_index() const { return hydrogen()->index_offset(); }
-  bool key_is_smi() {
-    return hydrogen()->key()->representation().IsTagged();
-  }
 };
 
 
@@ -2444,9 +2433,8 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
   // slot, i.e., that must also be restored to the spill slot on OSR entry.
   // NULL if the register has no assigned spill slot.  Indexed by allocation
   // index.
-  LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
-  LOperand* double_register_spills_[
-      DoubleRegister::kMaxNumAllocatableRegisters];
+  LOperand* register_spills_[Register::kNumAllocatableRegisters];
+  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
 };
 
 
@@ -2610,7 +2598,6 @@ class LChunkBuilder BASE_EMBEDDED {
   // Methods for getting operands for Use / Define / Temp.
   LUnallocated* ToUnallocated(Register reg);
   LUnallocated* ToUnallocated(XMMRegister reg);
-  LUnallocated* ToUnallocated(X87TopOfStackRegister reg);
 
   // Methods for setting up define-use relationships.
   MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
@@ -2671,8 +2658,6 @@ class LChunkBuilder BASE_EMBEDDED {
   template<int I, int T>
       LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
                                       XMMRegister reg);
-  template<int I, int T>
-      LInstruction* DefineX87TOS(LTemplateInstruction<1, I, T>* instr);
   // Assigns an environment to an instruction.  An instruction which can
   // deoptimize must have an environment.
   LInstruction* AssignEnvironment(LInstruction* instr);
index e9ce7974e56c6f16b05e71e9f0dd0460ceda70e5..14fb8ca85220f1adba75dd26fb86711466bb72ad 100644 (file)
@@ -1801,8 +1801,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
   const Runtime::Function* function = Runtime::FunctionForId(id);
   Set(eax, Immediate(function->nargs));
   mov(ebx, Immediate(ExternalReference(function, isolate())));
-  CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? kSaveFPRegs
-                                                   : kDontSaveFPRegs);
+  CEntryStub ces(1, kSaveFPRegs);
   CallStub(&ces);
 }
 
index 79960ec133f7c6d24322d14a1bd0560f9953c131..7abb29b10aa2b6bf9cc4232cb4163b3c562655f3 100644 (file)
@@ -924,9 +924,9 @@ class MacroAssembler: public Assembler {
   Operand SafepointRegisterSlot(Register reg);
   static int SafepointRegisterStackIndex(int reg_code);
 
-  // Needs access to SafepointRegisterStackIndex for compiled frame
+  // Needs access to SafepointRegisterStackIndex for optimized frame
   // traversal.
-  friend class CompiledFrame;
+  friend class OptimizedFrame;
 };
 
 
index 783462714473d2a2d819c5d146d6e930e10073ed..c8695c572c8c6d8b4167c06f06cd5ae79c3547c5 100644 (file)
@@ -3398,17 +3398,9 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
   // -----------------------------------
 
   ElementsKind elements_kind = receiver_map->elements_kind();
-  if (receiver_map->has_fast_elements() ||
-      receiver_map->has_external_array_elements()) {
-    Handle<Code> stub = KeyedLoadFastElementStub(
-        receiver_map->instance_type() == JS_ARRAY_TYPE,
-        elements_kind).GetCode();
-    __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
-  } else {
-    Handle<Code> stub =
-        KeyedLoadDictionaryElementStub().GetCode();
-    __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
-  }
+  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+
+  __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
 
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -3669,6 +3661,157 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
 }
 
 
+void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+    MacroAssembler* masm,
+    ElementsKind elements_kind) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic, failed_allocation, slow;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
+
+  // Check that the index is in range.
+  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+  __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
+  // Unsigned comparison catches both negative and too-large values.
+  __ j(above_equal, &miss_force_generic);
+  __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
+  // ebx: base pointer of external storage
+  switch (elements_kind) {
+    case EXTERNAL_BYTE_ELEMENTS:
+      __ SmiUntag(ecx);  // Untag the index.
+      __ movsx_b(eax, Operand(ebx, ecx, times_1, 0));
+      break;
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_PIXEL_ELEMENTS:
+      __ SmiUntag(ecx);  // Untag the index.
+      __ movzx_b(eax, Operand(ebx, ecx, times_1, 0));
+      break;
+    case EXTERNAL_SHORT_ELEMENTS:
+      __ movsx_w(eax, Operand(ebx, ecx, times_1, 0));
+      break;
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      __ movzx_w(eax, Operand(ebx, ecx, times_1, 0));
+      break;
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+      __ mov(eax, Operand(ebx, ecx, times_2, 0));
+      break;
+    case EXTERNAL_FLOAT_ELEMENTS:
+      __ fld_s(Operand(ebx, ecx, times_2, 0));
+      break;
+    case EXTERNAL_DOUBLE_ELEMENTS:
+      __ fld_d(Operand(ebx, ecx, times_4, 0));
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // For integer array types:
+  // eax: value
+  // For floating-point array types:
+  // FP(0): value
+
+  if (elements_kind == EXTERNAL_INT_ELEMENTS ||
+      elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+    // For the Int and UnsignedInt array types, we need to see whether
+    // the value can be represented in a Smi. If not, we need to convert
+    // it to a HeapNumber.
+    Label box_int;
+    if (elements_kind == EXTERNAL_INT_ELEMENTS) {
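+      // A smi on ia32 carries a signed 31-bit payload, so the representable
+      // range is [-2^30, 2^30 - 1]. Subtracting 0xc0000000 is equivalent to
+      // adding 2^30 modulo 2^32, so the sign flag is set exactly when the
+      // value lies outside that range.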
+      __ cmp(eax, 0xc0000000);
+      __ j(sign, &box_int);
+    } else {
+      ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
+      // The test is different for unsigned int values. Since we need
+      // the value to be in the range of a positive smi, we can't
+      // handle either of the top two bits being set in the value.
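+      // (0xc0000000 masks bits 30 and 31; a non-negative smi payload must
+      // fit in 30 bits.)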
+      __ test(eax, Immediate(0xc0000000));
+      __ j(not_zero, &box_int);
+    }
+
+    __ SmiTag(eax);
+    __ ret(0);
+
+    __ bind(&box_int);
+
+    // Allocate a HeapNumber for the int and perform int-to-double
+    // conversion.
+    if (elements_kind == EXTERNAL_INT_ELEMENTS) {
+      __ push(eax);
+      __ fild_s(Operand(esp, 0));
+      __ pop(eax);
+    } else {
+      ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
+      // Need to zero-extend the value.
+      // There's no fild variant for unsigned values, so zero-extend
+      // to a 64-bit int manually.
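+      // Little-endian layout: after the two pushes, esp holds the low word
+      // (the value) and esp + 4 holds the zero high word, together forming
+      // an unsigned 64-bit integer for fild_d.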
+      __ push(Immediate(0));
+      __ push(eax);
+      __ fild_d(Operand(esp, 0));
+      __ pop(eax);
+      __ pop(eax);
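+      // Both pops only deallocate the 8-byte temporary; eax is overwritten
+      // by AllocateHeapNumber below.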
+    }
+    // FP(0): value
+    __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
+    // Set the value.
+    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+    __ ret(0);
+  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+             elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    // For the floating-point array types, we always need to allocate a
+    // HeapNumber.
+    __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
+    // Set the value.
+    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+    __ ret(0);
+  } else {
+    __ SmiTag(eax);
+    __ ret(0);
+  }
+
+  // If we fail allocation of the HeapNumber, we still have a value on
+  // top of the FPU stack. Remove it.
+  __ bind(&failed_allocation);
+  __ fstp(0);
+  // Fall through to slow case.
+
+  // Slow case: Jump to runtime.
+  __ bind(&slow);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
+
+  // ----------- S t a t e -------------
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+
+  Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // ----------- S t a t e -------------
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+
+  // Miss case: Jump to runtime.
+  __ bind(&miss_force_generic);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
 void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     MacroAssembler* masm,
     ElementsKind elements_kind) {
@@ -3868,6 +4011,106 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
 }
 
 
+void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
+
+  // Get the elements array.
+  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
+  __ AssertFastElements(eax);
+
+  // Check that the key is within bounds.
+  __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
+  __ j(above_equal, &miss_force_generic);
+
+  // Load the result and make sure it's not the hole.
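+  // The key in ecx is a smi (index << 1), so scaling by times_2 yields
+  // index * kPointerSize, the byte offset of the element on ia32.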
+  __ mov(ebx, Operand(eax, ecx, times_2,
+                      FixedArray::kHeaderSize - kHeapObjectTag));
+  __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
+  __ j(equal, &miss_force_generic);
+  __ mov(eax, ebx);
+  __ ret(0);
+
+  __ bind(&miss_force_generic);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic, slow_allocate_heapnumber;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
+
+  // Get the elements array.
+  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
+  __ AssertFastElements(eax);
+
+  // Check that the key is within bounds.
+  __ cmp(ecx, FieldOperand(eax, FixedDoubleArray::kLengthOffset));
+  __ j(above_equal, &miss_force_generic);
+
+  // Check for the hole
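+  // The smi key scaled by times_4 addresses 8-byte doubles; the extra
+  // sizeof(kHoleNanLower32) skips to the upper word, which is all the
+  // comparison below needs to recognize the hole NaN.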
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  __ cmp(FieldOperand(eax, ecx, times_4, offset), Immediate(kHoleNanUpper32));
+  __ j(equal, &miss_force_generic);
+
+  // Always allocate a heap number for the result.
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    __ movdbl(xmm0, FieldOperand(eax, ecx, times_4,
+                                 FixedDoubleArray::kHeaderSize));
+  } else {
+    __ fld_d(FieldOperand(eax, ecx, times_4, FixedDoubleArray::kHeaderSize));
+  }
+  __ AllocateHeapNumber(eax, ebx, edi, &slow_allocate_heapnumber);
+  // Set the value.
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+  } else {
+    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+  }
+  __ ret(0);
+
+  __ bind(&slow_allocate_heapnumber);
+  // A value was pushed on the floating-point stack before the allocation;
+  // if the allocation fails, it needs to be removed.
+  if (!CpuFeatures::IsSupported(SSE2)) {
+    __ fstp(0);
+  }
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ jmp(slow_ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&miss_force_generic);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
 void KeyedStoreStubCompiler::GenerateStoreFastElement(
     MacroAssembler* masm,
     bool is_js_array,
index 363303617628cfc5f6fe6639bc16e979e8aa2621..bf2a649f7f298d1e53fb53b035b5dcfb323a7ee7 100644 (file)
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1054,13 +1054,7 @@ Handle<Code> KeyedLoadIC::GetElementStubWithoutMapCheck(
     ElementsKind elements_kind,
     KeyedAccessGrowMode grow_mode) {
   ASSERT(grow_mode == DO_NOT_ALLOW_JSARRAY_GROWTH);
-  if (IsFastElementsKind(elements_kind) ||
-      IsExternalArrayElementsKind(elements_kind)) {
-    return KeyedLoadFastElementStub(is_js_array, elements_kind).GetCode();
-  } else {
-    ASSERT(elements_kind == DICTIONARY_ELEMENTS);
-    return KeyedLoadDictionaryElementStub().GetCode();
-  }
+  return KeyedLoadElementStub(elements_kind).GetCode();
 }
 
 
index d09625c401a4a8ad0d27f5faae6cc54616c85b13..15d0bdd44193524f5a8f5127688daf4e3604e4b8 100644 (file)
@@ -1635,7 +1635,6 @@ Isolate::Isolate()
       string_tracker_(NULL),
       regexp_stack_(NULL),
       date_cache_(NULL),
-      code_stub_interface_descriptors_(NULL),
       context_exit_happened_(false),
       deferred_handles_head_(NULL),
       optimizing_compiler_thread_(this) {
@@ -1798,9 +1797,6 @@ Isolate::~Isolate() {
   delete date_cache_;
   date_cache_ = NULL;
 
-  delete[] code_stub_interface_descriptors_;
-  code_stub_interface_descriptors_ = NULL;
-
   delete regexp_stack_;
   regexp_stack_ = NULL;
 
@@ -1964,10 +1960,6 @@ bool Isolate::Init(Deserializer* des) {
   regexp_stack_ = new RegExpStack();
   regexp_stack_->isolate_ = this;
   date_cache_ = new DateCache();
-  code_stub_interface_descriptors_ =
-      new CodeStubInterfaceDescriptor[CodeStub::NUMBER_OF_IDS];
-  memset(code_stub_interface_descriptors_, 0,
-         kPointerSize * CodeStub::NUMBER_OF_IDS);
 
   // Enable logging before setting up the heap
   logger_->SetUp();
@@ -2028,8 +2020,6 @@ bool Isolate::Init(Deserializer* des) {
   debug_->SetUp(create_heap_objects);
 #endif
 
-  deoptimizer_data_ = new DeoptimizerData;
-
   // If we are deserializing, read the state into the now-empty heap.
   if (!create_heap_objects) {
     des->Deserialize();
@@ -2048,6 +2038,7 @@ bool Isolate::Init(Deserializer* des) {
   // Quiet the heap NaN if needed on target platform.
   if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value());
 
+  deoptimizer_data_ = new DeoptimizerData;
   runtime_profiler_ = new RuntimeProfiler(this);
   runtime_profiler_->SetUp();
 
@@ -2069,17 +2060,6 @@ bool Isolate::Init(Deserializer* des) {
 
   state_ = INITIALIZED;
   time_millis_at_init_ = OS::TimeCurrentMillis();
-
-  if (!create_heap_objects) {
-    // Now that the heap is consistent, it's OK to generate the code for the
-    // deopt entry table that might have been referred to by optimized code in
-    // the snapshot.
-    HandleScope scope(this);
-    Deoptimizer::EnsureCodeForDeoptimizationEntry(
-        Deoptimizer::LAZY,
-        kDeoptTableSerializeEntryCount - 1);
-  }
-
   if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
   return true;
 }
@@ -2194,12 +2174,6 @@ void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) {
 }
 
 
-CodeStubInterfaceDescriptor*
-    Isolate::code_stub_interface_descriptor(int index) {
-  return code_stub_interface_descriptors_ + index;
-}
-
-
 #ifdef DEBUG
 #define ISOLATE_FIELD_OFFSET(type, name, ignored)                       \
 const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
index 6bbe5eb527744e46a64bb791ee8c194e3683253b..ac2e554f85e5112b1a1d72efb0dfe7047eb20969 100644 (file)
@@ -53,7 +53,6 @@ namespace internal {
 class Bootstrapper;
 class CodeGenerator;
 class CodeRange;
-struct CodeStubInterfaceDescriptor;
 class CompilationCache;
 class ContextSlotCache;
 class ContextSwitcher;
@@ -1060,9 +1059,6 @@ class Isolate {
     date_cache_ = date_cache;
   }
 
-  CodeStubInterfaceDescriptor*
-      code_stub_interface_descriptor(int index);
-
   void IterateDeferredHandles(ObjectVisitor* visitor);
   void LinkDeferredHandles(DeferredHandles* deferred_handles);
   void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
@@ -1245,7 +1241,6 @@ class Isolate {
   RegExpStack* regexp_stack_;
   DateCache* date_cache_;
   unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
-  CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
 
   // The garbage collector should be a little more aggressive when it knows
   // that a context was recently exited.
index b23c86766a6fb71e86927aebefdd1b2034aab359..91a98112b61bd60a50b56d98f8ca4eebacdf01ff 100644 (file)
@@ -606,7 +606,7 @@ void LAllocator::AddInitialIntervals(HBasicBlock* block,
 
 
 int LAllocator::FixedDoubleLiveRangeID(int index) {
-  return -index - 1 - Register::kMaxNumAllocatableRegisters;
+  return -index - 1 - Register::kNumAllocatableRegisters;
 }
 
 
@@ -638,7 +638,7 @@ LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
 
 
 LiveRange* LAllocator::FixedLiveRangeFor(int index) {
-  ASSERT(index < Register::kMaxNumAllocatableRegisters);
+  ASSERT(index < Register::kNumAllocatableRegisters);
   LiveRange* result = fixed_live_ranges_[index];
   if (result == NULL) {
     result = new(zone_) LiveRange(FixedLiveRangeID(index), zone_);
@@ -651,7 +651,7 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) {
 
 
 LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
-  ASSERT(index < DoubleRegister::NumAllocatableRegisters());
+  ASSERT(index < DoubleRegister::kNumAllocatableRegisters);
   LiveRange* result = fixed_double_live_ranges_[index];
   if (result == NULL) {
     result = new(zone_) LiveRange(FixedDoubleLiveRangeID(index), zone_);
@@ -768,7 +768,6 @@ void LAllocator::AddConstraintsGapMove(int index,
 void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
   int start = block->first_instruction_index();
   int end = block->last_instruction_index();
-  if (start == -1) return;
   for (int i = start; i <= end; ++i) {
     if (IsGapAt(i)) {
       LInstruction* instr = NULL;
@@ -947,8 +946,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
           Define(curr_position, output, NULL);
         }
 
-        if (instr->ClobbersRegisters()) {
-          for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
+        if (instr->IsMarkedAsCall()) {
+          for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
             if (output == NULL || !output->IsRegister() ||
                 output->index() != i) {
               LiveRange* range = FixedLiveRangeFor(i);
@@ -959,8 +958,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
           }
         }
 
-        if (instr->ClobbersDoubleRegisters()) {
-          for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+        if (instr->IsMarkedAsCall()) {
+          for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
             if (output == NULL || !output->IsDoubleRegister() ||
                 output->index() != i) {
               LiveRange* range = FixedDoubleLiveRangeFor(i);
@@ -990,7 +989,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
 
         for (TempIterator it(instr); !it.Done(); it.Advance()) {
           LOperand* temp = it.Current();
-          if (instr->ClobbersTemps()) {
+          if (instr->IsMarkedAsCall()) {
             if (temp->IsRegister()) continue;
             if (temp->IsUnallocated()) {
               LUnallocated* temp_unalloc = LUnallocated::cast(temp);
@@ -1325,14 +1324,8 @@ void LAllocator::BuildLiveRanges() {
       while (!iterator.Done()) {
         found = true;
         int operand_index = iterator.Current();
-        if (chunk_->info()->IsStub()) {
-          CodeStub::Major major_key = chunk_->info()->code_stub()->MajorKey();
-          PrintF("Function: %s\n", CodeStub::MajorName(major_key, false));
-        } else {
-          ASSERT(chunk_->info()->IsOptimizing());
-          PrintF("Function: %s\n",
-                 *chunk_->info()->function()->debug_name()->ToCString());
-        }
+        PrintF("Function: %s\n",
+               *chunk_->info()->function()->debug_name()->ToCString());
         PrintF("Value %d used before first definition!\n", operand_index);
         LiveRange* range = LiveRangeFor(operand_index);
         PrintF("First use is at %d\n", range->first_pos()->pos().Value());
@@ -1478,14 +1471,14 @@ void LAllocator::ProcessOsrEntry() {
 
 void LAllocator::AllocateGeneralRegisters() {
   HPhase phase("L_Allocate general registers", this);
-  num_registers_ = Register::NumAllocatableRegisters();
+  num_registers_ = Register::kNumAllocatableRegisters;
   AllocateRegisters();
 }
 
 
 void LAllocator::AllocateDoubleRegisters() {
   HPhase phase("L_Allocate double registers", this);
-  num_registers_ = DoubleRegister::NumAllocatableRegisters();
+  num_registers_ = DoubleRegister::kNumAllocatableRegisters;
   mode_ = DOUBLE_REGISTERS;
   AllocateRegisters();
 }
@@ -1764,14 +1757,14 @@ void LAllocator::InactiveToActive(LiveRange* range) {
 
 // TryAllocateFreeReg and AllocateBlockedReg assume this
 // when allocating local arrays.
-STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
-              Register::kMaxNumAllocatableRegisters);
+STATIC_ASSERT(DoubleRegister::kNumAllocatableRegisters >=
+              Register::kNumAllocatableRegisters);
 
 
 bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
-  LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+  LifetimePosition free_until_pos[DoubleRegister::kNumAllocatableRegisters];
 
-  for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; i++) {
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
     free_until_pos[i] = LifetimePosition::MaxPosition();
   }
 
@@ -1860,10 +1853,10 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
   }
 
 
-  LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
-  LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+  LifetimePosition use_pos[DoubleRegister::kNumAllocatableRegisters];
+  LifetimePosition block_pos[DoubleRegister::kNumAllocatableRegisters];
 
-  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
     use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
   }
 
index 0dd192d7c12a34e4bde76a16f8636f9bf87eec4b..5b05263575dcc45d2d1f9e53ee8066054660d49f 100644 (file)
@@ -608,9 +608,9 @@ class LAllocator BASE_EMBEDDED {
   ZoneList<LiveRange*> live_ranges_;
 
   // Lists of live ranges
-  EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
+  EmbeddedVector<LiveRange*, Register::kNumAllocatableRegisters>
       fixed_live_ranges_;
-  EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
+  EmbeddedVector<LiveRange*, DoubleRegister::kNumAllocatableRegisters>
       fixed_double_live_ranges_;
   ZoneList<LiveRange*> unhandled_live_ranges_;
   ZoneList<LiveRange*> active_live_ranges_;
index 7ad175ea7c68c5db9802b094f1ae86d28c76d793..eb2198d85480242d08f166f5c673046dcd156b69 100644 (file)
@@ -414,7 +414,7 @@ LChunk* LChunk::NewChunk(HGraph* graph) {
 }
 
 
-Handle<Code> LChunk::Codegen(Code::Kind kind) {
+Handle<Code> LChunk::Codegen() {
   MacroAssembler assembler(info()->isolate(), NULL, 0);
   LCodeGen generator(this, &assembler, info());
 
@@ -425,7 +425,7 @@ Handle<Code> LChunk::Codegen(Code::Kind kind) {
       PrintF("Crankshaft Compiler - ");
     }
     CodeGenerator::MakeCodePrologue(info());
-    Code::Flags flags = Code::ComputeFlags(kind);
+    Code::Flags flags = Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
     Handle<Code> code =
         CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
     generator.FinishCode(code);
index cc2cde015a4539dbf2b2aa77440ccba8a78e88e1..089926e71a6f31b071afa3c195f157bb310dabff 100644 (file)
@@ -682,7 +682,7 @@ class LChunk: public ZoneObject {
 
   Zone* zone() const { return info_->zone(); }
 
-  Handle<Code> Codegen(Code::Kind kind);
+  Handle<Code> Codegen();
 
  protected:
   LChunk(CompilationInfo* info, HGraph* graph)
index 88527ca6b304d67b6fdd7d317988877da06568d0..d0dc76d4b885a5a77eadc8587e50335d256b4e53 100644 (file)
@@ -1457,7 +1457,6 @@ void Logger::LogCodeObject(Object* object) {
       case Code::BINARY_OP_IC:   // fall through
       case Code::COMPARE_IC:  // fall through
       case Code::TO_BOOLEAN_IC:  // fall through
-      case Code::COMPILED_STUB:  // fall through
       case Code::STUB:
         description =
             CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
index 13b0b436cf9d8adcb31f5ce388e65c1059473406..0ed2414a035be1183c40803e967cc88d0ab33721 100644 (file)
@@ -70,8 +70,6 @@ class CodeGenerator: public AstVisitor {
                               int pos,
                               bool right_here = false);
 
-  DEFINE_AST_VISITOR_SUBCLASS_METHODS();
-
  private:
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
index 9ff35efbcf90aac117d2f1001fed87d705a8741e..451a3725a8b7290b1cd446ad27608a0b8f3192aa 100644 (file)
@@ -3436,7 +3436,6 @@ int Code::arguments_count() {
 
 int Code::major_key() {
   ASSERT(kind() == STUB ||
-         kind() == COMPILED_STUB ||
          kind() == UNARY_OP_IC ||
          kind() == BINARY_OP_IC ||
          kind() == COMPARE_IC ||
@@ -3448,7 +3447,6 @@ int Code::major_key() {
 
 void Code::set_major_key(int major) {
   ASSERT(kind() == STUB ||
-         kind() == COMPILED_STUB ||
          kind() == UNARY_OP_IC ||
          kind() == BINARY_OP_IC ||
          kind() == COMPARE_IC ||
@@ -3557,7 +3555,7 @@ void Code::set_profiler_ticks(int ticks) {
 
 
 unsigned Code::stack_slots() {
-  ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
+  ASSERT(kind() == OPTIMIZED_FUNCTION);
   return StackSlotsField::decode(
       READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
 }
@@ -3565,7 +3563,7 @@ unsigned Code::stack_slots() {
 
 void Code::set_stack_slots(unsigned slots) {
   CHECK(slots <= (1 << kStackSlotsBitCount));
-  ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
+  ASSERT(kind() == OPTIMIZED_FUNCTION);
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
   int updated = StackSlotsField::update(previous, slots);
   WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
@@ -3573,7 +3571,7 @@ void Code::set_stack_slots(unsigned slots) {
 
 
 unsigned Code::safepoint_table_offset() {
-  ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
+  ASSERT(kind() == OPTIMIZED_FUNCTION);
   return SafepointTableOffsetField::decode(
       READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
 }
@@ -3581,7 +3579,7 @@ unsigned Code::safepoint_table_offset() {
 
 void Code::set_safepoint_table_offset(unsigned offset) {
   CHECK(offset <= (1 << kSafepointTableOffsetBitCount));
-  ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
+  ASSERT(kind() == OPTIMIZED_FUNCTION);
   ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
   int updated = SafepointTableOffsetField::update(previous, offset);
index f2a1318ba6d0a7fba85b19f59e61ce95677dfeb9..fcc2efad31d96ab969a026f885c6bdaf7dbc64d9 100644 (file)
@@ -9141,12 +9141,6 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
           break;
         }
 
-        case Translation::COMPILED_STUB_FRAME: {
-          Code::Kind stub_kind = static_cast<Code::Kind>(iterator.Next());
-          PrintF(out, "{kind=%d}", stub_kind);
-          break;
-        }
-
         case Translation::ARGUMENTS_ADAPTOR_FRAME:
         case Translation::CONSTRUCT_STUB_FRAME: {
           int function_id = iterator.Next();
@@ -9261,7 +9255,6 @@ const char* Code::Kind2String(Kind kind) {
   switch (kind) {
     case FUNCTION: return "FUNCTION";
     case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION";
-    case COMPILED_STUB: return "COMPILED_STUB";
     case STUB: return "STUB";
     case BUILTIN: return "BUILTIN";
     case LOAD_IC: return "LOAD_IC";
@@ -9381,7 +9374,7 @@ void Code::Disassemble(const char* name, FILE* out) {
   }
   PrintF("\n");
 
-  if (kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB) {
+  if (kind() == OPTIMIZED_FUNCTION) {
     SafepointTable table(this);
     PrintF(out, "Safepoints (size = %u)\n", table.size());
     for (unsigned i = 0; i < table.length(); i++) {
index e3a0c7ff1eeb4e36c8fcba8d5882136eff3d8096..2869f2bc0a44c60551a48ce9f649f66338c5c5f7 100644 (file)
@@ -4233,7 +4233,6 @@ class Code: public HeapObject {
   V(FUNCTION)             \
   V(OPTIMIZED_FUNCTION)   \
   V(STUB)                 \
-  V(COMPILED_STUB)        \
   V(BUILTIN)              \
   V(LOAD_IC)              \
   V(KEYED_LOAD_IC)        \
@@ -4849,10 +4848,6 @@ class Map: public HeapObject {
     return IsFastDoubleElementsKind(elements_kind());
   }
 
-  inline bool has_fast_elements() {
-    return IsFastElementsKind(elements_kind());
-  }
-
   inline bool has_non_strict_arguments_elements() {
     return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
   }
index 7aad78ca5e86a25f6324ef7b8dbf476fc65e8153..2d56d1a72ba26ee0daedf252cb6cc3111a0fe800 100644 (file)
@@ -36,7 +36,7 @@
 namespace v8 {
 namespace internal {
 
-class HOptimizedGraphBuilder;
+class HGraphBuilder;
 class OptimizingCompiler;
 class SharedFunctionInfo;
 
index c0fdf48ff1b8656541527eeda4c7d26806d38b05..602fbb40b9840178da9da9c48c24483f72daa26f 100644 (file)
@@ -42,7 +42,6 @@ PrettyPrinter::PrettyPrinter() {
   output_ = NULL;
   size_ = 0;
   pos_ = 0;
-  InitializeAstVisitor();
 }
 
 
index 41175ab2ae2282c8d8f5996d2769316adf8860ae..9ac72576409f961bbc173b5acdd8bc13674bb34f 100644 (file)
@@ -74,8 +74,6 @@ class PrettyPrinter: public AstVisitor {
   void PrintDeclarations(ZoneList<Declaration*>* declarations);
   void PrintFunctionLiteral(FunctionLiteral* function);
   void PrintCaseClause(CaseClause* clause);
-
-  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
 };
 
 
index 02907de60eb98d69f529b20a48d2aa305d2b5d86..2a98787177b614258727d3c793492a09bd75c42b 100644 (file)
@@ -43,9 +43,7 @@ class Processor: public AstVisitor {
         result_assigned_(false),
         is_set_(false),
         in_try_(false),
-        factory_(Isolate::Current(), zone) {
-    InitializeAstVisitor();
-  }
+        factory_(isolate(), zone) { }
 
   virtual ~Processor() { }
 
@@ -88,8 +86,6 @@ class Processor: public AstVisitor {
 #undef DEF_VISIT
 
   void VisitIterationStatement(IterationStatement* stmt);
-
-  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
 };
 
 
index 8525e328f83fe39cefca3fb295a8a3ffd2e0c66e..446443148dcf219323317ec5ab40aefd29598706 100644 (file)
@@ -7889,17 +7889,6 @@ class ActivationsFinder : public ThreadVisitor {
 };
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyICMiss) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 0);
-  Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
-  ASSERT(isolate->heap()->IsAllocationAllowed());
-  ASSERT(deoptimizer->compiled_code_kind() == Code::COMPILED_STUB);
-  delete deoptimizer;
-  return isolate->heap()->undefined_value();
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
@@ -7908,11 +7897,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
       static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
   Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
   ASSERT(isolate->heap()->IsAllocationAllowed());
-
-  ASSERT(deoptimizer->compiled_code_kind() != Code::COMPILED_STUB);
+  JavaScriptFrameIterator it(isolate);
 
   // Make sure to materialize objects before causing any allocation.
-  JavaScriptFrameIterator it(isolate);
   deoptimizer->MaterializeHeapObjects(&it);
   delete deoptimizer;
 
index 7a21bb92b4c719c72a169fc4b913f7bec1e24fdc..c77f988215e925c531060649f934dd3e012b7cde 100644 (file)
@@ -89,7 +89,6 @@ namespace internal {
   F(ForceParallelRecompile, 1, 1) \
   F(InstallRecompiledCode, 1, 1) \
   F(NotifyDeoptimized, 1, 1) \
-  F(NotifyICMiss, 0, 1) \
   F(NotifyOSR, 0, 1) \
   F(DeoptimizeFunction, 1, 1) \
   F(ClearFunctionTypeFeedback, 1, 1) \
index 9e423045aec59c01d7fca266e83116ec09096d24..714e5c3977f7b8a5ac99ece038733e893edbbc30 100644 (file)
@@ -59,8 +59,7 @@ bool SafepointEntry::HasRegisterAt(int reg_index) const {
 
 
 SafepointTable::SafepointTable(Code* code) {
-  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION ||
-         code->kind() == Code::COMPILED_STUB);
+  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
   code_ = code;
   Address header = code->instruction_start() + code->safepoint_table_offset();
   length_ = Memory::uint32_at(header + kLengthOffset);
@@ -159,6 +158,14 @@ unsigned SafepointTableBuilder::GetCodeOffset() const {
 
 
 void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
+  // For lazy deoptimization we need space to patch a call after every call.
+  // Ensure there is always space for such patching, even if the code ends
+  // in a call.
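+  // (Deoptimizer::patch_size() is the byte length of the call sequence the
+  // deoptimizer writes when patching, so padding with nops up to that size
+  // is always sufficient.)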
+  int target_offset = assembler->pc_offset() + Deoptimizer::patch_size();
+  while (assembler->pc_offset() < target_offset) {
+    assembler->nop();
+  }
+
   // Make sure the safepoint table is properly aligned. Pad with nops.
   assembler->Align(kIntSize);
   assembler->RecordComment(";;; Safepoint table.");
index 26e0f0178dbf2df30006953e64e5ec05ccc6f2d1..dfc55740a36c412f710570e0a59bd73bb726f429 100644 (file)
@@ -30,7 +30,6 @@
 #include "accessors.h"
 #include "api.h"
 #include "bootstrapper.h"
-#include "deoptimizer.h"
 #include "execution.h"
 #include "global-handles.h"
 #include "ic-inl.h"
@@ -528,17 +527,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
       UNCLASSIFIED,
       51,
       "Code::MakeCodeYoung");
-
-  // Add a small set of deopt entry addresses to encoder without generating the
-  // deopt table code, which isn't possible at deserialization time.
-  HandleScope scope(Isolate::Current());
-  for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
-    Address address = Deoptimizer::GetDeoptimizationEntry(
-        entry,
-        Deoptimizer::LAZY,
-        Deoptimizer::CALCULATE_ENTRY_ADDRESS);
-    Add(address, LAZY_DEOPTIMIZATION, 52 + entry, "lazy_deopt");
-  }
 }
 
 
index 4bbde5a15ba58d71ffaf32e8a860751ac5140b9a..204179285623e3c55e43448a8cafcb04b34ce9de 100644 (file)
@@ -47,11 +47,10 @@ enum TypeCode {
   EXTENSION,
   ACCESSOR,
   RUNTIME_ENTRY,
-  STUB_CACHE_TABLE,
-  LAZY_DEOPTIMIZATION
+  STUB_CACHE_TABLE
 };
 
-const int kTypeCodeCount = LAZY_DEOPTIMIZATION + 1;
+const int kTypeCodeCount = STUB_CACHE_TABLE + 1;
 const int kFirstTypeCode = UNCLASSIFIED;
 
 const int kReferenceIdBits = 16;
@@ -60,7 +59,6 @@ const int kReferenceTypeShift = kReferenceIdBits;
 const int kDebugRegisterBits = 4;
 const int kDebugIdShift = kDebugRegisterBits;
 
-const int kDeoptTableSerializeEntryCount = 8;
 
 // ExternalReferenceTable is a helper class that defines the relationship
 // between external references and their encodings. It is used to build
index 02025bb7960b436bd41c8e85622dfebd251e30ee..345c4d47fb73e8d3e0c10c7f80fc87530876dbb3 100644 (file)
@@ -58,16 +58,11 @@ class SmartPointerBase {
   // You can get the underlying pointer out with the * operator.
   inline T* operator*() { return p_; }
 
-  // You can use [n] to index as if it was a plain pointer.
+  // You can use [n] to index as if it was a plain pointer.
   inline T& operator[](size_t i) {
     return p_[i];
   }
 
-  // You can use [n] to index as if it was a plain pointer.
-  const inline T& operator[](size_t i) const {
-    return p_[i];
-  }
-
   // We don't have implicit conversion to a T* since that hinders migration:
   // You would not be able to change a method from returning a T* to
   // returning an SmartArrayPointer<T> and then get errors wherever it is used.
@@ -82,11 +77,6 @@ class SmartPointerBase {
     return temp;
   }
 
-  inline void Reset(T* new_value) {
-    if (p_) Deallocator::Delete(p_);
-    p_ = new_value;
-  }
-
   // Assignment requires an empty (NULL) SmartArrayPointer as the receiver. Like
   // the copy constructor it removes the pointer in the original to avoid
   // double freeing.
index ec9f30dc3e2828e9b1bad7d9d84dde9e4d8fbe7f..cacd9691501c10428a7a2b97d3a810cfd683789a 100644 (file)
@@ -1680,7 +1680,6 @@ static void ReportCodeKindStatistics() {
       CASE(FUNCTION);
       CASE(OPTIMIZED_FUNCTION);
       CASE(STUB);
-      CASE(COMPILED_STUB);
       CASE(BUILTIN);
       CASE(LOAD_IC);
       CASE(KEYED_LOAD_IC);
index c562bc714c8c124641c1265bbd08952ab58aaffa..f858e4722a2ded4868877034f3494977d66cccde 100644 (file)
@@ -681,6 +681,13 @@ class KeyedLoadStubCompiler: public StubCompiler {
   Handle<Code> CompileLoadPolymorphic(MapHandleList* receiver_maps,
                                       CodeHandleList* handler_ics);
 
+  static void GenerateLoadExternalArray(MacroAssembler* masm,
+                                        ElementsKind elements_kind);
+
+  static void GenerateLoadFastElement(MacroAssembler* masm);
+
+  static void GenerateLoadFastDoubleElement(MacroAssembler* masm);
+
   static void GenerateLoadDictionaryElement(MacroAssembler* masm);
 
  private:
index b4cd5a80cd86d3738d408eae8c8e1ce31800a313..e03f96f6e5d3a7bb2b1c527810a180333c4a26b8 100644 (file)
@@ -1015,7 +1015,6 @@ class BailoutId {
   static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
   static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
   static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
-  static BailoutId StubEntry() { return BailoutId(kStubEntryId); }
 
   bool IsNone() const { return id_ == kNoneId; }
   bool operator==(const BailoutId& other) const { return id_ == other.id_; }
@@ -1031,12 +1030,9 @@ class BailoutId {
   // code (function declarations).
   static const int kDeclarationsId = 3;
 
-  // Every FunctionState starts with this id.
+  // Every FunctionState starts with this id.
   static const int kFirstUsableId = 4;
 
-  // Every compiled stub starts with this id.
-  static const int kStubEntryId = 5;
-
   int id_;
 };
 
index cc072875aa484a8a5120c3c03086e3e179c45aad..370cb02a36363346795643f23252d225c34bb7e6 100644 (file)
@@ -201,8 +201,7 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
 // -----------------------------------------------------------------------------
 // Register constants.
 
-const int
-    Register::kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] = {
+const int Register::kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = {
   // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
   0, 3, 2, 1, 7, 8, 9, 11, 14, 15
 };
index 9471a6d20b0d71f5f0831b5397402ac8ae351274..24c8df368f477dede5e10b2fef1059ca9dcb3edd 100644 (file)
@@ -95,24 +95,21 @@ struct Register {
   //  r10 - fixed scratch register
   //  r12 - smi constant register
   //  r13 - root register
-  static const int kMaxNumAllocatableRegisters = 10;
-  static int NumAllocatableRegisters() {
-    return kMaxNumAllocatableRegisters;
-  }
   static const int kNumRegisters = 16;
+  static const int kNumAllocatableRegisters = 10;
 
   static int ToAllocationIndex(Register reg) {
     return kAllocationIndexByRegisterCode[reg.code()];
   }
 
   static Register FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
     Register result = { kRegisterCodeByAllocationIndex[index] };
     return result;
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
     const char* const names[] = {
       "rax",
       "rbx",
@@ -160,7 +157,7 @@ struct Register {
   int code_;
 
  private:
-  static const int kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters];
+  static const int kRegisterCodeByAllocationIndex[kNumAllocatableRegisters];
   static const int kAllocationIndexByRegisterCode[kNumRegisters];
 };
 
@@ -203,10 +200,7 @@ const Register no_reg = { kRegister_no_reg_Code };
 
 struct XMMRegister {
   static const int kNumRegisters = 16;
-  static const int kMaxNumAllocatableRegisters = 15;
-  static int NumAllocatableRegisters() {
-    return kMaxNumAllocatableRegisters;
-  }
+  static const int kNumAllocatableRegisters = 15;
 
   static int ToAllocationIndex(XMMRegister reg) {
     ASSERT(reg.code() != 0);
@@ -214,13 +208,13 @@ struct XMMRegister {
   }
 
   static XMMRegister FromAllocationIndex(int index) {
-    ASSERT(0 <= index && index < kMaxNumAllocatableRegisters);
+    ASSERT(0 <= index && index < kNumAllocatableRegisters);
     XMMRegister result = { index + 1 };
     return result;
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
     const char* const names[] = {
       "xmm1",
       "xmm2",
index e156dfd2eb76d4999ca11c7ce50556a5739b9526..ed0ec684fc138fa63569d8519b1736a0810cb5c4 100644 (file)
@@ -646,25 +646,6 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
 
 
-void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
-  // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    // Preserve registers across notification, this is important for compiled
-    // stubs that tail call the runtime on deopts passing their parameters in
-    // registers.
-    __ Pushad();
-    __ CallRuntime(Runtime::kNotifyICMiss, 0);
-    __ Popad();
-    // Tear down internal frame.
-  }
-
-  __ pop(MemOperand(rsp, 0));  // Ignore state offset
-  __ ret(0);  // Return to IC Miss stub, continuation still on stack.
-}
-
-
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   // Enter an internal frame.
@@ -679,17 +660,17 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
   }
 
   // Get the full codegen state from the stack and untag it.
-  __ SmiToInteger32(r10, Operand(rsp, 1 * kPointerSize));
+  __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
 
   // Switch on the state.
   Label not_no_registers, not_tos_rax;
-  __ cmpq(r10, Immediate(FullCodeGenerator::NO_REGISTERS));
+  __ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
   __ j(not_equal, &not_no_registers, Label::kNear);
   __ ret(1 * kPointerSize);  // Remove state.
 
   __ bind(&not_no_registers);
   __ movq(rax, Operand(rsp, 2 * kPointerSize));
-  __ cmpq(r10, Immediate(FullCodeGenerator::TOS_REG));
+  __ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
   __ j(not_equal, &not_tos_rax, Label::kNear);
   __ ret(2 * kPointerSize);  // Remove state, rax.
 
index f950368404804ada7d78da991bc0b6ca2464a288..970571840b5cd49a3ec99859a62cfb860c5616aa 100644 (file)
 namespace v8 {
 namespace internal {
 
-
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rdx, rax };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      isolate->builtins()->KeyedLoadIC_Miss();
-}
-
-
 #define __ ACCESS_MASM(masm)
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
index 71cef58434fb361a4f206faa816abf31221fbda2..ab8ea76c8f617893c4292ad0c07ed81961665021 100644 (file)
@@ -37,7 +37,7 @@ namespace internal {
 
 // Compute a transcendental math function natively, or call the
 // TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
+class TranscendentalCacheStub: public CodeStub {
  public:
   enum ArgumentType {
     TAGGED = 0,
@@ -60,7 +60,7 @@ class TranscendentalCacheStub: public PlatformCodeStub {
 };
 
 
-class StoreBufferOverflowStub: public PlatformCodeStub {
+class StoreBufferOverflowStub: public CodeStub {
  public:
   explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
       : save_doubles_(save_fp) { }
@@ -79,7 +79,7 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
 };
 
 
-class UnaryOpStub: public PlatformCodeStub {
+class UnaryOpStub: public CodeStub {
  public:
   UnaryOpStub(Token::Value op,
               UnaryOverwriteMode mode,
@@ -216,7 +216,7 @@ enum StringAddFlags {
 };
 
 
-class StringAddStub: public PlatformCodeStub {
+class StringAddStub: public CodeStub {
  public:
   explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
 
@@ -238,7 +238,7 @@ class StringAddStub: public PlatformCodeStub {
 };
 
 
-class SubStringStub: public PlatformCodeStub {
+class SubStringStub: public CodeStub {
  public:
   SubStringStub() {}
 
@@ -250,7 +250,7 @@ class SubStringStub: public PlatformCodeStub {
 };
 
 
-class StringCompareStub: public PlatformCodeStub {
+class StringCompareStub: public CodeStub {
  public:
   StringCompareStub() {}
 
@@ -287,7 +287,7 @@ class StringCompareStub: public PlatformCodeStub {
 };
 
 
-class NumberToStringStub: public PlatformCodeStub {
+class NumberToStringStub: public CodeStub {
  public:
   NumberToStringStub() { }
 
@@ -316,7 +316,7 @@ class NumberToStringStub: public PlatformCodeStub {
 };
 
 
-class StringDictionaryLookupStub: public PlatformCodeStub {
+class StringDictionaryLookupStub: public CodeStub {
  public:
   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
 
@@ -378,7 +378,7 @@ class StringDictionaryLookupStub: public PlatformCodeStub {
 };
 
 
-class RecordWriteStub: public PlatformCodeStub {
+class RecordWriteStub: public CodeStub {
  public:
   RecordWriteStub(Register object,
                   Register value,
@@ -561,7 +561,7 @@ class RecordWriteStub: public PlatformCodeStub {
     Register GetRegThatIsNotRcxOr(Register r1,
                                   Register r2,
                                   Register r3) {
-      for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
         Register candidate = Register::FromAllocationIndex(i);
         if (candidate.is(rcx)) continue;
         if (candidate.is(r1)) continue;
index 3a7646bd1bb7ff68b959509f6a6acacf02fadd9e..d4440952135d93a0bf7d72df2fd7e92ebbe6bc3f 100644 (file)
@@ -44,10 +44,6 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
 
 class CodeGenerator: public AstVisitor {
  public:
-  CodeGenerator() {
-    InitializeAstVisitor();
-  }
-
   static bool MakeCode(CompilationInfo* info);
 
   // Printing of AST, etc. as requested by flags.
@@ -67,8 +63,6 @@ class CodeGenerator: public AstVisitor {
                               int pos,
                               bool right_here = false);
 
-  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
  private:
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
index 0da9e86f0f5ca3eb9063eea6d80f905ab65d8b24..c8fdfce26b58b3361e2a1de48c1a6f80b4216815 100644 (file)
@@ -199,7 +199,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
 
 void Deoptimizer::DoComputeOsrOutputFrame() {
   DeoptimizationInputData* data = DeoptimizationInputData::cast(
-      compiled_code_->deoptimization_data());
+      optimized_code_->deoptimization_data());
   unsigned ast_id = data->OsrAstId()->value();
   // TODO(kasperl): This should not be the bailout_id_. It should be
   // the ast id. Confusing.
@@ -236,7 +236,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
   unsigned input_frame_size = input_->GetFrameSize();
   ASSERT(fixed_size + height_in_bytes == input_frame_size);
 
-  unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
+  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
   unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
   unsigned outgoing_size = outgoing_height * kPointerSize;
   unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -328,7 +328,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
 
     unsigned pc_offset = data->OsrPcOffset()->value();
     intptr_t pc = reinterpret_cast<intptr_t>(
-        compiled_code_->entry() + pc_offset);
+        optimized_code_->entry() + pc_offset);
     output_[0]->SetPc(pc);
   }
   Code* continuation =
@@ -447,70 +447,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
 }
 
 
-void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
-                                      int frame_index) {
-  //
-  //               FROM                                  TO             <-rbp
-  //    |          ....           |          |          ....           |
-  //    +-------------------------+          +-------------------------+
-  //    | JSFunction continuation |          | JSFunction continuation |
-  //    +-------------------------+          +-------------------------+<-rsp
-  // |  |   saved frame (rbp)     |
-  // |  +=========================+<-rbp
-  // |  |   JSFunction context    |
-  // v  +-------------------------+
-  //    |   COMPILED_STUB marker  |          rbp = saved frame
-  //    +-------------------------+          rsi = JSFunction context
-  //    |                         |
-  //    | ...                     |
-  //    |                         |
-  //    +-------------------------+<-rsp
-  //
-  //
-  int output_frame_size = 1 * kPointerSize;
-  FrameDescription* output_frame =
-      new(output_frame_size) FrameDescription(output_frame_size, 0);
-  Code* notify_miss =
-      isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
-  output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
-  output_frame->SetContinuation(
-      reinterpret_cast<intptr_t>(notify_miss->entry()));
-
-  ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
-  int major_key = compiled_code_->major_key();
-  CodeStubInterfaceDescriptor* descriptor =
-      isolate_->code_stub_interface_descriptor(major_key);
-  Handle<Code> miss_ic(descriptor->deoptimization_handler_);
-  output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
-  unsigned input_frame_size = input_->GetFrameSize();
-  intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
-  output_frame->SetFrameSlot(0, value);
-  value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
-  output_frame->SetRegister(rbp.code(), value);
-  output_frame->SetFp(value);
-  value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
-  output_frame->SetRegister(rsi.code(), value);
-
-  Translation::Opcode opcode =
-      static_cast<Translation::Opcode>(iterator->Next());
-  ASSERT(opcode == Translation::REGISTER);
-  USE(opcode);
-  int input_reg = iterator->Next();
-  intptr_t input_value = input_->GetRegister(input_reg);
-  output_frame->SetRegister(rdx.code(), input_value);
-
-  int32_t next = iterator->Next();
-  opcode = static_cast<Translation::Opcode>(next);
-  ASSERT(opcode == Translation::REGISTER);
-  input_reg = iterator->Next();
-  input_value = input_->GetRegister(input_reg);
-  output_frame->SetRegister(rax.code(), input_value);
-
-  ASSERT(frame_index == 0);
-  output_[frame_index] = output_frame;
-}
-
-
 void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
                                               int frame_index) {
   Builtins* builtins = isolate_->builtins();
@@ -930,7 +866,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
   }
   input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
   input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
     input_->SetDoubleRegister(i, 0.0);
   }
 
@@ -950,10 +886,10 @@ void Deoptimizer::EntryGenerator::Generate() {
   const int kNumberOfRegisters = Register::kNumRegisters;
 
   const int kDoubleRegsSize = kDoubleSize *
-      XMMRegister::NumAllocatableRegisters();
+                              XMMRegister::kNumAllocatableRegisters;
   __ subq(rsp, Immediate(kDoubleRegsSize));
 
-  for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
+  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
     XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
     int offset = i * kDoubleSize;
     __ movsd(Operand(rsp, offset), xmm_reg);
@@ -1042,7 +978,7 @@ void Deoptimizer::EntryGenerator::Generate() {
 
   // Fill in the double input registers.
   int double_regs_offset = FrameDescription::double_registers_offset();
-  for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
+  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
     int dst_offset = i * kDoubleSize + double_regs_offset;
     __ pop(Operand(rbx, dst_offset));
   }
@@ -1063,13 +999,10 @@ void Deoptimizer::EntryGenerator::Generate() {
   // limit and copy the contents of the activation frame to the input
   // frame description.
   __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
-  Label pop_loop_header;
-  __ jmp(&pop_loop_header);
   Label pop_loop;
   __ bind(&pop_loop);
   __ pop(Operand(rdx, 0));
   __ addq(rdx, Immediate(sizeof(intptr_t)));
-  __ bind(&pop_loop_header);
   __ cmpq(rcx, rsp);
   __ j(not_equal, &pop_loop);
 
@@ -1086,33 +1019,28 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ pop(rax);
 
   // Replace the current frame with the output frames.
-  Label outer_push_loop, inner_push_loop,
-      outer_loop_header, inner_loop_header;
+  Label outer_push_loop, inner_push_loop;
   // Outer loop state: rax = current FrameDescription**, rdx = one past the
   // last FrameDescription**.
   __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
   __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
   __ lea(rdx, Operand(rax, rdx, times_8, 0));
-  __ jmp(&outer_loop_header);
   __ bind(&outer_push_loop);
   // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
   __ movq(rbx, Operand(rax, 0));
   __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
-  __ jmp(&inner_loop_header);
   __ bind(&inner_push_loop);
   __ subq(rcx, Immediate(sizeof(intptr_t)));
   __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
-  __ bind(&inner_loop_header);
   __ testq(rcx, rcx);
   __ j(not_zero, &inner_push_loop);
   __ addq(rax, Immediate(kPointerSize));
-  __ bind(&outer_loop_header);
   __ cmpq(rax, rdx);
   __ j(below, &outer_push_loop);
 
   // In case of OSR, we have to restore the XMM registers.
   if (type() == OSR) {
-    for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
+    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
       XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
       int src_offset = i * kDoubleSize + double_regs_offset;
       __ movsd(xmm_reg, Operand(rbx, src_offset));
index 26032bcda11117bd90b82d1c3f9a62b63bb9bd0e..c69b27c445568e21b07d7ee2d638d8cce38702a0 100644 (file)
@@ -119,45 +119,35 @@ void LCodeGen::Comment(const char* format, ...) {
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
-  if (info()->IsOptimizing()) {
-    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
 #ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
-      __ int3();
-    }
+  if (strlen(FLAG_stop_at) > 0 &&
+      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+    __ int3();
+  }
 #endif
 
-    // Strict mode functions need to replace the receiver with undefined
-    // when called as functions (without an explicit receiver
-    // object). rcx is zero for method calls and non-zero for function
-    // calls.
-    if (!info_->is_classic_mode() || info_->is_native()) {
-      Label ok;
-      __ testq(rcx, rcx);
-      __ j(zero, &ok, Label::kNear);
-      // +1 for return address.
-      int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
-      __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
-      __ movq(Operand(rsp, receiver_offset), kScratchRegister);
-      __ bind(&ok);
-    }
+  // Strict mode functions need to replace the receiver with undefined
+  // when called as functions (without an explicit receiver
+  // object). rcx is zero for method calls and non-zero for function
+  // calls.
+  if (!info_->is_classic_mode() || info_->is_native()) {
+    Label ok;
+    __ testq(rcx, rcx);
+    __ j(zero, &ok, Label::kNear);
+    // +1 for return address.
+    int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+    __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+    __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+    __ bind(&ok);
   }
 
   info()->set_prologue_offset(masm_->pc_offset());
-  if (NeedsEagerFrame()) {
-    ASSERT(!frame_is_built_);
-    frame_is_built_ = true;
-    __ push(rbp);  // Caller's frame pointer.
-    __ movq(rbp, rsp);
-    __ push(rsi);  // Callee's context.
-    if (info()->IsStub()) {
-      __ Push(Smi::FromInt(StackFrame::STUB));
-    } else {
-      __ push(rdi);  // Callee's JS function.
-    }
-  }
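+  // Every compiled function builds the standard JavaScript frame:
+  // saved rbp, context, and the callee's JS function.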
+  __ push(rbp);  // Caller's frame pointer.
+  __ movq(rbp, rsp);
+  __ push(rsi);  // Callee's context.
+  __ push(rdi);  // Callee's JS function.
 
   // Reserve space for the stack slots needed by the code.
   int slots = GetStackSlotCount();
@@ -187,7 +177,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   // Possibly allocate a local context.
-  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is still in rdi.
@@ -223,7 +213,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   // Trace the call.
-  if (FLAG_trace && info()->IsOptimizing()) {
+  if (FLAG_trace) {
     __ CallRuntime(Runtime::kTraceEnter, 0);
   }
   return !is_aborted();
@@ -276,55 +266,9 @@ bool LCodeGen::GenerateBody() {
 
 
 bool LCodeGen::GenerateJumpTable() {
-  Label needs_frame_not_call;
-  Label needs_frame_is_call;
   for (int i = 0; i < jump_table_.length(); i++) {
     __ bind(&jump_table_[i].label);
-    Address entry = jump_table_[i].address;
-    if (jump_table_[i].needs_frame) {
-      __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
-      if (jump_table_[i].is_lazy_deopt) {
-        if (needs_frame_is_call.is_bound()) {
-          __ jmp(&needs_frame_is_call);
-        } else {
-          __ bind(&needs_frame_is_call);
-          __ push(rbp);
-          __ movq(rbp, rsp);
-          __ push(rsi);
-          // This variant of deopt can only be used with stubs. Since we don't
-          // have a function pointer to install in the stack frame that we're
-          // building, install a special marker there instead.
-          ASSERT(info()->IsStub());
-          __ Move(rsi, Smi::FromInt(StackFrame::STUB));
-          __ push(rsi);
-          __ movq(rsi, MemOperand(rsp, kPointerSize));
-          __ call(kScratchRegister);
-        }
-      } else {
-        if (needs_frame_not_call.is_bound()) {
-          __ jmp(&needs_frame_not_call);
-        } else {
-          __ bind(&needs_frame_not_call);
-          __ push(rbp);
-          __ movq(rbp, rsp);
-          __ push(r8);
-          // This variant of deopt can only be used with stubs. Since we don't
-          // have a function pointer to install in the stack frame that we're
-          // building, install a special marker there instead.
-          ASSERT(info()->IsStub());
-          __ Move(rsi, Smi::FromInt(StackFrame::STUB));
-          __ push(rsi);
-          __ movq(rsi, MemOperand(rsp, kPointerSize));
-          __ jmp(kScratchRegister);
-        }
-      }
-    } else {
-      if (jump_table_[i].is_lazy_deopt) {
-        __ Call(entry, RelocInfo::RUNTIME_ENTRY);
-      } else {
-        __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
-      }
-    }
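+    // Tail-jump to the eager deoptimization entry recorded for this label.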
+    __ Jump(jump_table_[i].address, RelocInfo::RUNTIME_ENTRY);
   }
   return !is_aborted();
 }
@@ -336,32 +280,10 @@ bool LCodeGen::GenerateDeferredCode() {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
-      if (NeedsDeferredFrame()) {
-        Comment(";;; Deferred build frame",
-                code->instruction_index(),
-                code->instr()->Mnemonic());
-        ASSERT(!frame_is_built_);
-        ASSERT(info()->IsStub());
-        frame_is_built_ = true;
-        // Build the frame in such a way that esi isn't trashed.
-        __ push(rbp);  // Caller's frame pointer.
-        __ push(Operand(rbp, StandardFrameConstants::kContextOffset));
-        __ Push(Smi::FromInt(StackFrame::STUB));
-        __ lea(rbp, Operand(rsp, 2 * kPointerSize));
-      }
       Comment(";;; Deferred code @%d: %s.",
               code->instruction_index(),
               code->instr()->Mnemonic());
       code->Generate();
-      if (NeedsDeferredFrame()) {
-        Comment(";;; Deferred destroy frame",
-                code->instruction_index(),
-                code->instr()->Mnemonic());
-        ASSERT(frame_is_built_);
-        frame_is_built_ = false;
-        __ movq(rsp, rbp);
-        __ pop(rbp);
-      }
       __ jmp(code->exit());
     }
   }
@@ -474,9 +396,7 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
                    translation,
                    arguments_index,
                    arguments_count);
-  bool has_closure_id = !info()->closure().is_null() &&
-      *info()->closure() != *environment->closure();
-  int closure_id = has_closure_id
+  int closure_id = *info()->closure() != *environment->closure()
       ? DefineDeoptimizationLiteral(environment->closure())
       : Translation::kSelfLiteralId;
 
@@ -500,9 +420,6 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
     case ARGUMENTS_ADAPTOR:
       translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
       break;
-    case STUB:
-      translation->BeginCompiledStubFrame();
-      break;
   }
 
   // Inlined frames which push their arguments cause the index to be
@@ -693,33 +610,20 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
-  ASSERT(info()->IsOptimizing() || info()->IsStub());
-  Deoptimizer::BailoutType bailout_type = info()->IsStub()
-      ? Deoptimizer::LAZY
-      : Deoptimizer::EAGER;
-  Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
+  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
   if (entry == NULL) {
     Abort("bailout was not prepared");
     return;
   }
 
-  ASSERT(info()->IsStub() || frame_is_built_);
-  bool lazy_deopt = info()->IsStub();
   if (cc == no_condition) {
-    if (lazy_deopt) {
-      __ Call(entry, RelocInfo::RUNTIME_ENTRY);
-    } else {
-      __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
-    }
+    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
    // We often have several deopts to the same entry; reuse the last
    // jump table entry if this is the case.
     if (jump_table_.is_empty() ||
-        jump_table_.last().address != entry ||
-        jump_table_.last().needs_frame != !frame_is_built_ ||
-        jump_table_.last().is_lazy_deopt != lazy_deopt) {
-      JumpTableEntry table_entry(entry, !frame_is_built_, lazy_deopt);
-      jump_table_.Add(table_entry, zone());
+        jump_table_.last().address != entry) {
+      jump_table_.Add(JumpTableEntry(entry), zone());
     }
     __ j(cc, &jump_table_.last().label);
   }
@@ -2430,22 +2334,15 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
 
 
 void LCodeGen::DoReturn(LReturn* instr) {
-  if (FLAG_trace && info()->IsOptimizing()) {
+  if (FLAG_trace) {
     // Preserve the return value on the stack and rely on the runtime
     // call to return the value in the same register.
     __ push(rax);
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
-  if (NeedsEagerFrame()) {
-    __ movq(rsp, rbp);
-    __ pop(rbp);
-  }
-  if (info()->IsStub()) {
-    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-    __ Ret(0, r10);
-  } else {
-    __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
-  }
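+  // Tear down the frame, then return and drop the receiver and parameters.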
+  __ movq(rsp, rbp);
+  __ pop(rbp);
+  __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
 }
 
 
@@ -4676,10 +4573,10 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
 void LCodeGen::DoCheckMapCommon(Register reg,
                                 Handle<Map> map,
                                 CompareMapMode mode,
-                                LInstruction* instr) {
+                                LEnvironment* env) {
   Label success;
   __ CompareMap(reg, map, &success, mode);
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, env);
   __ bind(&success);
 }
 
@@ -4697,7 +4594,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
     __ j(equal, &success);
   }
   Handle<Map> map = map_set->last();
-  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
+  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
   __ bind(&success);
 }
 
@@ -4764,7 +4661,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
     DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
-                     ALLOW_ELEMENT_TRANSITION_MAPS, instr);
+                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
     // Load next prototype object.
@@ -4773,7 +4670,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
 
   // Check the holder map.
     DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
-                     ALLOW_ELEMENT_TRANSITION_MAPS, instr);
+                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
 }
 
 
@@ -5309,7 +5206,6 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
 
 
 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
-  if (info()->IsStub()) return;
   // Ensure that we have enough space after the previous lazy-bailout
   // instruction for patching the code here.
   int current_pc = masm()->pc_offset();
index 2fa10e151766dc7d7bbc896bd11f0769b8dfc5af..e068f14b5ad94c833b5b17a3019a00846ada9e14 100644 (file)
@@ -63,7 +63,6 @@ class LCodeGen BASE_EMBEDDED {
         deferred_(8, info->zone()),
         osr_pc_offset_(-1),
         last_lazy_deopt_pc_(0),
-        frame_is_built_(false),
         safepoints_(info->zone()),
         resolver_(this),
         expected_safepoint_kind_(Safepoint::kSimple) {
@@ -78,15 +77,6 @@ class LCodeGen BASE_EMBEDDED {
   Heap* heap() const { return isolate()->heap(); }
   Zone* zone() const { return zone_; }
 
-  bool NeedsEagerFrame() const {
-    return GetStackSlotCount() > 0 ||
-        info()->is_non_deferred_calling() ||
-        !info()->IsStub();
-  }
-  bool NeedsDeferredFrame() const {
-    return !NeedsEagerFrame() && info()->is_deferred_calling();
-  }
-
   // Support for converting LOperands to assembler types.
   Register ToRegister(LOperand* op) const;
   XMMRegister ToDoubleRegister(LOperand* op) const;
@@ -120,7 +110,7 @@ class LCodeGen BASE_EMBEDDED {
                                        Label* map_check);
 
   void DoCheckMapCommon(Register reg, Handle<Map> map,
-                        CompareMapMode mode, LInstruction* instr);
+                        CompareMapMode mode, LEnvironment* env);
 
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
@@ -168,7 +158,7 @@ class LCodeGen BASE_EMBEDDED {
                        Register scratch);
 
   int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
-  int GetParameterCount() const { return info()->num_parameters(); }
+  int GetParameterCount() const { return scope()->num_parameters(); }
 
   void Abort(const char* reason);
   void Comment(const char* format, ...);
@@ -337,15 +327,11 @@ class LCodeGen BASE_EMBEDDED {
                     int* offset);
 
   struct JumpTableEntry {
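     // One entry (and label) per distinct deoptimization entry address;
     // DeoptimizeIf reuses the last entry for consecutive identical deopts.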
-    inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
+    explicit inline JumpTableEntry(Address entry)
         : label(),
-          address(entry),
-          needs_frame(frame),
-          is_lazy_deopt(is_lazy) { }
+          address(entry) { }
     Label label;
     Address address;
-    bool needs_frame;
-    bool is_lazy_deopt;
   };
 
   void EnsureSpaceForLazyDeopt(int space_needed);
@@ -374,7 +360,6 @@ class LCodeGen BASE_EMBEDDED {
   ZoneList<LDeferredCode*> deferred_;
   int osr_pc_offset_;
   int last_lazy_deopt_pc_;
-  bool frame_is_built_;
 
   // Builder that keeps track of safepoints in the code. The table
   // itself is emitted at the end of the generated code.
@@ -389,7 +374,6 @@ class LCodeGen BASE_EMBEDDED {
    public:
     explicit PushSafepointRegistersScope(LCodeGen* codegen)
         : codegen_(codegen) {
-      ASSERT(codegen_->info()->is_calling());
       ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
       codegen_->masm_->PushSafepointRegisters();
       codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
index 574970c6994e701cab44a590ac58d0f6aa121479..81228cef8ca76fd184470f784fad3c87b89e94cc 100644 (file)
@@ -44,10 +44,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
 #undef DEFINE_COMPILE
 
 LOsrEntry::LOsrEntry() {
-  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
+  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
     register_spills_[i] = NULL;
   }
-  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
     double_register_spills_[i] = NULL;
   }
 }
@@ -619,8 +619,6 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
-  info()->MarkAsNonDeferredCalling();
-
 #ifdef DEBUG
   instr->VerifyCall();
 #endif
@@ -1637,12 +1635,8 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
 LInstruction* LChunkBuilder::DoChange(HChange* instr) {
   Representation from = instr->from();
   Representation to = instr->to();
-  // Only mark conversions that might need to allocate as calling rather than
-  // all changes. This makes simple, non-allocating conversion not have to force
-  // building a stack frame.
   if (from.IsTagged()) {
     if (to.IsDouble()) {
-      info()->MarkAsDeferredCalling();
       LOperand* value = UseRegister(instr->value());
       LNumberUntagD* res = new(zone()) LNumberUntagD(value);
       return AssignEnvironment(DefineAsRegister(res));
@@ -1660,7 +1654,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
     }
   } else if (from.IsDouble()) {
     if (to.IsTagged()) {
-      info()->MarkAsDeferredCalling();
       LOperand* value = UseRegister(instr->value());
       LOperand* temp = TempRegister();
 
@@ -1674,7 +1667,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
       return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToI(value)));
     }
   } else if (from.IsInteger32()) {
-    info()->MarkAsDeferredCalling();
     if (to.IsTagged()) {
       HValue* val = instr->value();
       LOperand* value = UseRegister(val);
@@ -2141,17 +2133,8 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
 
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
-  LParameter* result = new(zone()) LParameter;
-  if (info()->IsOptimizing()) {
-    int spill_index = chunk()->GetParameterStackSlot(instr->index());
-    return DefineAsSpilled(result, spill_index);
-  } else {
-    ASSERT(info()->IsStub());
-    CodeStubInterfaceDescriptor* descriptor =
-        info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
-    Register reg = descriptor->register_params_[instr->index()];
-    return DefineFixed(result, reg);
-  }
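+  // Parameters always live in the stack slots pushed by the caller.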
+  int spill_index = chunk()->GetParameterStackSlot(instr->index());
+  return DefineAsSpilled(new(zone()) LParameter, spill_index);
 }
 
 
@@ -2247,7 +2230,6 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
 
 
 LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
-  info()->MarkAsDeferredCalling();
   if (instr->is_function_entry()) {
     return MarkAsCall(new(zone()) LStackCheck, instr);
   } else {
index f5f0250abd573d930750b7c501bf5c05ff7e77bd..4a909a1f2e03c54b993e7cd5eb79bbc410a59537 100644 (file)
@@ -252,11 +252,6 @@ class LInstruction: public ZoneObject {
 
   void MarkAsCall() { is_call_ = true; }
 
-  // Interface to the register allocator and iterators.
-  bool ClobbersTemps() const { return is_call_; }
-  bool ClobbersRegisters() const { return is_call_; }
-  bool ClobbersDoubleRegisters() const { return is_call_; }
-
   virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
 
   // Interface to the register allocator and iterators.
@@ -2296,9 +2291,8 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
   // slot, i.e., that must also be restored to the spill slot on OSR entry.
   // NULL if the register has no assigned spill slot.  Indexed by allocation
   // index.
-  LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
-  LOperand* double_register_spills_[
-      DoubleRegister::kMaxNumAllocatableRegisters];
+  LOperand* register_spills_[Register::kNumAllocatableRegisters];
+  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
 };
 
 
index 8513a68fa915fa98ad69f5b544941287e4446c38..4e4f2c572c88097c74185c2ebc48a22cb6387e6f 100644 (file)
@@ -3432,7 +3432,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
         arg_stack_space * kPointerSize;
     subq(rsp, Immediate(space));
     int offset = -2 * kPointerSize;
-    for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
+    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
       XMMRegister reg = XMMRegister::FromAllocationIndex(i);
       movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
     }
@@ -3476,7 +3476,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
   // r15 : argv
   if (save_doubles) {
     int offset = -2 * kPointerSize;
-    for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
+    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
       XMMRegister reg = XMMRegister::FromAllocationIndex(i);
       movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
     }
index 716a591f2e4079cd75b8627e0556c30d2923a3d9..0d8d6f2cca649c324e5cb81e730334c81ece0786 100644 (file)
@@ -1414,9 +1414,9 @@ class MacroAssembler: public Assembler {
     return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
   }
 
-  // Needs access to SafepointRegisterStackIndex for compiled frame
+  // Needs access to SafepointRegisterStackIndex for optimized frame
   // traversal.
-  friend class CompiledFrame;
+  friend class OptimizedFrame;
 };
 
 
index 032996686e312dd1daf3b9c95cf24895d2cd9585..683aa9d40904bcaf3cd75c7e510d50f5d9a1f014 100644 (file)
@@ -3210,19 +3210,12 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
   //  -- rsp[0] : return address
   // -----------------------------------
   ElementsKind elements_kind = receiver_map->elements_kind();
-  if (receiver_map->has_fast_elements() ||
-      receiver_map->has_external_array_elements()) {
-    Handle<Code> stub = KeyedLoadFastElementStub(
-        receiver_map->instance_type() == JS_ARRAY_TYPE,
-        elements_kind).GetCode();
-    __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
-  } else {
-    Handle<Code> stub =
-        KeyedLoadDictionaryElementStub().GetCode();
-    __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
-  }
+  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
 
-  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+  __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
+
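+  // DispatchMap tail-jumps to the stub on a map match and falls through
+  // to the miss handler otherwise.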
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
   return GetCode(Code::NORMAL, factory()->empty_string());
@@ -3464,6 +3457,140 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
 }
 
 
+void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+    MacroAssembler* masm,
+    ElementsKind elements_kind) {
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label slow, miss_force_generic;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
+
+  // Check that the index is in range.
+  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ SmiToInteger32(rcx, rax);
+  __ cmpq(rax, FieldOperand(rbx, ExternalArray::kLengthOffset));
+  // Unsigned comparison catches both negative and too-large values.
+  __ j(above_equal, &miss_force_generic);
+
+  // rax: index (as a smi)
+  // rdx: receiver (JSObject)
+  // rcx: untagged index
+  // rbx: elements array
+  __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+  // rbx: base pointer of external storage
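+  // Load the element at untagged index rcx, sign- or zero-extending
+  // integer values into rcx and converting float values into xmm0.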
+  switch (elements_kind) {
+    case EXTERNAL_BYTE_ELEMENTS:
+      __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
+      break;
+    case EXTERNAL_PIXEL_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
+      break;
+    case EXTERNAL_SHORT_ELEMENTS:
+      __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
+      break;
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
+      break;
+    case EXTERNAL_INT_ELEMENTS:
+      __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
+      break;
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      __ movl(rcx, Operand(rbx, rcx, times_4, 0));
+      break;
+    case EXTERNAL_FLOAT_ELEMENTS:
+      __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
+      break;
+    case EXTERNAL_DOUBLE_ELEMENTS:
+      __ movsd(xmm0, Operand(rbx, rcx, times_8, 0));
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // rax: index
+  // rdx: receiver
+  // For integer array types:
+  // rcx: value
+  // For floating-point array type:
+  // xmm0: value as double.
+
+  ASSERT(kSmiValueSize == 32);
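+  // With 32-bit smis, every signed 32-bit element fits in a smi; only
+  // unsigned 32-bit values can exceed the smi range and need boxing.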
+  if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+    // For the UnsignedInt array type, we need to see whether
+    // the value can be represented in a Smi. If not, we need to convert
+    // it to a HeapNumber.
+    Label box_int;
+
+    __ JumpIfUIntNotValidSmiValue(rcx, &box_int, Label::kNear);
+
+    __ Integer32ToSmi(rax, rcx);
+    __ ret(0);
+
+    __ bind(&box_int);
+
+    // Allocate a HeapNumber for the int and perform int-to-double
+    // conversion.
+    // The value is zero-extended since we loaded the value from memory
+    // with movl.
+    __ cvtqsi2sd(xmm0, rcx);
+
+    __ AllocateHeapNumber(rcx, rbx, &slow);
+    // Set the value.
+    __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+    __ movq(rax, rcx);
+    __ ret(0);
+  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+             elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    // For the floating-point array type, we need to always allocate a
+    // HeapNumber.
+    __ AllocateHeapNumber(rcx, rbx, &slow);
+    // Set the value.
+    __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+    __ movq(rax, rcx);
+    __ ret(0);
+  } else {
+    __ Integer32ToSmi(rax, rcx);
+    __ ret(0);
+  }
+
+  // Slow case: Jump to runtime.
+  __ bind(&slow);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
+
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+
+  Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Miss case: Jump to runtime.
+  __ bind(&miss_force_generic);
+
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
 void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     MacroAssembler* masm,
     ElementsKind elements_kind) {
@@ -3653,6 +3780,98 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
 }
 
 
+void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
+
+  // Get the elements array.
+  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ AssertFastElements(rcx);
+
+  // Check that the key is within bounds.
+  __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ j(above_equal, &miss_force_generic);
+
+  // Load the result and make sure it's not the hole.
+  SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
+  __ movq(rbx, FieldOperand(rcx,
+                            index.reg,
+                            index.scale,
+                            FixedArray::kHeaderSize));
+  __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
+  __ j(equal, &miss_force_generic);
+  __ movq(rax, rbx);
+  __ ret(0);
+
+  __ bind(&miss_force_generic);
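+  // The force-generic miss handler rewrites this IC to the generic stub.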
+  Code* code = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  Handle<Code> ic(code);
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic, slow_allocate_heapnumber;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
+
+  // Get the elements array.
+  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ AssertFastElements(rcx);
+
+  // Check that the key is within bounds.
+  __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ j(above_equal, &miss_force_generic);
+
+  // Check for the hole, which is stored as a distinguished NaN; comparing
+  // the upper 32 bits against kHoleNanUpper32 is sufficient to detect it.
+  __ SmiToInteger32(kScratchRegister, rax);
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  __ cmpl(FieldOperand(rcx, kScratchRegister, times_8, offset),
+          Immediate(kHoleNanUpper32));
+  __ j(equal, &miss_force_generic);
+
+  // Always allocate a heap number for the result.
+  __ movsd(xmm0, FieldOperand(rcx, kScratchRegister, times_8,
+                              FixedDoubleArray::kHeaderSize));
+  __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
+  // Set the value.
+  __ movq(rax, rcx);
+  __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+  __ ret(0);
+
+  __ bind(&slow_allocate_heapnumber);
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ jmp(slow_ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&miss_force_generic);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
 void KeyedStoreStubCompiler::GenerateStoreFastElement(
     MacroAssembler* masm,
     bool is_js_array,
index f773a77e4c3e07f0b4486ec762a90de27ff05c58..da0950543768f96436755e7ada8ef7ad980ea35c 100644 (file)
@@ -442,8 +442,7 @@ TEST(DisasmIa320) {
   }
 
   {
-    if (CpuFeatures::IsSupported(SSE2) &&
-        CpuFeatures::IsSupported(SSE4_1)) {
+    if (CpuFeatures::IsSupported(SSE4_1)) {
       CpuFeatures::Scope scope(SSE4_1);
       __ pextrd(eax, xmm0, 1);
       __ pinsrd(xmm1, eax, 0);
index 44813b714fb4f86864cfce4f74762a3dc2edecfa..69abd8d6800e1543e788508c0eab109a9eeb0297 100644 (file)
@@ -546,9 +546,9 @@ TEST(BootUpMemoryUse) {
       }
     } else {
       if (v8::internal::Snapshot::IsEnabled()) {
-        CHECK_LE(delta, 2600 * 1024);  // 2400.
+        CHECK_LE(delta, 2500 * 1024);  // 2400.
       } else {
-        CHECK_LE(delta, 3000 * 1024);  // 2920.
+        CHECK_LE(delta, 2860 * 1024);  // 2760.
       }
     }
   }
index f459126db08a4fe3f3033d45e396b26dbc6f5889..87f7d0d766ee02c43ec5af00b3959321d2e53670 100644 (file)
@@ -152,7 +152,6 @@ var knownProblems = {
   "LazyRecompile": true,
   "ParallelRecompile": true,
   "NotifyDeoptimized": true,
-  "NotifyICMiss": true,
   "NotifyOSR": true,
   "CreateObjectLiteralBoilerplate": true,
   "CloneLiteralBoilerplate": true,
index 046a72c4e065ccc5115f6e903f26835c7a7678c7..aad07c70943bba0e3216b3a1f0fd71bc5b4d7ce4 100644 (file)
             '../../src/circular-queue.h',
             '../../src/code-stubs.cc',
             '../../src/code-stubs.h',
-            '../../src/code-stubs-hydrogen.cc',
             '../../src/code.h',
             '../../src/codegen.cc',
             '../../src/codegen.h',