Upstream version 9.38.207.0

diff --git a/src/v8/src/deoptimizer.cc b/src/v8/src/deoptimizer.cc
index a19a827..b3ae6b1 100644
@@ -1,40 +1,17 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "accessors.h"
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "disasm.h"
-#include "full-codegen.h"
-#include "global-handles.h"
-#include "macro-assembler.h"
-#include "prettyprinter.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/accessors.h"
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/disasm.h"
+#include "src/full-codegen.h"
+#include "src/global-handles.h"
+#include "src/macro-assembler.h"
+#include "src/prettyprinter.h"
 
 
 namespace v8 {
@@ -42,7 +19,7 @@ namespace internal {
 
 static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
   return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
-                                  OS::CommitPageSize(),
+                                  base::OS::CommitPageSize(),
 #if defined(__native_client__)
   // The Native Client port of V8 uses an interpreter,
   // so code pages don't need PROT_EXEC.
@@ -56,9 +33,7 @@ static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
 
 DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
     : allocator_(allocator),
-#ifdef ENABLE_DEBUGGER_SUPPORT
       deoptimized_frame_info_(NULL),
-#endif
       current_(NULL) {
   for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
     deopt_entry_code_entries_[i] = -1;
@@ -75,13 +50,11 @@ DeoptimizerData::~DeoptimizerData() {
 }
 
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
 void DeoptimizerData::Iterate(ObjectVisitor* v) {
   if (deoptimized_frame_info_ != NULL) {
     deoptimized_frame_info_->Iterate(v);
   }
 }
-#endif
 
 
 Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
@@ -91,7 +64,7 @@ Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
     Object* element = native_context->DeoptimizedCodeListHead();
     while (!element->IsUndefined()) {
       Code* code = Code::cast(element);
-      ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+      CHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
       if (code->contains(addr)) return code;
       element = code->next_code_link();
     }
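
Note: a pattern that recurs throughout this patch is the retirement of ASSERT in favor of DCHECK and CHECK. DCHECK keeps the old debug-only behavior, while an upgrade to CHECK makes the invariant fatal in release builds as well; the UNREACHABLE() sites replaced by FATAL(...) below follow the same hardening idea. A minimal sketch of the distinction (simplified, not the actual V8 macros, which also report file and line):

    // Simplified sketch, not the actual V8 macros.
    #define CHECK(condition)                                   \
      do {                                                     \
        if (!(condition)) FATAL("Check failed: " #condition);  \
      } while (false)

    #ifdef DEBUG
    #define DCHECK(condition) CHECK(condition)  // checked in debug builds
    #else
    #define DCHECK(condition) ((void) 0)        // compiled away in release
    #endif
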
@@ -115,7 +88,7 @@ Deoptimizer* Deoptimizer::New(JSFunction* function,
                                              from,
                                              fp_to_sp_delta,
                                              NULL);
-  ASSERT(isolate->deoptimizer_data()->current_ == NULL);
+  CHECK(isolate->deoptimizer_data()->current_ == NULL);
   isolate->deoptimizer_data()->current_ = deoptimizer;
   return deoptimizer;
 }
@@ -128,7 +101,7 @@ static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
 size_t Deoptimizer::GetMaxDeoptTableSize() {
   int entries_size =
       Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
-  int commit_page_size = static_cast<int>(OS::CommitPageSize());
+  int commit_page_size = static_cast<int>(base::OS::CommitPageSize());
   int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
                     commit_page_size) + 1;
   return static_cast<size_t>(commit_page_size * page_count);
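
Note: to make the page rounding in GetMaxDeoptTableSize concrete, here it is with illustrative numbers: say kMaxNumberOfEntries = 16384, a 10-byte table_entry_size_, and a 4 KB commit page (all three vary by platform and configuration). The 2048 is kDeoptTableMaxEpilogueCodeSize from above; the extra page guarantees room for the epilogue after the entries.

    int entries_size = 16384 * 10;                       // 163840 bytes of entries
    int commit_page_size = 4096;
    int page_count = ((2048 + 163840 - 1) / 4096) + 1;   // 40 + 1 = 41 pages
    size_t max_size = static_cast<size_t>(4096 * 41);    // 167936 bytes reserved
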
@@ -137,7 +110,7 @@ size_t Deoptimizer::GetMaxDeoptTableSize() {
 
 Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
   Deoptimizer* result = isolate->deoptimizer_data()->current_;
-  ASSERT(result != NULL);
+  CHECK_NE(result, NULL);
   result->DeleteFrameDescriptions();
   isolate->deoptimizer_data()->current_ = NULL;
   return result;
@@ -160,13 +133,12 @@ int Deoptimizer::ConvertJSFrameIndexToFrameIndex(int jsframe_index) {
 }
 
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
 DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
     JavaScriptFrame* frame,
     int jsframe_index,
     Isolate* isolate) {
-  ASSERT(frame->is_optimized());
-  ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
+  CHECK(frame->is_optimized());
+  CHECK(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
 
   // Get the function and code from the frame.
   JSFunction* function = frame->function();
@@ -176,7 +148,7 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
   // return address must be at a place in the code with deoptimization support.
   SafepointEntry safepoint_entry = code->GetSafepointEntry(frame->pc());
   int deoptimization_index = safepoint_entry.deoptimization_index();
-  ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex);
+  CHECK_NE(deoptimization_index, Safepoint::kNoDeoptimizationIndex);
 
   // Always use the actual stack slots when calculating the fp to sp
   // delta, adding two for the function and context.
@@ -199,7 +171,7 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
 
   // Create the GC safe output frame information and register it for GC
   // handling.
-  ASSERT_LT(jsframe_index, deoptimizer->jsframe_count());
+  CHECK_LT(jsframe_index, deoptimizer->jsframe_count());
 
   // Convert JS frame index into frame index.
   int frame_index = deoptimizer->ConvertJSFrameIndexToFrameIndex(jsframe_index);
@@ -251,11 +223,11 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
 
 void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
                                                  Isolate* isolate) {
-  ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == info);
+  CHECK_EQ(isolate->deoptimizer_data()->deoptimized_frame_info_, info);
   delete info;
   isolate->deoptimizer_data()->deoptimized_frame_info_ = NULL;
 }
-#endif
+
 
 void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
                                                 int count,
@@ -269,7 +241,7 @@ void Deoptimizer::VisitAllOptimizedFunctionsForContext(
     Context* context, OptimizedFunctionVisitor* visitor) {
   DisallowHeapAllocation no_allocation;
 
-  ASSERT(context->IsNativeContext());
+  CHECK(context->IsNativeContext());
 
   visitor->EnterContext(context);
 
@@ -292,13 +264,13 @@ void Deoptimizer::VisitAllOptimizedFunctionsForContext(
         context->SetOptimizedFunctionsListHead(next);
       }
       // The visitor should not alter the link directly.
-      ASSERT(function->next_function_link() == next);
+      CHECK_EQ(function->next_function_link(), next);
       // Set the next function link to undefined to indicate it is no longer
       // in the optimized functions list.
       function->set_next_function_link(context->GetHeap()->undefined_value());
     } else {
       // The visitor should not alter the link directly.
-      ASSERT(function->next_function_link() == next);
+      CHECK_EQ(function->next_function_link(), next);
       // Preserve this element.
       prev = function;
     }
@@ -342,7 +314,6 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
       // Unlink this function and evict from optimized code map.
       SharedFunctionInfo* shared = function->shared();
       function->set_code(shared->code());
-      shared->EvictFromOptimizedCodeMap(code, "deoptimized function");
 
       if (FLAG_trace_deopt) {
         CodeTracer::Scope scope(code->GetHeap()->isolate()->GetCodeTracer());
@@ -358,9 +329,44 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
   SelectedCodeUnlinker unlinker;
   VisitAllOptimizedFunctionsForContext(context, &unlinker);
 
+  Isolate* isolate = context->GetHeap()->isolate();
+#ifdef DEBUG
+  Code* topmost_optimized_code = NULL;
+  bool safe_to_deopt_topmost_optimized_code = false;
+  // Make sure all activations of optimized code can deopt at their current PC.
+  // The topmost optimized code has special handling because it cannot be
+  // deoptimized due to weak object dependency.
+  for (StackFrameIterator it(isolate, isolate->thread_local_top());
+       !it.done(); it.Advance()) {
+    StackFrame::Type type = it.frame()->type();
+    if (type == StackFrame::OPTIMIZED) {
+      Code* code = it.frame()->LookupCode();
+      if (FLAG_trace_deopt) {
+        JSFunction* function =
+            static_cast<OptimizedFrame*>(it.frame())->function();
+        CodeTracer::Scope scope(isolate->GetCodeTracer());
+        PrintF(scope.file(), "[deoptimizer found activation of function: ");
+        function->PrintName(scope.file());
+        PrintF(scope.file(),
+               " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+      }
+      SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
+      int deopt_index = safepoint.deoptimization_index();
+      // Turbofan deopt is checked when we are patching addresses on the stack.
+      bool turbofanned = code->is_turbofanned();
+      bool safe_to_deopt =
+          deopt_index != Safepoint::kNoDeoptimizationIndex || turbofanned;
+      CHECK(topmost_optimized_code == NULL || safe_to_deopt || turbofanned);
+      if (topmost_optimized_code == NULL) {
+        topmost_optimized_code = code;
+        safe_to_deopt_topmost_optimized_code = safe_to_deopt;
+      }
+    }
+  }
+#endif
+
   // Move marked code from the optimized code list to the deoptimized
   // code list, collecting them into a ZoneList.
-  Isolate* isolate = context->GetHeap()->isolate();
   Zone zone(isolate);
   ZoneList<Code*> codes(10, &zone);
 
@@ -369,8 +375,9 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
   Object* element = context->OptimizedCodeListHead();
   while (!element->IsUndefined()) {
     Code* code = Code::cast(element);
-    ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+    CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
     Object* next = code->next_code_link();
+
     if (code->marked_for_deoptimization()) {
       // Put the code into the list for later patching.
       codes.Add(code, &zone);
@@ -393,19 +400,93 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
     element = next;
   }
 
+  if (FLAG_turbo_deoptimization) {
+    PatchStackForMarkedCode(isolate);
+  }
+
   // TODO(titzer): we need a handle scope only because of the macro assembler,
   // which is only used in EnsureCodeForDeoptimizationEntry.
   HandleScope scope(isolate);
+
   // Now patch all the codes for deoptimization.
   for (int i = 0; i < codes.length(); i++) {
+#ifdef DEBUG
+    if (codes[i] == topmost_optimized_code) {
+      DCHECK(safe_to_deopt_topmost_optimized_code);
+    }
+#endif
     // It is finally time to die, code object.
+
+    // Remove the code from optimized code map.
+    DeoptimizationInputData* deopt_data =
+        DeoptimizationInputData::cast(codes[i]->deoptimization_data());
+    SharedFunctionInfo* shared =
+        SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+    shared->EvictFromOptimizedCodeMap(codes[i], "deoptimized code");
+
     // Do platform-specific patching to force any activations to lazy deopt.
-    PatchCodeForDeoptimization(isolate, codes[i]);
+    //
+    // We skip patching Turbofan code - we patch return addresses on the stack.
+    // TODO(jarin) We should still zap the code object (but we have to
+    // be careful not to zap the deoptimization block).
+    if (!codes[i]->is_turbofanned()) {
+      PatchCodeForDeoptimization(isolate, codes[i]);
 
-    // We might be in the middle of incremental marking with compaction.
-    // Tell collector to treat this code object in a special way and
-    // ignore all slots that might have been recorded on it.
-    isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
+      // We might be in the middle of incremental marking with compaction.
+      // Tell collector to treat this code object in a special way and
+      // ignore all slots that might have been recorded on it.
+      isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
+    }
+  }
+}
+
+
+static int FindPatchAddressForReturnAddress(Code* code, int pc) {
+  DeoptimizationInputData* input_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+  int patch_count = input_data->ReturnAddressPatchCount();
+  for (int i = 0; i < patch_count; i++) {
+    int return_pc = input_data->ReturnAddressPc(i)->value();
+    int patch_pc = input_data->PatchedAddressPc(i)->value();
+    // If the supplied pc matches the return pc or if the address
+    // has already been patched, return the patch pc.
+    if (pc == return_pc || pc == patch_pc) {
+      return patch_pc;
+    }
+  }
+  return -1;
+}
+
+
+// For all marked Turbofanned code on stack, change the return address to go
+// to the deoptimization block.
+void Deoptimizer::PatchStackForMarkedCode(Isolate* isolate) {
+  // TODO(jarin) We should tolerate missing patch entry for the topmost frame.
+  for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
+       it.Advance()) {
+    StackFrame::Type type = it.frame()->type();
+    if (type == StackFrame::OPTIMIZED) {
+      Code* code = it.frame()->LookupCode();
+      if (code->is_turbofanned() && code->marked_for_deoptimization()) {
+        JSFunction* function =
+            static_cast<OptimizedFrame*>(it.frame())->function();
+        Address* pc_address = it.frame()->pc_address();
+        int pc_offset =
+            static_cast<int>(*pc_address - code->instruction_start());
+        int new_pc_offset = FindPatchAddressForReturnAddress(code, pc_offset);
+
+        if (FLAG_trace_deopt) {
+          CodeTracer::Scope scope(isolate->GetCodeTracer());
+          PrintF(scope.file(), "[patching stack address for function: ");
+          function->PrintName(scope.file());
+          PrintF(scope.file(), " (Pc offset %i -> %i)]\n", pc_offset,
+                 new_pc_offset);
+        }
+
+        CHECK_LE(0, new_pc_offset);
+        *pc_address += new_pc_offset - pc_offset;
+      }
+    }
   }
 }
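
Note: the in-place return-address rewrite above is easiest to see with numbers. Suppose (illustratively) a marked Turbofan frame would return to instruction_start() + 0x40, and the code's deoptimization data maps return pc 0x40 to patched pc 0x90:

    // pc_offset     = 0x40   (saved return address, relative to code start)
    // new_pc_offset = 0x90   (entry of the deoptimization block)
    *pc_address += 0x90 - 0x40;   // frame now resumes in the deopt block

Only the saved pc on the stack changes; the code object itself is left unpatched, which is why the earlier loop skips PatchCodeForDeoptimization for turbofanned code.
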
 
@@ -450,9 +531,11 @@ void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
         reinterpret_cast<intptr_t>(object));
   }
   if (object->IsJSGlobalProxy()) {
-    Object* proto = object->GetPrototype();
-    ASSERT(proto->IsJSGlobalObject());
-    Context* native_context = GlobalObject::cast(proto)->native_context();
+    PrototypeIterator iter(object->GetIsolate(), object);
+    // TODO(verwaest): This CHECK will be hit if the global proxy is detached.
+    CHECK(iter.GetCurrent()->IsJSGlobalObject());
+    Context* native_context =
+        GlobalObject::cast(iter.GetCurrent())->native_context();
     MarkAllCodeForContext(native_context);
     DeoptimizeMarkedCodeForContext(native_context);
   } else if (object->IsGlobalObject()) {
@@ -467,7 +550,7 @@ void Deoptimizer::MarkAllCodeForContext(Context* context) {
   Object* element = context->OptimizedCodeListHead();
   while (!element->IsUndefined()) {
     Code* code = Code::cast(element);
-    ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+    CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
     code->set_marked_for_deoptimization(true);
     element = code->next_code_link();
   }
@@ -502,7 +585,7 @@ bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type,
           ? FLAG_trace_stub_failures
           : FLAG_trace_deopt;
   }
-  UNREACHABLE();
+  FATAL("Unsupported deopt type");
   return false;
 }
 
@@ -514,7 +597,7 @@ const char* Deoptimizer::MessageFor(BailoutType type) {
     case LAZY: return "lazy";
     case DEBUGGER: return "debugger";
   }
-  UNREACHABLE();
+  FATAL("Unsupported deopt type");
   return NULL;
 }
 
@@ -553,7 +636,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
   if (function->IsSmi()) {
     function = NULL;
   }
-  ASSERT(from != NULL);
+  DCHECK(from != NULL);
   if (function != NULL && function->IsOptimized()) {
     function->shared()->increment_deopt_count();
     if (bailout_type_ == Deoptimizer::SOFT) {
@@ -568,9 +651,9 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
   compiled_code_ = FindOptimizedCode(function, optimized_code);
 
 #if DEBUG
-  ASSERT(compiled_code_ != NULL);
+  DCHECK(compiled_code_ != NULL);
   if (type == EAGER || type == SOFT || type == LAZY) {
-    ASSERT(compiled_code_->kind() != Code::FUNCTION);
+    DCHECK(compiled_code_->kind() != Code::FUNCTION);
   }
 #endif
 
@@ -601,10 +684,10 @@ Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
           : compiled_code;
     }
     case Deoptimizer::DEBUGGER:
-      ASSERT(optimized_code->contains(from_));
+      DCHECK(optimized_code->contains(from_));
       return optimized_code;
   }
-  UNREACHABLE();
+  FATAL("Could not find code for optimized function");
   return NULL;
 }
 
@@ -620,8 +703,8 @@ void Deoptimizer::PrintFunctionName() {
 
 
 Deoptimizer::~Deoptimizer() {
-  ASSERT(input_ == NULL && output_ == NULL);
-  ASSERT(disallow_heap_allocation_ == NULL);
+  DCHECK(input_ == NULL && output_ == NULL);
+  DCHECK(disallow_heap_allocation_ == NULL);
   delete trace_scope_;
 }
 
@@ -647,15 +730,15 @@ Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
                                             int id,
                                             BailoutType type,
                                             GetEntryMode mode) {
-  ASSERT(id >= 0);
+  CHECK_GE(id, 0);
   if (id >= kMaxNumberOfEntries) return NULL;
   if (mode == ENSURE_ENTRY_CODE) {
     EnsureCodeForDeoptimizationEntry(isolate, type, id);
   } else {
-    ASSERT(mode == CALCULATE_ENTRY_ADDRESS);
+    CHECK_EQ(mode, CALCULATE_ENTRY_ADDRESS);
   }
   DeoptimizerData* data = isolate->deoptimizer_data();
-  ASSERT(type < kBailoutTypesWithCodeEntry);
+  CHECK_LT(type, kBailoutTypesWithCodeEntry);
   MemoryChunk* base = data->deopt_entry_code_[type];
   return base->area_start() + (id * table_entry_size_);
 }
@@ -672,7 +755,7 @@ int Deoptimizer::GetDeoptimizationId(Isolate* isolate,
       addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
     return kNotDeoptimizationEntry;
   }
-  ASSERT_EQ(0,
+  DCHECK_EQ(0,
             static_cast<int>(addr - start) % table_entry_size_);
   return static_cast<int>(addr - start) / table_entry_size_;
 }
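
Note: GetDeoptimizationEntry and GetDeoptimizationId are inverses over a fixed-stride table: entry id lives at area_start() + id * table_entry_size_, so the id is recovered by subtracting the base and dividing. With an illustrative base of 0x1000 and an 8-byte stride:

    // id 0 -> 0x1000, id 1 -> 0x1008, id 2 -> 0x1010, ...
    // Reverse lookup for 0x1010:
    //   (0x1010 - 0x1000) % 8 == 0   // the DCHECK_EQ above
    //   (0x1010 - 0x1000) / 8 == 2   // the returned id
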
@@ -690,13 +773,10 @@ int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
       return data->PcAndState(i)->value();
     }
   }
-  PrintF(stderr, "[couldn't find pc offset for node=%d]\n", id.ToInt());
-  PrintF(stderr, "[method: %s]\n", shared->DebugName()->ToCString().get());
-  // Print the source code if available.
-  HeapStringAllocator string_allocator;
-  StringStream stream(&string_allocator);
-  shared->SourceCodePrint(&stream, -1);
-  PrintF(stderr, "[source:\n%s\n]", stream.ToCString().get());
+  OFStream os(stderr);
+  os << "[couldn't find pc offset for node=" << id.ToInt() << "]\n"
+     << "[method: " << shared->DebugName()->ToCString().get() << "]\n"
+     << "[source:\n" << SourceCodeOf(shared) << "\n]" << endl;
 
   FATAL("unable to find pc offset during deoptimization");
   return -1;
@@ -712,7 +792,7 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
     Object* element = native_context->DeoptimizedCodeListHead();
     while (!element->IsUndefined()) {
       Code* code = Code::cast(element);
-      ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+      DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
       length++;
       element = code->next_code_link();
     }
@@ -730,7 +810,13 @@ void Deoptimizer::DoComputeOutputFrames() {
       compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
     LOG(isolate(), CodeDeoptEvent(compiled_code_));
   }
-  ElapsedTimer timer;
+  base::ElapsedTimer timer;
+
+  // Determine basic deoptimization information.  The optimized frame is
+  // described by the input data.
+  DeoptimizationInputData* input_data =
+      DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
+
   if (trace_scope_ != NULL) {
     timer.Start();
     PrintF(trace_scope_->file(),
@@ -739,18 +825,16 @@ void Deoptimizer::DoComputeOutputFrames() {
            reinterpret_cast<intptr_t>(function_));
     PrintFunctionName();
     PrintF(trace_scope_->file(),
-           " @%d, FP to SP delta: %d]\n",
+           " (opt #%d) @%d, FP to SP delta: %d]\n",
+           input_data->OptimizationId()->value(),
            bailout_id_,
            fp_to_sp_delta_);
-    if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
+    if (bailout_type_ == EAGER || bailout_type_ == SOFT ||
+        (compiled_code_->is_hydrogen_stub())) {
       compiled_code_->PrintDeoptLocation(trace_scope_->file(), bailout_id_);
     }
   }
 
-  // Determine basic deoptimization information.  The optimized frame is
-  // described by the input data.
-  DeoptimizationInputData* input_data =
-      DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
   BailoutId node_id = input_data->AstId(bailout_id_);
   ByteArray* translations = input_data->TranslationByteArray();
   unsigned translation_index =
@@ -760,13 +844,13 @@ void Deoptimizer::DoComputeOutputFrames() {
   TranslationIterator iterator(translations, translation_index);
   Translation::Opcode opcode =
       static_cast<Translation::Opcode>(iterator.Next());
-  ASSERT(Translation::BEGIN == opcode);
+  DCHECK(Translation::BEGIN == opcode);
   USE(opcode);
   // Read the number of output frames and allocate an array for their
   // descriptions.
   int count = iterator.Next();
   iterator.Next();  // Drop JS frames count.
-  ASSERT(output_ == NULL);
+  DCHECK(output_ == NULL);
   output_ = new FrameDescription*[count];
   for (int i = 0; i < count; ++i) {
     output_[i] = NULL;
@@ -815,7 +899,7 @@ void Deoptimizer::DoComputeOutputFrames() {
       case Translation::LITERAL:
       case Translation::ARGUMENTS_OBJECT:
       default:
-        UNREACHABLE();
+        FATAL("Unsupported translation");
         break;
     }
   }
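
Note: the dispatch loop closed by this hunk is driven by the translation byte stream, which starts with BEGIN, the output frame count, and the JS frame count, followed by one frame-start opcode per output frame. Schematically (as implied by the reads above, not the exact byte encoding):

    // BEGIN <frame_count> <js_frame_count>
    //   JS_FRAME                <ast id> <closure id> <height> <values...>
    //   ARGUMENTS_ADAPTOR_FRAME <closure id> <height> <values...>
    //   CONSTRUCT_STUB_FRAME    <closure id> <height> <values...>
    //   ...
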
@@ -854,7 +938,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
   } else {
     int closure_id = iterator->Next();
     USE(closure_id);
-    ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
+    CHECK_EQ(Translation::kSelfLiteralId, closure_id);
     function = function_;
   }
   unsigned height = iterator->Next();
@@ -879,8 +963,8 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
 
   bool is_bottommost = (0 == frame_index);
   bool is_topmost = (output_count_ - 1 == frame_index);
-  ASSERT(frame_index >= 0 && frame_index < output_count_);
-  ASSERT(output_[frame_index] == NULL);
+  CHECK(frame_index >= 0 && frame_index < output_count_);
+  CHECK_EQ(output_[frame_index], NULL);
   output_[frame_index] = output_frame;
 
   // The top address for the bottommost output frame can be computed from
@@ -891,7 +975,10 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
   intptr_t top_address;
   if (is_bottommost) {
     // Determine whether the input frame contains alignment padding.
-    has_alignment_padding_ = HasAlignmentPadding(function) ? 1 : 0;
+    has_alignment_padding_ =
+        (!compiled_code_->is_turbofanned() && HasAlignmentPadding(function))
+            ? 1
+            : 0;
     // 2 = context and function in the frame.
     // If the optimized frame had alignment padding, adjust the frame pointer
     // to point to the new position of the old frame pointer after padding
@@ -951,7 +1038,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
   }
   output_frame->SetCallerFp(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
-  ASSERT(!is_bottommost || (input_->GetRegister(fp_reg.code()) +
+  DCHECK(!is_bottommost || (input_->GetRegister(fp_reg.code()) +
       has_alignment_padding_ * kPointerSize) == fp_value);
   output_frame->SetFp(fp_value);
   if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value);
@@ -961,29 +1048,24 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
            V8PRIxPTR " ; caller's fp\n",
            fp_value, output_offset, value);
   }
-  ASSERT(!is_bottommost || !has_alignment_padding_ ||
+  DCHECK(!is_bottommost || !has_alignment_padding_ ||
          (fp_value & kPointerSize) != 0);
 
   if (FLAG_enable_ool_constant_pool) {
     // For the bottommost output frame the constant pool pointer can be gotten
-    // from the input frame. For subsequent output frames, it can be gotten from
-    // the function's code.
-    Register constant_pool_reg =
-        JavaScriptFrame::constant_pool_pointer_register();
+    // from the input frame. For subsequent output frames, it can be read from
+    // the previous frame.
     output_offset -= kPointerSize;
     input_offset -= kPointerSize;
     if (is_bottommost) {
       value = input_->GetFrameSlot(input_offset);
     } else {
-      value = reinterpret_cast<intptr_t>(
-                  function->shared()->code()->constant_pool());
+      value = output_[frame_index - 1]->GetConstantPool();
     }
-    output_frame->SetFrameSlot(output_offset, value);
-    output_frame->SetConstantPool(value);
-    if (is_topmost) output_frame->SetRegister(constant_pool_reg.code(), value);
+    output_frame->SetCallerConstantPool(output_offset, value);
     if (trace_scope_) {
       PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
-             V8PRIxPTR "; constant_pool\n",
+             V8PRIxPTR "; caller's constant_pool\n",
              top_address + output_offset, output_offset, value);
     }
   }
@@ -1015,7 +1097,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
   value = reinterpret_cast<intptr_t>(function);
   // The function for the bottommost output frame should also agree with the
   // input frame.
-  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+  DCHECK(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
   output_frame->SetFrameSlot(output_offset, value);
   if (trace_scope_ != NULL) {
     PrintF(trace_scope_->file(),
@@ -1029,7 +1111,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
     output_offset -= kPointerSize;
     DoTranslateCommand(iterator, frame_index, output_offset);
   }
-  ASSERT(0 == output_offset);
+  CHECK_EQ(0, output_offset);
 
   // Compute this frame's PC, state, and continuation.
   Code* non_optimized_code = function->shared()->code();
@@ -1041,6 +1123,18 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
   intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
   output_frame->SetPc(pc_value);
 
+  // Update constant pool.
+  if (FLAG_enable_ool_constant_pool) {
+    intptr_t constant_pool_value =
+        reinterpret_cast<intptr_t>(non_optimized_code->constant_pool());
+    output_frame->SetConstantPool(constant_pool_value);
+    if (is_topmost) {
+      Register constant_pool_reg =
+          JavaScriptFrame::constant_pool_pointer_register();
+      output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
+    }
+  }
+
   FullCodeGenerator::State state =
       FullCodeGenerator::StateField::decode(pc_and_state);
   output_frame->SetState(Smi::FromInt(state));
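
Note: pulling the DoComputeJSFrame hunks together, the unoptimized frame is written from the top address downward, one slot at a time. A sketch of the resulting layout, highest address first (standard JS frame shape; the constant pool slot exists only under FLAG_enable_ool_constant_pool):

    //   receiver and incoming arguments     <- translated values
    //   caller's pc
    //   caller's fp                         <- fp_value points here
    //   caller's constant pool              (ool-constant-pool builds only)
    //   context
    //   function
    //   locals / expression stack (height)  <- remaining translated values
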
@@ -1054,7 +1148,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
     } else if (bailout_type_ == SOFT) {
       continuation = builtins->builtin(Builtins::kNotifySoftDeoptimized);
     } else {
-      ASSERT(bailout_type_ == EAGER);
+      CHECK_EQ(bailout_type_, EAGER);
     }
     output_frame->SetContinuation(
         reinterpret_cast<intptr_t>(continuation->entry()));
@@ -1081,8 +1175,8 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
   output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
 
   // Arguments adaptor cannot be topmost or bottommost.
-  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
-  ASSERT(output_[frame_index] == NULL);
+  CHECK(frame_index > 0 && frame_index < output_count_ - 1);
+  CHECK(output_[frame_index] == NULL);
   output_[frame_index] = output_frame;
 
   // The top address of the frame is computed from the previous
@@ -1124,15 +1218,14 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
   }
 
   if (FLAG_enable_ool_constant_pool) {
-    // A marker value is used in place of the constant pool.
+    // Read the caller's constant pool from the previous frame.
     output_offset -= kPointerSize;
-    intptr_t constant_pool = reinterpret_cast<intptr_t>(
-        Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-    output_frame->SetFrameSlot(output_offset, constant_pool);
+    value = output_[frame_index - 1]->GetConstantPool();
+    output_frame->SetCallerConstantPool(output_offset, value);
     if (trace_scope_) {
       PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
-             V8PRIxPTR " ; constant_pool (adaptor sentinel)\n",
-             top_address + output_offset, output_offset, constant_pool);
+             V8PRIxPTR "; caller's constant_pool\n",
+             top_address + output_offset, output_offset, value);
     }
   }
 
@@ -1170,7 +1263,7 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
            top_address + output_offset, output_offset, value, height - 1);
   }
 
-  ASSERT(0 == output_offset);
+  DCHECK(0 == output_offset);
 
   Builtins* builtins = isolate_->builtins();
   Code* adaptor_trampoline =
@@ -1179,6 +1272,11 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
       adaptor_trampoline->instruction_start() +
       isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
   output_frame->SetPc(pc_value);
+  if (FLAG_enable_ool_constant_pool) {
+    intptr_t constant_pool_value =
+        reinterpret_cast<intptr_t>(adaptor_trampoline->constant_pool());
+    output_frame->SetConstantPool(constant_pool_value);
+  }
 }
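
Note the behavioral change in the adaptor frame's constant pool slot: the sentinel smi is gone, the slot now carries the caller's real pool, and the adaptor trampoline's own pool is recorded on the FrameDescription instead:

    // Before: slot <- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)   // sentinel
    // After:  slot <- output_[frame_index - 1]->GetConstantPool()   // caller's
    //         output_frame->SetConstantPool(adaptor_trampoline->constant_pool());
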
 
 
@@ -1203,8 +1301,8 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
   output_frame->SetFrameType(StackFrame::CONSTRUCT);
 
   // Construct stub cannot be topmost or bottommost.
-  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
-  ASSERT(output_[frame_index] == NULL);
+  DCHECK(frame_index > 0 && frame_index < output_count_ - 1);
+  DCHECK(output_[frame_index] == NULL);
   output_[frame_index] = output_frame;
 
   // The top address of the frame is computed from the previous
@@ -1224,7 +1322,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
     // receiver parameter through the translation. It might be encoding
     // a captured object, patch the slot address for a captured object.
     if (i == 0 && deferred_objects_.length() > deferred_object_index) {
-      ASSERT(!deferred_objects_[deferred_object_index].is_arguments());
+      CHECK(!deferred_objects_[deferred_object_index].is_arguments());
       deferred_objects_[deferred_object_index].patch_slot_address(top_address);
     }
   }
@@ -1254,13 +1352,13 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
   }
 
   if (FLAG_enable_ool_constant_pool) {
-    // The constant pool pointer can be gotten from the previous frame.
+    // Read the caller's constant pool from the previous frame.
     output_offset -= kPointerSize;
     value = output_[frame_index - 1]->GetConstantPool();
-    output_frame->SetFrameSlot(output_offset, value);
+    output_frame->SetCallerConstantPool(output_offset, value);
     if (trace_scope_) {
       PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
-             V8PRIxPTR " ; constant pool\n",
+             V8PRIxPTR " ; caller's constant pool\n",
              top_address + output_offset, output_offset, value);
     }
   }
@@ -1335,12 +1433,17 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
            top_address + output_offset, output_offset, value);
   }
 
-  ASSERT(0 == output_offset);
+  CHECK_EQ(0, output_offset);
 
   intptr_t pc = reinterpret_cast<intptr_t>(
       construct_stub->instruction_start() +
       isolate_->heap()->construct_stub_deopt_pc_offset()->value());
   output_frame->SetPc(pc);
+  if (FLAG_enable_ool_constant_pool) {
+    intptr_t constant_pool_value =
+        reinterpret_cast<intptr_t>(construct_stub->constant_pool());
+    output_frame->SetConstantPool(constant_pool_value);
+  }
 }
 
 
@@ -1376,8 +1479,8 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
   output_frame->SetFrameType(StackFrame::INTERNAL);
 
   // A frame for an accessor stub cannot be the topmost or bottommost one.
-  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
-  ASSERT(output_[frame_index] == NULL);
+  CHECK(frame_index > 0 && frame_index < output_count_ - 1);
+  CHECK_EQ(output_[frame_index], NULL);
   output_[frame_index] = output_frame;
 
   // The top address of the frame is computed from the previous frame's top and
@@ -1412,13 +1515,13 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
   }
 
   if (FLAG_enable_ool_constant_pool) {
-    // The constant pool pointer can be gotten from the previous frame.
+    // Read the caller's constant pool from the previous frame.
     output_offset -= kPointerSize;
     value = output_[frame_index - 1]->GetConstantPool();
-    output_frame->SetFrameSlot(output_offset, value);
+    output_frame->SetCallerConstantPool(output_offset, value);
     if (trace_scope_) {
       PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
-             V8PRIxPTR " ; constant pool\n",
+             V8PRIxPTR " ; caller's constant pool\n",
              top_address + output_offset, output_offset, value);
     }
   }
@@ -1461,9 +1564,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
   }
 
   // Skip receiver.
-  Translation::Opcode opcode =
-      static_cast<Translation::Opcode>(iterator->Next());
-  iterator->Skip(Translation::NumberOfOperandsFor(opcode));
+  DoTranslateObjectAndSkip(iterator);
 
   if (is_setter_stub_frame) {
     // The implicit return value was part of the artificial setter stub
@@ -1472,7 +1573,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
     DoTranslateCommand(iterator, frame_index, output_offset);
   }
 
-  ASSERT(0 == output_offset);
+  CHECK_EQ(output_offset, 0);
 
   Smi* offset = is_setter_stub_frame ?
       isolate_->heap()->setter_stub_deopt_pc_offset() :
@@ -1480,6 +1581,11 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
   intptr_t pc = reinterpret_cast<intptr_t>(
       accessor_stub->instruction_start() + offset->value());
   output_frame->SetPc(pc);
+  if (FLAG_enable_ool_constant_pool) {
+    intptr_t constant_pool_value =
+        reinterpret_cast<intptr_t>(accessor_stub->constant_pool());
+    output_frame->SetConstantPool(constant_pool_value);
+  }
 }
 
 
@@ -1517,19 +1623,23 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
   //                                         reg = JSFunction context
   //
 
-  ASSERT(compiled_code_->is_crankshafted() &&
-         compiled_code_->kind() != Code::OPTIMIZED_FUNCTION);
-  int major_key = compiled_code_->major_key();
+  CHECK(compiled_code_->is_hydrogen_stub());
+  int major_key = CodeStub::GetMajorKey(compiled_code_);
   CodeStubInterfaceDescriptor* descriptor =
       isolate_->code_stub_interface_descriptor(major_key);
+  // Check that there is a descriptor matching the major key.
+  // This will fail if one has not been installed on the isolate.
+  DCHECK_EQ(descriptor->MajorKey(), major_key);
 
   // The output frame must have room for all pushed register parameters
   // and the standard stack frame slots.  Include space for an argument
   // object to the callee and optionally the space to pass the argument
   // object to the stub failure handler.
-  ASSERT(descriptor->register_param_count_ >= 0);
-  int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
-      sizeof(Arguments) + kPointerSize;
+  int param_count = descriptor->GetEnvironmentParameterCount();
+  CHECK_GE(param_count, 0);
+
+  int height_in_bytes = kPointerSize * param_count + sizeof(Arguments) +
+      kPointerSize;
   int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
   int input_frame_size = input_->GetFrameSize();
   int output_frame_size = height_in_bytes + fixed_frame_size;
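
Note: as an illustrative sizing, take a 32-bit target (kPointerSize = 4), a stub with three register parameters, and assume sizeof(Arguments) comes to 8 there:

    // height_in_bytes = 4 * 3   // three pushed register parameters
    //                 + 8       // the caller's Arguments record (assumed size)
    //                 + 4       // pointer to it, passed to the failure handler
    //                 = 24 bytes on top of the fixed frame slots.
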
@@ -1544,7 +1654,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
   FrameDescription* output_frame =
       new(output_frame_size) FrameDescription(output_frame_size, NULL);
   output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
-  ASSERT(frame_index == 0);
+  CHECK_EQ(frame_index, 0);
   output_[frame_index] = output_frame;
 
   // The top address for the output frame can be computed from the input
@@ -1583,17 +1693,14 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
   }
 
   if (FLAG_enable_ool_constant_pool) {
-    // The constant pool pointer can be gotten from the input frame.
-    Register constant_pool_pointer_register =
-        StubFailureTrampolineFrame::constant_pool_pointer_register();
+    // Read the caller's constant pool from the input frame.
     input_frame_offset -= kPointerSize;
     value = input_->GetFrameSlot(input_frame_offset);
-    output_frame->SetRegister(constant_pool_pointer_register.code(), value);
     output_frame_offset -= kPointerSize;
-    output_frame->SetFrameSlot(output_frame_offset, value);
+    output_frame->SetCallerConstantPool(output_frame_offset, value);
     if (trace_scope_) {
       PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
-             V8PRIxPTR " ; constant_pool_pointer\n",
+             V8PRIxPTR " ; caller's constant_pool\n",
              top_address + output_frame_offset, output_frame_offset, value);
     }
   }
@@ -1605,7 +1712,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
   output_frame->SetRegister(context_reg.code(), value);
   output_frame_offset -= kPointerSize;
   output_frame->SetFrameSlot(output_frame_offset, value);
-  ASSERT(reinterpret_cast<Object*>(value)->IsContext());
+  CHECK(reinterpret_cast<Object*>(value)->IsContext());
   if (trace_scope_ != NULL) {
     PrintF(trace_scope_->file(),
            "    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
@@ -1626,7 +1733,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
   }
 
   intptr_t caller_arg_count = 0;
-  bool arg_count_known = !descriptor->stack_parameter_count_.is_valid();
+  bool arg_count_known = !descriptor->stack_parameter_count().is_valid();
 
   // Build the Arguments object for the caller's parameters and a pointer to it.
   output_frame_offset -= kPointerSize;
@@ -1674,19 +1781,20 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
 
   // Copy the register parameters to the failure frame.
   int arguments_length_offset = -1;
-  for (int i = 0; i < descriptor->register_param_count_; ++i) {
+  for (int i = 0; i < param_count; ++i) {
     output_frame_offset -= kPointerSize;
     DoTranslateCommand(iterator, 0, output_frame_offset);
 
-    if (!arg_count_known && descriptor->IsParameterCountRegister(i)) {
+    if (!arg_count_known &&
+        descriptor->IsEnvironmentParameterCountRegister(i)) {
       arguments_length_offset = output_frame_offset;
     }
   }
 
-  ASSERT(0 == output_frame_offset);
+  CHECK_EQ(output_frame_offset, 0);
 
   if (!arg_count_known) {
-    ASSERT(arguments_length_offset >= 0);
+    CHECK_GE(arguments_length_offset, 0);
     // We know it's a smi because 1) the code stub guarantees the stack
     // parameter count is in smi range, and 2) the DoTranslateCommand in the
     // parameter loop above translated that to a tagged value.
@@ -1721,14 +1829,23 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
 
   // Compute this frame's PC, state, and continuation.
   Code* trampoline = NULL;
-  StubFunctionMode function_mode = descriptor->function_mode_;
-  StubFailureTrampolineStub(function_mode).FindCodeInCache(&trampoline,
-                                                           isolate_);
-  ASSERT(trampoline != NULL);
+  StubFunctionMode function_mode = descriptor->function_mode();
+  StubFailureTrampolineStub(isolate_,
+                            function_mode).FindCodeInCache(&trampoline);
+  DCHECK(trampoline != NULL);
   output_frame->SetPc(reinterpret_cast<intptr_t>(
       trampoline->instruction_start()));
+  if (FLAG_enable_ool_constant_pool) {
+    Register constant_pool_reg =
+        StubFailureTrampolineFrame::constant_pool_pointer_register();
+    intptr_t constant_pool_value =
+        reinterpret_cast<intptr_t>(trampoline->constant_pool());
+    output_frame->SetConstantPool(constant_pool_value);
+    output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
+  }
   output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
-  Code* notify_failure = NotifyStubFailureBuiltin();
+  Code* notify_failure =
+      isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
   output_frame->SetContinuation(
       reinterpret_cast<intptr_t>(notify_failure->entry()));
 }
@@ -1750,7 +1867,11 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
     Handle<JSObject> arguments = Handle<JSObject>::cast(
         Accessors::FunctionGetArguments(function));
     materialized_objects_->Add(arguments);
-    materialization_value_index_ += length;
+    // To keep consistent object counters, we still materialize the
+    // nested values (but we throw them away).
+    for (int i = 0; i < length; ++i) {
+      MaterializeNextValue();
+    }
   } else if (desc.is_arguments()) {
     // Construct an arguments object and copy the parameters to a newly
     // allocated arguments object backing store.
@@ -1758,7 +1879,7 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
     Handle<JSObject> arguments =
         isolate_->factory()->NewArgumentsObject(function, length);
     Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length);
-    ASSERT(array->length() == length);
+    DCHECK_EQ(array->length(), length);
     arguments->set_elements(*array);
     materialized_objects_->Add(arguments);
     for (int i = 0; i < length; ++i) {
@@ -1770,11 +1891,13 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
     // We also need to make sure that the representations of all fields
     // in the given object are general enough to hold a tagged value.
     Handle<Map> map = Map::GeneralizeAllFieldRepresentations(
-        Handle<Map>::cast(MaterializeNextValue()), Representation::Tagged());
+        Handle<Map>::cast(MaterializeNextValue()));
     switch (map->instance_type()) {
+      case MUTABLE_HEAP_NUMBER_TYPE:
       case HEAP_NUMBER_TYPE: {
         // Reuse the HeapNumber value directly as it is already properly
-        // tagged and skip materializing the HeapNumber explicitly.
+        // tagged and skip materializing the HeapNumber explicitly. Turn mutable
+        // heap numbers immutable.
         Handle<Object> object = MaterializeNextValue();
         if (object_index < prev_materialized_count_) {
           materialized_objects_->Add(Handle<Object>(
@@ -1800,7 +1923,8 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
         object->set_elements(FixedArrayBase::cast(*elements));
         for (int i = 0; i < length - 3; ++i) {
           Handle<Object> value = MaterializeNextValue();
-          object->FastPropertyAtPut(i, *value);
+          FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
+          object->FastPropertyAtPut(index, *value);
         }
         break;
       }
@@ -1824,7 +1948,7 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
       default:
         PrintF(stderr,
                "[couldn't handle instance type %d]\n", map->instance_type());
-        UNREACHABLE();
+        FATAL("Unsupported instance type");
     }
   }
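
Note: the counter bookkeeping in MaterializeNextHeapObject is the point of the new materialize-and-discard loop above: materialization_object_index_ and materialization_value_index_ must advance in lockstep with the translation stream even when a value is thrown away. Schematically:

    // materialized_values_: [ ..., arg0, arg1, ..., arg(N-1), next, ... ]
    //                              ^ value index on entry
    // After N MaterializeNextValue() calls the index sits at 'next',
    // which is exactly where the translation stream continues.
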
 
@@ -1835,6 +1959,9 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
 Handle<Object> Deoptimizer::MaterializeNextValue() {
   int value_index = materialization_value_index_++;
   Handle<Object> value = materialized_values_->at(value_index);
+  if (value->IsMutableHeapNumber()) {
+    HeapNumber::cast(*value)->set_map(isolate_->heap()->heap_number_map());
+  }
   if (*value == isolate_->heap()->arguments_marker()) {
     value = MaterializeNextHeapObject();
   }
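
Note: the map swap above is the whole trick for freezing a mutable heap number: a MUTABLE_HEAP_NUMBER shares its layout with an ordinary HeapNumber, so retagging suffices. Illustratively:

    // before: value has the mutable-heap-number map  (per-field double box)
    // after:  value->map() == heap_number_map        (ordinary immutable number)
    // The double payload is untouched; only the instance type changes.
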
@@ -1843,7 +1970,7 @@ Handle<Object> Deoptimizer::MaterializeNextValue() {
 
 
 void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
-  ASSERT_NE(DEBUGGER, bailout_type_);
+  DCHECK_NE(DEBUGGER, bailout_type_);
 
   MaterializedObjectStore* materialized_store =
       isolate_->materialized_object_store();
@@ -1902,6 +2029,24 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
     Memory::Object_at(d.destination()) = *float32x4;
   }
 
+  // Materialize all float64x2 before looking at arguments because when the
+  // output frames are used to materialize arguments objects later on they need
+  // to already contain valid float64x2 values.
+  for (int i = 0; i < deferred_float64x2s_.length(); i++) {
+    SIMD128MaterializationDescriptor<Address> d = deferred_float64x2s_[i];
+    float64x2_value_t x2 = d.value().d2;
+    Handle<Object> float64x2 = isolate_->factory()->NewFloat64x2(x2);
+    if (trace_scope_ != NULL) {
+      PrintF(trace_scope_->file(),
+             "Materialized a new float64x2 %p "
+             "[float64x2(%e, %e)] in slot %p\n",
+             reinterpret_cast<void*>(*float64x2),
+             x2.storage[0], x2.storage[1],
+             d.destination());
+    }
+    Memory::Object_at(d.destination()) = *float64x2;
+  }
+
   // Materialize all int32x4 before looking at arguments because when the
   // output frames are used to materialize arguments objects later on they need
   // to already contain valid int32x4 values.
@@ -1933,7 +2078,7 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
              d.value(),
              d.destination());
     }
-    ASSERT(values.at(d.destination())->IsTheHole());
+    DCHECK(values.at(d.destination())->IsTheHole());
     values.Set(d.destination(), num);
   }
 
@@ -1954,13 +2099,34 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
              x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
              d.destination());
     }
-    ASSERT(values.at(d.destination())->IsTheHole());
+    DCHECK(values.at(d.destination())->IsTheHole());
     values.Set(d.destination(), float32x4);
   }
 
   // Play it safe and clear all object float32x4 values before we continue.
   deferred_objects_float32x4_values_.Clear();
 
+  // Materialize all float64x2 values required for arguments/captured objects.
+  for (int i = 0; i < deferred_objects_float64x2_values_.length(); i++) {
+    SIMD128MaterializationDescriptor<int> d =
+        deferred_objects_float64x2_values_[i];
+    float64x2_value_t x2 = d.value().d2;
+    Handle<Object> float64x2 = isolate_->factory()->NewFloat64x2(x2);
+    if (trace_scope_ != NULL) {
+      PrintF(trace_scope_->file(),
+             "Materialized a new float64x2 %p "
+             "[float64x2(%e, %e)] for object at %d\n",
+             reinterpret_cast<void*>(*float64x2),
+             x2.storage[0], x2.storage[1],
+             d.destination());
+    }
+    DCHECK(values.at(d.destination())->IsTheHole());
+    values.Set(d.destination(), float64x2);
+  }
+
+  // Play it safe and clear all object float64x2 values before we continue.
+  deferred_objects_float64x2_values_.Clear();
+
   // Materialize all int32x4 values required for arguments/captured objects.
   for (int i = 0; i < deferred_objects_int32x4_values_.length(); i++) {
     SIMD128MaterializationDescriptor<int> d =
@@ -1975,7 +2141,7 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
              x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
              d.destination());
     }
-    ASSERT(values.at(d.destination())->IsTheHole());
+    DCHECK(values.at(d.destination())->IsTheHole());
     values.Set(d.destination(), int32x4);
   }
 
@@ -1997,7 +2163,9 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
       // materialize a new instance of the object if necessary. Store
       // the materialized object into the frame slot.
       Handle<Object> object = MaterializeNextHeapObject();
-      Memory::Object_at(descriptor.slot_address()) = *object;
+      if (descriptor.slot_address() != NULL) {
+        Memory::Object_at(descriptor.slot_address()) = *object;
+      }
       if (trace_scope_ != NULL) {
         if (descriptor.is_arguments()) {
           PrintF(trace_scope_->file(),
@@ -2016,8 +2184,8 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
       }
     }
 
-    ASSERT(materialization_object_index_ == materialized_objects_->length());
-    ASSERT(materialization_value_index_ == materialized_values_->length());
+    CHECK_EQ(materialization_object_index_, materialized_objects_->length());
+    CHECK_EQ(materialization_value_index_, materialized_values_->length());
   }
 
   if (prev_materialized_count_ > 0) {
@@ -2026,14 +2194,13 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
 }
 
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
 void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
     Address parameters_top,
     uint32_t parameters_size,
     Address expressions_top,
     uint32_t expressions_size,
     DeoptimizedFrameInfo* info) {
-  ASSERT_EQ(DEBUGGER, bailout_type_);
+  CHECK_EQ(DEBUGGER, bailout_type_);
   Address parameters_bottom = parameters_top + parameters_size;
   Address expressions_bottom = expressions_top + expressions_size;
   for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
@@ -2079,7 +2246,6 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
     }
   }
 }
-#endif
 
 
 static const char* TraceValueType(bool is_smi) {
@@ -2091,6 +2257,79 @@ static const char* TraceValueType(bool is_smi) {
 }
 
 
+void Deoptimizer::DoTranslateObjectAndSkip(TranslationIterator* iterator) {
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+
+  switch (opcode) {
+    case Translation::BEGIN:
+    case Translation::JS_FRAME:
+    case Translation::ARGUMENTS_ADAPTOR_FRAME:
+    case Translation::CONSTRUCT_STUB_FRAME:
+    case Translation::GETTER_STUB_FRAME:
+    case Translation::SETTER_STUB_FRAME:
+    case Translation::COMPILED_STUB_FRAME: {
+      FATAL("Unexpected frame start translation opcode");
+      return;
+    }
+
+    case Translation::REGISTER:
+    case Translation::INT32_REGISTER:
+    case Translation::UINT32_REGISTER:
+    case Translation::DOUBLE_REGISTER:
+    case Translation::FLOAT32x4_REGISTER:
+    case Translation::FLOAT64x2_REGISTER:
+    case Translation::INT32x4_REGISTER:
+    case Translation::STACK_SLOT:
+    case Translation::INT32_STACK_SLOT:
+    case Translation::UINT32_STACK_SLOT:
+    case Translation::DOUBLE_STACK_SLOT:
+    case Translation::FLOAT32x4_STACK_SLOT:
+    case Translation::FLOAT64x2_STACK_SLOT:
+    case Translation::INT32x4_STACK_SLOT:
+    case Translation::LITERAL: {
+      // The value is not part of any materialized object, so we can ignore it.
+      iterator->Skip(Translation::NumberOfOperandsFor(opcode));
+      return;
+    }
+
+    case Translation::DUPLICATED_OBJECT: {
+      int object_index = iterator->Next();
+      if (trace_scope_ != NULL) {
+        PrintF(trace_scope_->file(), "      skipping object ");
+        PrintF(trace_scope_->file(),
+               " ; duplicate of object #%d\n", object_index);
+      }
+      AddObjectDuplication(0, object_index);
+      return;
+    }
+
+    case Translation::ARGUMENTS_OBJECT:
+    case Translation::CAPTURED_OBJECT: {
+      int length = iterator->Next();
+      bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
+      if (trace_scope_ != NULL) {
+        PrintF(trace_scope_->file(), "    skipping object ");
+        PrintF(trace_scope_->file(),
+               " ; object (length = %d, is_args = %d)\n", length, is_args);
+      }
+
+      AddObjectStart(0, length, is_args);
+
+      // We save the object values on the side and materialize the actual
+      // object after the deoptimized frame is built.
+      int object_index = deferred_objects_.length() - 1;
+      for (int i = 0; i < length; i++) {
+        DoTranslateObject(iterator, object_index, i);
+      }
+      return;
+    }
+  }
+
+  FATAL("Unexpected translation opcode");
+}
+
+
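
Note: DoTranslateObjectAndSkip exists so the accessor-stub path (see the receiver skip earlier in this patch) can drop a value without corrupting object bookkeeping: a bare iterator->Skip() would fail to record ARGUMENTS_OBJECT / CAPTURED_OBJECT starts, leaving later DUPLICATED_OBJECT references pointing at the wrong object index. The slot address 0 passed to AddObjectStart / AddObjectDuplication marks the object as having no frame slot to patch, which pairs with the guard added in MaterializeHeapObjects:

    // From MaterializeHeapObjects in this same patch:
    //   if (descriptor.slot_address() != NULL) {
    //     Memory::Object_at(descriptor.slot_address()) = *object;
    //   }
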
 void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
                                     int object_index,
                                     int field_index) {
@@ -2108,7 +2347,7 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
     case Translation::GETTER_STUB_FRAME:
     case Translation::SETTER_STUB_FRAME:
     case Translation::COMPILED_STUB_FRAME:
-      UNREACHABLE();
+      FATAL("Unexpected frame start translation opcode");
       return;
 
     case Translation::REGISTER: {
@@ -2198,6 +2437,7 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
     }
 
     case Translation::FLOAT32x4_REGISTER:
+    case Translation::FLOAT64x2_REGISTER:
     case Translation::INT32x4_REGISTER: {
       int input_reg = iterator->Next();
       simd128_value_t value = input_->GetSIMD128Register(input_reg);
@@ -2212,8 +2452,18 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
                  "float32x4(%e, %e, %e, %e) ; %s\n",
                  x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
                  SIMD128Register::AllocationIndexToString(input_reg));
+        } else if (opcode == Translation::FLOAT64x2_REGISTER) {
+          float64x2_value_t x2 = value.d2;
+          PrintF(trace_scope_->file(),
+                 "      object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+                 reinterpret_cast<intptr_t>(object_slot),
+                 field_index);
+          PrintF(trace_scope_->file(),
+                 "float64x2(%e, %e) ; %s\n",
+                 x2.storage[0], x2.storage[1],
+                 SIMD128Register::AllocationIndexToString(input_reg));
         } else {
-          ASSERT(opcode == Translation::INT32x4_REGISTER);
+          DCHECK(opcode == Translation::INT32x4_REGISTER);
           int32x4_value_t x4 = value.i4;
           PrintF(trace_scope_->file(),
                  "      object @0x%08" V8PRIxPTR ": [field #%d] <- ",
@@ -2317,6 +2567,7 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
     }
 
     case Translation::FLOAT32x4_STACK_SLOT:
+    case Translation::FLOAT64x2_STACK_SLOT:
     case Translation::INT32x4_STACK_SLOT: {
       int input_slot_index = iterator->Next();
       unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
@@ -2332,8 +2583,18 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
                  "float32x4(%e, %e, %e, %e) ; [sp + %d]\n",
                  x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
                  input_offset);
+        } else if (opcode == Translation::FLOAT64x2_STACK_SLOT) {
+          float64x2_value_t x2 = value.d2;
+          PrintF(trace_scope_->file(),
+                 "      object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+                 reinterpret_cast<intptr_t>(object_slot),
+                 field_index);
+          PrintF(trace_scope_->file(),
+                 "float64x2(%e, %e) ; [sp + %d]\n",
+                 x2.storage[0], x2.storage[1],
+                 input_offset);
         } else {
-          ASSERT(opcode == Translation::INT32x4_STACK_SLOT);
+          DCHECK(opcode == Translation::INT32x4_STACK_SLOT);
           int32x4_value_t x4 = value.i4;
           PrintF(trace_scope_->file(),
                  "      object @0x%08" V8PRIxPTR ": [field #%d] <- ",
@@ -2413,6 +2674,8 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
       return;
     }
   }
+
+  FATAL("Unexpected translation opcode");
 }
 
 
@@ -2434,7 +2697,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
     case Translation::GETTER_STUB_FRAME:
     case Translation::SETTER_STUB_FRAME:
     case Translation::COMPILED_STUB_FRAME:
-      UNREACHABLE();
+      FATAL("Unexpected translation opcode");
       return;
 
     case Translation::REGISTER: {
@@ -2532,6 +2795,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
     }
 
     case Translation::FLOAT32x4_REGISTER:
+    case Translation::FLOAT64x2_REGISTER:
     case Translation::INT32x4_REGISTER: {
       int input_reg = iterator->Next();
       simd128_value_t value = input_->GetSIMD128Register(input_reg);
@@ -2545,8 +2809,17 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
                  output_offset,
                  x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
                  SIMD128Register::AllocationIndexToString(input_reg));
+        } else if (opcode == Translation::FLOAT64x2_REGISTER) {
+          float64x2_value_t x2 = value.d2;
+          PrintF(trace_scope_->file(),
+                 "    0x%08" V8PRIxPTR ":"
+                 " [top + %d] <- float64x2(%e, %e) ; %s\n",
+                 output_[frame_index]->GetTop() + output_offset,
+                 output_offset,
+                 x2.storage[0], x2.storage[1],
+                 SIMD128Register::AllocationIndexToString(input_reg));
         } else {
-          ASSERT(opcode == Translation::INT32x4_REGISTER);
+          DCHECK(opcode == Translation::INT32x4_REGISTER);
           int32x4_value_t x4 = value.i4;
           PrintF(trace_scope_->file(),
                  "    0x%08" V8PRIxPTR ":"
@@ -2667,6 +2940,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
     }
 
     case Translation::FLOAT32x4_STACK_SLOT:
+    case Translation::FLOAT64x2_STACK_SLOT:
     case Translation::INT32x4_STACK_SLOT: {
       int input_slot_index = iterator->Next();
       unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
@@ -2681,8 +2955,17 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
                  output_offset,
                  x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
                  input_offset);
+        } else if (opcode == Translation::FLOAT64x2_STACK_SLOT) {
+          float64x2_value_t x2 = value.d2;
+          PrintF(trace_scope_->file(),
+                 "    0x%08" V8PRIxPTR ": "
+                 "[top + %d] <- float64x2(%e, %e) ; [sp + %d]\n",
+                 output_[frame_index]->GetTop() + output_offset,
+                 output_offset,
+                 x2.storage[0], x2.storage[1],
+                 input_offset);
         } else {
-          ASSERT(opcode == Translation::INT32x4_STACK_SLOT);
+          DCHECK(opcode == Translation::INT32x4_STACK_SLOT);
           int32x4_value_t x4 = value.i4;
           PrintF(trace_scope_->file(),
                  "    0x%08" V8PRIxPTR ": "
@@ -2775,13 +3058,11 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
   // function into account so we have to avoid double counting them.
   unsigned result = fixed_size + fp_to_sp_delta_ -
       StandardFrameConstants::kFixedFrameSizeFromFp;
-#ifdef DEBUG
   if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
     unsigned stack_slots = compiled_code_->stack_slots();
     unsigned outgoing_size = ComputeOutgoingArgumentSize();
-    ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
+    CHECK(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
   }
-#endif
   return result;
 }
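 
 // Editor's note: the CHECK in ComputeInputFrameSize above encodes this
 // identity (all quantities in bytes) for optimized code:
 //
 //   fp_to_sp_delta_ == kFixedFrameSizeFromFp          // fixed part below fp
 //                      + stack_slots * kPointerSize   // spill slots
 //                      + outgoing_size                // outgoing arguments
 //
 // so fixed_size + fp_to_sp_delta_ - kFixedFrameSizeFromFp necessarily
 // equals fixed_size + stack_slots * kPointerSize + outgoing_size.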
 
@@ -2798,7 +3079,7 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
   // The incoming arguments are the values for the formal parameters and
   // the receiver. Every slot contains a pointer.
   if (function->IsSmi()) {
-    ASSERT(Smi::cast(function) == Smi::FromInt(StackFrame::STUB));
+    CHECK_EQ(Smi::cast(function), Smi::FromInt(StackFrame::STUB));
     return 0;
   }
   unsigned arguments = function->shared()->formal_parameter_count() + 1;
@@ -2859,8 +3140,11 @@ void Deoptimizer::AddObjectSIMD128Value(simd128_value_t value,
   if (opcode == Translation::FLOAT32x4_REGISTER ||
       opcode == Translation::FLOAT32x4_STACK_SLOT) {
     deferred_objects_float32x4_values_.Add(value_desc);
+  } else if (opcode == Translation::FLOAT64x2_REGISTER ||
+             opcode == Translation::FLOAT64x2_STACK_SLOT) {
+    deferred_objects_float64x2_values_.Add(value_desc);
   } else {
-    ASSERT(opcode == Translation::INT32x4_REGISTER ||
+    DCHECK(opcode == Translation::INT32x4_REGISTER ||
            opcode == Translation::INT32x4_STACK_SLOT);
     deferred_objects_int32x4_values_.Add(value_desc);
   }
@@ -2884,8 +3168,11 @@ void Deoptimizer::AddSIMD128Value(intptr_t slot_address,
   if (opcode == Translation::FLOAT32x4_REGISTER ||
       opcode == Translation::FLOAT32x4_STACK_SLOT) {
     deferred_float32x4s_.Add(value_desc);
+  } else if (opcode == Translation::FLOAT64x2_REGISTER ||
+             opcode == Translation::FLOAT64x2_STACK_SLOT) {
+    deferred_float64x2s_.Add(value_desc);
   } else {
-    ASSERT(opcode == Translation::INT32x4_REGISTER ||
+    DCHECK(opcode == Translation::INT32x4_REGISTER ||
            opcode == Translation::INT32x4_STACK_SLOT);
     deferred_int32x4s_.Add(value_desc);
   }
@@ -2899,28 +3186,28 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
   // cause us to emit relocation information for the external
   // references. This is fine because the deoptimizer's code section
   // isn't meant to be serialized at all.
-  ASSERT(type == EAGER || type == SOFT || type == LAZY);
+  CHECK(type == EAGER || type == SOFT || type == LAZY);
   DeoptimizerData* data = isolate->deoptimizer_data();
   int entry_count = data->deopt_entry_code_entries_[type];
   if (max_entry_id < entry_count) return;
   entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries);
   while (max_entry_id >= entry_count) entry_count *= 2;
-  ASSERT(entry_count <= Deoptimizer::kMaxNumberOfEntries);
+  CHECK(entry_count <= Deoptimizer::kMaxNumberOfEntries);
 
   MacroAssembler masm(isolate, NULL, 16 * KB);
   masm.set_emit_debug_code(false);
   GenerateDeoptimizationEntries(&masm, entry_count, type);
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
 
   MemoryChunk* chunk = data->deopt_entry_code_[type];
-  ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
-         desc.instr_size);
+  CHECK(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
+        desc.instr_size);
   chunk->CommitArea(desc.instr_size);
   CopyBytes(chunk->area_start(), desc.buffer,
       static_cast<size_t>(desc.instr_size));
-  CPU::FlushICache(chunk->area_start(), desc.instr_size);
+  CpuFeatures::FlushICache(chunk->area_start(), desc.instr_size);
 
   data->deopt_entry_code_entries_[type] = entry_count;
 }
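 
 // Editor's note: the entry-table sizing above is plain doubling growth,
 // clamped below by kMinNumberOfEntries and CHECKed against
 // kMaxNumberOfEntries. The same computation in isolation:
 //
 //   int GrowEntryCount(int current, int max_entry_id, int min_entries) {
 //     int count = current > min_entries ? current : min_entries;
 //     while (max_entry_id >= count) count *= 2;
 //     return count;  // caller still CHECKs count <= kMaxNumberOfEntries
 //   }
 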
@@ -2937,6 +3224,9 @@ FrameDescription::FrameDescription(uint32_t frame_size,
       constant_pool_(kZapUint32) {
   // Zap all the registers.
   for (int r = 0; r < Register::kNumRegisters; r++) {
+    // TODO(jbramley): It isn't safe to use kZapUint32 here. If the register
+    // isn't used before the next safepoint, the GC will try to scan it as a
+    // tagged value. kZapUint32 looks like a valid tagged pointer, but it isn't.
     SetRegister(r, kZapUint32);
   }
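   // Editor's note on the TODO above: V8 tags values by their low bit
   // (smis have bit 0 clear, heap-object pointers have bit 0 set), so a
   // zap constant with bit 0 set is indistinguishable from a real heap
   // pointer during a GC scan, e.g.:
   //
   //   static inline bool LooksLikeHeapPointer(uint32_t word) {
   //     return (word & 1) != 0;  // true for an odd zap value
   //   }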
 
@@ -2980,15 +3270,15 @@ int FrameDescription::ComputeParametersCount() {
     case StackFrame::STUB:
       return -1;  // Minus receiver.
     default:
-      UNREACHABLE();
+      FATAL("Unexpected stack frame type");
       return 0;
   }
 }
 
 
 Object* FrameDescription::GetParameter(int index) {
-  ASSERT(index >= 0);
-  ASSERT(index < ComputeParametersCount());
+  CHECK_GE(index, 0);
+  CHECK_LT(index, ComputeParametersCount());
   // The slot indexes for incoming arguments are negative.
   unsigned offset = GetOffsetFromSlotIndex(index - ComputeParametersCount());
   return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
@@ -2996,14 +3286,14 @@ Object* FrameDescription::GetParameter(int index) {
 
 
 unsigned FrameDescription::GetExpressionCount() {
-  ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
+  CHECK_EQ(StackFrame::JAVA_SCRIPT, type_);
   unsigned size = GetFrameSize() - ComputeFixedSize();
   return size / kPointerSize;
 }
 
 
 Object* FrameDescription::GetExpression(int index) {
-  ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
+  DCHECK_EQ(StackFrame::JAVA_SCRIPT, type_);
   unsigned offset = GetOffsetFromSlotIndex(index);
   return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
 }
@@ -3029,7 +3319,7 @@ int32_t TranslationIterator::Next() {
   // bit of zero (marks the end).
   uint32_t bits = 0;
   for (int i = 0; true; i += 7) {
-    ASSERT(HasNext());
+    DCHECK(HasNext());
     uint8_t next = buffer_->get(index_++);
     bits |= (next >> 1) << i;
     if ((next & 1) == 0) break;
@@ -3044,8 +3334,7 @@ int32_t TranslationIterator::Next() {
 Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) {
   int length = contents_.length();
   Handle<ByteArray> result = factory->NewByteArray(length, TENURED);
-  OS::MemCopy(
-      result->GetDataStartAddress(), contents_.ToVector().start(), length);
+  MemCopy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
   return result;
 }
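 
 // Editor's note: a self-contained sketch of the byte coding decoded by
 // TranslationIterator::Next() above. Each byte carries seven payload bits
 // in bits 7..1 and a "more bytes follow" flag in bit 0; the value's sign
 // is folded into the lowest payload bit of the assembled number (the
 // decoder's sign step sits just past this hunk):
 //
 //   #include <stdint.h>
 //   #include <vector>
 //
 //   void EncodeTranslationValue(std::vector<uint8_t>* out, int32_t value) {
 //     bool is_negative = value < 0;
 //     uint32_t bits =
 //         (static_cast<uint32_t>(is_negative ? -value : value) << 1) |
 //         (is_negative ? 1 : 0);
 //     do {
 //       uint32_t next = bits >> 7;
 //       out->push_back(
 //           static_cast<uint8_t>(((bits << 1) & 0xFF) | (next != 0 ? 1 : 0)));
 //       bits = next;
 //     } while (bits != 0);
 //   }
 //
 //   int32_t DecodeTranslationValue(const uint8_t* bytes, int* index) {
 //     uint32_t bits = 0;
 //     for (int i = 0; true; i += 7) {
 //       uint8_t next = bytes[(*index)++];
 //       bits |= static_cast<uint32_t>(next >> 1) << i;
 //       if ((next & 1) == 0) break;  // low bit clear marks the last byte
 //     }
 //     bool is_negative = (bits & 1) != 0;
 //     int32_t result = static_cast<int32_t>(bits >> 1);
 //     return is_negative ? -result : result;
 //   }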
 
@@ -3197,12 +3486,14 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
     case UINT32_REGISTER:
     case DOUBLE_REGISTER:
     case FLOAT32x4_REGISTER:
+    case FLOAT64x2_REGISTER:
     case INT32x4_REGISTER:
     case STACK_SLOT:
     case INT32_STACK_SLOT:
     case UINT32_STACK_SLOT:
     case DOUBLE_STACK_SLOT:
     case FLOAT32x4_STACK_SLOT:
+    case FLOAT64x2_STACK_SLOT:
     case INT32x4_STACK_SLOT:
     case LITERAL:
     case COMPILED_STUB_FRAME:
@@ -3214,7 +3505,7 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
     case JS_FRAME:
       return 3;
   }
-  UNREACHABLE();
+  FATAL("Unexpected translation type");
   return -1;
 }
 
@@ -3268,6 +3559,7 @@ SlotRef SlotRefValueBuilder::ComputeSlotForNextArgument(
     case Translation::UINT32_REGISTER:
     case Translation::DOUBLE_REGISTER:
     case Translation::FLOAT32x4_REGISTER:
+    case Translation::FLOAT64x2_REGISTER:
     case Translation::INT32x4_REGISTER:
       // We are at a safepoint which corresponds to a call.  All registers
       // are saved by the caller so there would be no live registers at this
@@ -3304,6 +3596,12 @@ SlotRef SlotRefValueBuilder::ComputeSlotForNextArgument(
       return SlotRef(slot_addr, SlotRef::FLOAT32x4);
     }
 
+    case Translation::FLOAT64x2_STACK_SLOT: {
+      int slot_index = iterator->Next();
+      Address slot_addr = SlotAddress(frame, slot_index);
+      return SlotRef(slot_addr, SlotRef::FLOAT64x2);
+    }
+
     case Translation::INT32x4_STACK_SLOT: {
       int slot_index = iterator->Next();
       Address slot_addr = SlotAddress(frame, slot_index);
@@ -3338,14 +3636,13 @@ SlotRefValueBuilder::SlotRefValueBuilder(JavaScriptFrame* frame,
   TranslationIterator it(data->TranslationByteArray(),
                          data->TranslationIndex(deopt_index)->value());
   Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
-  ASSERT(opcode == Translation::BEGIN);
+  CHECK_EQ(opcode, Translation::BEGIN);
   it.Next();  // Drop frame count.
 
   stack_frame_id_ = frame->fp();
 
   int jsframe_count = it.Next();
-  USE(jsframe_count);
-  ASSERT(jsframe_count > inlined_jsframe_index);
+  CHECK_GT(jsframe_count, inlined_jsframe_index);
   int jsframes_to_skip = inlined_jsframe_index;
   int number_of_slots = -1;  // Number of slots inside our frame (not yet known)
   bool should_deopt = false;
@@ -3354,7 +3651,7 @@ SlotRefValueBuilder::SlotRefValueBuilder(JavaScriptFrame* frame,
     bool processed = false;
     if (opcode == Translation::ARGUMENTS_ADAPTOR_FRAME) {
       if (jsframes_to_skip == 0) {
-        ASSERT(Translation::NumberOfOperandsFor(opcode) == 2);
+        CHECK_EQ(Translation::NumberOfOperandsFor(opcode), 2);
 
         it.Skip(1);  // literal id
         int height = it.Next();
@@ -3401,7 +3698,7 @@ SlotRefValueBuilder::SlotRefValueBuilder(JavaScriptFrame* frame,
         // the nested slots of captured objects
         number_of_slots--;
         SlotRef& slot = slot_refs_.last();
-        ASSERT(slot.Representation() != SlotRef::ARGUMENTS_OBJECT);
+        CHECK_NE(slot.Representation(), SlotRef::ARGUMENTS_OBJECT);
         number_of_slots += slot.GetChildrenCount();
         if (slot.Representation() == SlotRef::DEFERRED_OBJECT ||
             slot.Representation() == SlotRef::DUPLICATE_OBJECT) {
@@ -3430,7 +3727,11 @@ Handle<Object> SlotRef::GetValue(Isolate* isolate) {
       return Handle<Object>(Memory::Object_at(addr_), isolate);
 
     case INT32: {
+#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT
+      int value = Memory::int32_at(addr_ + kIntSize);
+#else
       int value = Memory::int32_at(addr_);
+#endif
       if (Smi::IsValid(value)) {
         return Handle<Object>(Smi::FromInt(value), isolate);
       } else {
@@ -3439,7 +3740,11 @@ Handle<Object> SlotRef::GetValue(Isolate* isolate) {
     }
 
     case UINT32: {
+#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT
+      uint32_t value = Memory::uint32_at(addr_ + kIntSize);
+#else
       uint32_t value = Memory::uint32_at(addr_);
+#endif
       if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
         return Handle<Object>(Smi::FromInt(static_cast<int>(value)), isolate);
       } else {
@@ -3455,6 +3760,9 @@ Handle<Object> SlotRef::GetValue(Isolate* isolate) {
     case FLOAT32x4:
       return isolate->factory()->NewFloat32x4(read_simd128_value(addr_).f4);
 
+    case FLOAT64x2:
+      return isolate->factory()->NewFloat64x2(read_simd128_value(addr_).d2);
+
     case INT32x4:
       return isolate->factory()->NewInt32x4(read_simd128_value(addr_).i4);
 
@@ -3481,7 +3789,7 @@ void SlotRefValueBuilder::Prepare(Isolate* isolate) {
   while (current_slot_ < first_slot_index_) {
     GetNext(isolate, 0);
   }
-  ASSERT(current_slot_ == first_slot_index_);
+  CHECK_EQ(current_slot_, first_slot_index_);
 }
 
 
@@ -3543,8 +3851,8 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
     }
     case SlotRef::DEFERRED_OBJECT: {
       int length = slot.GetChildrenCount();
-      ASSERT(slot_refs_[current_slot_].Representation() == SlotRef::LITERAL ||
-             slot_refs_[current_slot_].Representation() == SlotRef::TAGGED);
+      CHECK(slot_refs_[current_slot_].Representation() == SlotRef::LITERAL ||
+            slot_refs_[current_slot_].Representation() == SlotRef::TAGGED);
 
       int object_index = materialized_objects_.length();
       if (object_index < prev_materialized_count_) {
@@ -3553,16 +3861,24 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
 
       Handle<Object> map_object = slot_refs_[current_slot_].GetValue(isolate);
       Handle<Map> map = Map::GeneralizeAllFieldRepresentations(
-          Handle<Map>::cast(map_object), Representation::Tagged());
+          Handle<Map>::cast(map_object));
       current_slot_++;
       // TODO(jarin) this should be unified with the code in
       // Deoptimizer::MaterializeNextHeapObject()
       switch (map->instance_type()) {
+        case MUTABLE_HEAP_NUMBER_TYPE:
         case HEAP_NUMBER_TYPE: {
           // Reuse the HeapNumber value directly as it is already properly
           // tagged and skip materializing the HeapNumber explicitly.
           Handle<Object> object = GetNext(isolate, lvl + 1);
           materialized_objects_.Add(object);
+          // On 32-bit architectures there is an extra slot here because
+          // escape analysis computes the number of slots as
+          // object-size / pointer-size. To account for this, we read out
+          // any extra slots.
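+          // For example, on a 32-bit layout a HeapNumber is 12 bytes
+          // (4-byte map + 8-byte double), so escape analysis reports
+          // 12 / 4 = 3 slots; the map and the value were consumed above,
+          // leaving length - 2 = 1 slot for the loop below to drain.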
+          for (int i = 0; i < length - 2; i++) {
+            GetNext(isolate, lvl + 1);
+          }
           return object;
         }
         case JS_OBJECT_TYPE: {
@@ -3575,7 +3891,8 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
           object->set_elements(FixedArrayBase::cast(*elements));
           for (int i = 0; i < length - 3; ++i) {
             Handle<Object> value = GetNext(isolate, lvl + 1);
-            object->FastPropertyAtPut(i, *value);
+            FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
+            object->FastPropertyAtPut(index, *value);
           }
           return object;
         }
@@ -3598,6 +3915,7 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
           break;
       }
       UNREACHABLE();
+      break;
     }
 
     case SlotRef::DUPLICATE_OBJECT: {
@@ -3617,8 +3935,8 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
 
 
 void SlotRefValueBuilder::Finish(Isolate* isolate) {
-  // We should have processed all slot
-  ASSERT(slot_refs_.length() == current_slot_);
+  // We should have processed all the slots
+  CHECK_EQ(slot_refs_.length(), current_slot_);
 
   if (materialized_objects_.length() > prev_materialized_count_) {
     // We have materialized some new objects, so we have to store them
@@ -3639,7 +3957,7 @@ Handle<FixedArray> MaterializedObjectStore::Get(Address fp) {
     return Handle<FixedArray>::null();
   }
   Handle<FixedArray> array = GetStackEntries();
-  ASSERT(array->length() > index);
+  CHECK_GT(array->length(), index);
   return Handle<FixedArray>::cast(Handle<Object>(array->get(index),
                                                  isolate()));
 }
@@ -3660,11 +3978,11 @@ void MaterializedObjectStore::Set(Address fp,
 
 void MaterializedObjectStore::Remove(Address fp) {
   int index = StackIdToIndex(fp);
-  ASSERT(index >= 0);
+  CHECK_GE(index, 0);
 
   frame_fps_.Remove(index);
   Handle<FixedArray> array = GetStackEntries();
-  ASSERT(array->length() > index);
+  CHECK_LT(index, array->length());
   for (int i = index; i < frame_fps_.length(); i++) {
     array->set(i, array->get(i + 1));
   }
@@ -3710,7 +4028,6 @@ Handle<FixedArray> MaterializedObjectStore::EnsureStackEntries(int length) {
   return new_array;
 }
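 
 // Editor's note: MaterializedObjectStore (above) keeps frame_fps_ and its
 // FixedArray of entries index-aligned, which is why Remove() shifts the
 // array contents left after dropping a frame pointer. The invariant, with
 // standard containers and hypothetical names:
 //
 //   #include <cstdint>
 //   #include <vector>
 //
 //   struct MiniStore {
 //     std::vector<intptr_t> fps;  // frame pointers
 //     std::vector<int> entries;   // payload, index-aligned with fps
 //
 //     void Remove(intptr_t fp) {
 //       for (size_t i = 0; i < fps.size(); ++i) {
 //         if (fps[i] != fp) continue;
 //         fps.erase(fps.begin() + i);
 //         entries.erase(entries.begin() + i);  // keep indices aligned
 //         return;
 //       }
 //     }
 //   };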
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
 
 DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
                                            int frame_index,
@@ -3732,7 +4049,7 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
 
   if (has_arguments_adaptor) {
     output_frame = deoptimizer->output_[frame_index - 1];
-    ASSERT(output_frame->GetFrameType() == StackFrame::ARGUMENTS_ADAPTOR);
+    CHECK_EQ(output_frame->GetFrameType(), StackFrame::ARGUMENTS_ADAPTOR);
   }
 
   parameters_count_ = output_frame->ComputeParametersCount();
@@ -3755,6 +4072,4 @@ void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
   v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
 }
 
-#endif  // ENABLE_DEBUGGER_SUPPORT
-
 } }  // namespace v8::internal