1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "accessors.h"
32 #include "deoptimizer.h"
34 #include "full-codegen.h"
35 #include "global-handles.h"
36 #include "macro-assembler.h"
37 #include "prettyprinter.h"
// Allocates one MemoryChunk big enough to hold the largest possible
// deoptimization entry table (see Deoptimizer::GetMaxDeoptTableSize).
// NOTE(review): the trailing AllocateChunk arguments (executability etc.)
// are elided from this view — confirm against the full source.
43 static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
44 return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
46 #if defined(__native_client__)
47 // The Native Client port of V8 uses an interpreter,
48 // so code pages don't need PROT_EXEC.
// Per-isolate deoptimizer state. Pre-allocates one code chunk per bailout
// type that has a code-entry table; the tables themselves are generated
// lazily (entry count -1 means "not generated yet").
57 DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
58 : allocator_(allocator),
59 #ifdef ENABLE_DEBUGGER_SUPPORT
60 deoptimized_frame_info_(NULL),
// -1 marks the entry table for this bailout type as not yet generated.
63 for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
64 deopt_entry_code_entries_[i] = -1;
65 deopt_entry_code_[i] = AllocateCodeChunk(allocator);
// Releases the per-bailout-type entry-table chunks allocated in the ctor.
70 DeoptimizerData::~DeoptimizerData() {
71 for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
72 allocator_->Free(deopt_entry_code_[i]);
73 deopt_entry_code_[i] = NULL;
// GC support: visits the heap pointers held by the debugger-inspectable
// frame info, if one is currently registered.
78 #ifdef ENABLE_DEBUGGER_SUPPORT
79 void DeoptimizerData::Iterate(ObjectVisitor* v) {
80 if (deoptimized_frame_info_ != NULL) {
81 deoptimized_frame_info_->Iterate(v);
// Walks the native context's deoptimized-code list looking for the Code
// object that contains |addr|. Returns it if found; the not-found return
// path is elided from this view (presumably NULL — confirm in full source).
87 Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
88 if (function_->IsHeapObject()) {
89 // Search all deoptimizing code in the native context of the function.
90 Context* native_context = function_->context()->native_context();
91 Object* element = native_context->DeoptimizedCodeListHead();
92 while (!element->IsUndefined()) {
93 Code* code = Code::cast(element);
94 ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
95 if (code->contains(addr)) return code;
96 element = code->next_code_link();
103 // We rely on this function not causing a GC. It is called from generated code
104 // without having a real stack frame in place.
// Creates the singleton Deoptimizer for this isolate and parks it in
// DeoptimizerData::current_, to be picked up later by Deoptimizer::Grab.
105 Deoptimizer* Deoptimizer::New(JSFunction* function,
111 Deoptimizer* deoptimizer = new Deoptimizer(isolate,
// Only one deoptimizer may be live per isolate at a time.
118 ASSERT(isolate->deoptimizer_data()->current_ == NULL);
119 isolate->deoptimizer_data()->current_ = deoptimizer;
124 // No larger than 2K on all platforms
125 static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
// Upper bound, in bytes, for a deoptimization entry table: all entries
// plus the epilogue, rounded up to whole commit pages (one extra page of
// slack is added by the +1).
128 size_t Deoptimizer::GetMaxDeoptTableSize() {
130 Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
131 int commit_page_size = static_cast<int>(OS::CommitPageSize());
132 int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
133 commit_page_size) + 1;
134 return static_cast<size_t>(commit_page_size * page_count);
// Retrieves the deoptimizer stashed by Deoptimizer::New, clearing the
// per-isolate slot. Frame descriptions are deleted before handing the
// instance back, re-enabling heap allocation for the caller.
138 Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
139 Deoptimizer* result = isolate->deoptimizer_data()->current_;
140 ASSERT(result != NULL);
141 result->DeleteFrameDescriptions();
142 isolate->deoptimizer_data()->current_ = NULL;
// Maps an index counted over JavaScript frames only onto an index into
// output_[], which also contains non-JS frames (adaptor/construct stubs).
// The loop's counting logic is partially elided in this view.
147 int Deoptimizer::ConvertJSFrameIndexToFrameIndex(int jsframe_index) {
148 if (jsframe_index == 0) return 0;
151 while (jsframe_index >= 0) {
152 FrameDescription* frame = output_[frame_index];
153 if (frame->GetFrameType() == StackFrame::JAVA_SCRIPT) {
159 return frame_index - 1;
163 #ifdef ENABLE_DEBUGGER_SUPPORT
// Builds a GC-safe DeoptimizedFrameInfo for the |jsframe_index|-th inlined
// JS frame of an optimized |frame|, so the debugger can inspect locals and
// parameters as if the frame had been deoptimized. Uses a temporary
// DEBUGGER-type Deoptimizer to compute the output frames, then discards it.
164 DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
165 JavaScriptFrame* frame,
168 ASSERT(frame->is_optimized());
169 ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
171 // Get the function and code from the frame.
172 JSFunction* function = frame->function();
173 Code* code = frame->LookupCode();
175 // Locate the deoptimization point in the code. As we are at a call the
176 // return address must be at a place in the code with deoptimization support.
177 SafepointEntry safepoint_entry = code->GetSafepointEntry(frame->pc());
178 int deoptimization_index = safepoint_entry.deoptimization_index();
179 ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex);
181 // Always use the actual stack slots when calculating the fp to sp
182 // delta adding two for the function and context.
183 unsigned stack_slots = code->stack_slots();
184 unsigned fp_to_sp_delta = ((stack_slots + 2) * kPointerSize);
186 Deoptimizer* deoptimizer = new Deoptimizer(isolate,
188 Deoptimizer::DEBUGGER,
189 deoptimization_index,
193 Address tos = frame->fp() - fp_to_sp_delta;
194 deoptimizer->FillInputFrame(tos, frame);
196 // Calculate the output frames.
197 Deoptimizer::ComputeOutputFrames(deoptimizer);
199 // Create the GC safe output frame information and register it for GC
201 ASSERT_LT(jsframe_index, deoptimizer->jsframe_count());
203 // Convert JS frame index into frame index.
204 int frame_index = deoptimizer->ConvertJSFrameIndexToFrameIndex(jsframe_index);
// An arguments-adaptor frame, if present, sits directly below the JS frame;
// a construct stub sits one slot further down when an adaptor is present.
206 bool has_arguments_adaptor =
208 deoptimizer->output_[frame_index - 1]->GetFrameType() ==
209 StackFrame::ARGUMENTS_ADAPTOR;
211 int construct_offset = has_arguments_adaptor ? 2 : 1;
212 bool has_construct_stub =
213 frame_index >= construct_offset &&
214 deoptimizer->output_[frame_index - construct_offset]->GetFrameType() ==
215 StackFrame::CONSTRUCT;
217 DeoptimizedFrameInfo* info = new DeoptimizedFrameInfo(deoptimizer,
219 has_arguments_adaptor,
// Register with the isolate so DeoptimizerData::Iterate keeps it GC-safe.
221 isolate->deoptimizer_data()->deoptimized_frame_info_ = info;
223 // Get the "simulated" top and size for the requested frame.
// Parameters live in the adaptor frame when one exists, else in the frame
// itself; +1 accounts for the receiver.
224 FrameDescription* parameters_frame =
225 deoptimizer->output_[
226 has_arguments_adaptor ? (frame_index - 1) : frame_index];
228 uint32_t parameters_size = (info->parameters_count() + 1) * kPointerSize;
229 Address parameters_top = reinterpret_cast<Address>(
230 parameters_frame->GetTop() + (parameters_frame->GetFrameSize() -
233 uint32_t expressions_size = info->expression_count() * kPointerSize;
234 Address expressions_top = reinterpret_cast<Address>(
235 deoptimizer->output_[frame_index]->GetTop());
237 // Done with the GC-unsafe frame descriptions. This re-enables allocation.
238 deoptimizer->DeleteFrameDescriptions();
240 // Allocate a heap number for the doubles belonging to this frame.
241 deoptimizer->MaterializeHeapNumbersForDebuggerInspectableFrame(
242 parameters_top, parameters_size, expressions_top, expressions_size, info);
244 // Finished using the deoptimizer instance.
// Unregisters (and presumably frees — deletion elided here) the frame info
// previously installed by DebuggerInspectableFrame.
251 void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
253 ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == info);
255 isolate->deoptimizer_data()->deoptimized_frame_info_ = NULL;
// Emits |count| deoptimization table entries of the given bailout |type|
// into |masm| via the platform-specific TableEntryGenerator.
259 void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
262 TableEntryGenerator generator(masm, type, count);
263 generator.Generate();
// Applies |visitor| to every function on the native context's optimized-
// functions list, unlinking entries whose code is no longer optimized
// (either before or as a result of the visit). Runs with allocation
// disallowed so raw pointers into the heap stay valid.
267 void Deoptimizer::VisitAllOptimizedFunctionsForContext(
268 Context* context, OptimizedFunctionVisitor* visitor) {
269 DisallowHeapAllocation no_allocation;
271 ASSERT(context->IsNativeContext());
273 visitor->EnterContext(context);
275 // Visit the list of optimized functions, removing elements that
276 // no longer refer to optimized code.
277 JSFunction* prev = NULL;
278 Object* element = context->OptimizedFunctionsListHead();
279 while (!element->IsUndefined()) {
280 JSFunction* function = JSFunction::cast(element);
281 Object* next = function->next_function_link();
// The comma expression visits the function, then re-checks its code kind:
// the visitor itself may have deoptimized it.
282 if (function->code()->kind() != Code::OPTIMIZED_FUNCTION ||
283 (visitor->VisitFunction(function),
284 function->code()->kind() != Code::OPTIMIZED_FUNCTION)) {
285 // The function no longer refers to optimized code, or the visitor
286 // changed the code to which it refers to no longer be optimized code.
287 // Remove the function from this list.
289 prev->set_next_function_link(next);
291 context->SetOptimizedFunctionsListHead(next);
293 // The visitor should not alter the link directly.
294 ASSERT(function->next_function_link() == next);
295 // Set the next function link to undefined to indicate it is no longer
296 // in the optimized functions list.
297 function->set_next_function_link(context->GetHeap()->undefined_value());
299 // The visitor should not alter the link directly.
300 ASSERT(function->next_function_link() == next);
301 // preserve this element.
307 visitor->LeaveContext(context);
// Runs the per-context visit over every native context in the isolate,
// following the NEXT_CONTEXT_LINK chain.
311 void Deoptimizer::VisitAllOptimizedFunctions(
313 OptimizedFunctionVisitor* visitor) {
314 DisallowHeapAllocation no_allocation;
316 // Run through the list of all native contexts.
317 Object* context = isolate->heap()->native_contexts_list();
318 while (!context->IsUndefined()) {
319 VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor);
320 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
325 // Unlink functions referring to code marked for deoptimization, then move
326 // marked code from the optimized code list to the deoptimized code list,
327 // and patch code for lazy deopt.
328 void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
329 DisallowHeapAllocation no_allocation;
331 // A "closure" that unlinks optimized code that is going to be
332 // deoptimized from the functions that refer to it.
333 class SelectedCodeUnlinker: public OptimizedFunctionVisitor {
335 virtual void EnterContext(Context* context) { } // Don't care.
336 virtual void LeaveContext(Context* context) { } // Don't care.
337 virtual void VisitFunction(JSFunction* function) {
338 Code* code = function->code();
339 if (!code->marked_for_deoptimization()) return;
341 // Unlink this function and evict from optimized code map.
// Fall back to the shared (unoptimized) code for future calls.
342 SharedFunctionInfo* shared = function->shared();
343 function->set_code(shared->code());
344 shared->EvictFromOptimizedCodeMap(code, "deoptimized function");
346 if (FLAG_trace_deopt) {
347 PrintF("[deoptimizer unlinked: ");
348 function->PrintName();
349 PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
354 // Unlink all functions that refer to marked code.
355 SelectedCodeUnlinker unlinker;
356 VisitAllOptimizedFunctionsForContext(context, &unlinker);
358 // Move marked code from the optimized code list to the deoptimized
359 // code list, collecting them into a ZoneList.
360 Isolate* isolate = context->GetHeap()->isolate();
362 ZoneList<Code*> codes(10, &zone);
364 // Walk over all optimized code objects in this native context.
366 Object* element = context->OptimizedCodeListHead();
367 while (!element->IsUndefined()) {
368 Code* code = Code::cast(element);
369 ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
370 Object* next = code->next_code_link();
371 if (code->marked_for_deoptimization()) {
372 // Put the code into the list for later patching.
373 codes.Add(code, &zone);
376 // Skip this code in the optimized code list.
377 prev->set_next_code_link(next);
379 // There was no previous node, the next node is the new head.
380 context->SetOptimizedCodeListHead(next);
383 // Move the code to the _deoptimized_ code list.
384 code->set_next_code_link(context->DeoptimizedCodeListHead());
385 context->SetDeoptimizedCodeListHead(code);
387 // Not marked; preserve this element.
393 // TODO(titzer): we need a handle scope only because of the macro assembler,
394 // which is only used in EnsureCodeForDeoptimizationEntry.
395 HandleScope scope(isolate);
396 // Now patch all the codes for deoptimization.
397 for (int i = 0; i < codes.length(); i++) {
398 // It is finally time to die, code object.
399 // Do platform-specific patching to force any activations to lazy deopt.
400 PatchCodeForDeoptimization(isolate, codes[i]);
402 // We might be in the middle of incremental marking with compaction.
403 // Tell collector to treat this code object in a special way and
404 // ignore all slots that might have been recorded on it.
405 isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
// Deoptimizes ALL optimized code in the isolate: marks every optimized
// code object in every native context, then processes the marks.
410 void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
411 if (FLAG_trace_deopt) {
412 PrintF("[deoptimize all code in all contexts]\n");
414 DisallowHeapAllocation no_allocation;
415 // For all contexts, mark all code, then deoptimize.
416 Object* context = isolate->heap()->native_contexts_list();
417 while (!context->IsUndefined()) {
418 Context* native_context = Context::cast(context);
419 MarkAllCodeForContext(native_context);
420 DeoptimizeMarkedCodeForContext(native_context);
421 context = native_context->get(Context::NEXT_CONTEXT_LINK);
// Processes code objects already marked for deoptimization, in every
// native context. Unlike DeoptimizeAll, no new marking is done here.
426 void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
427 if (FLAG_trace_deopt) {
428 PrintF("[deoptimize marked code in all contexts]\n");
430 DisallowHeapAllocation no_allocation;
431 // For all contexts, deoptimize code already marked.
432 Object* context = isolate->heap()->native_contexts_list();
433 while (!context->IsUndefined()) {
434 Context* native_context = Context::cast(context);
435 DeoptimizeMarkedCodeForContext(native_context);
436 context = native_context->get(Context::NEXT_CONTEXT_LINK);
// Deoptimizes all code belonging to the native context of |object|.
// Accepts either a global proxy (resolved through its prototype) or a
// global object directly; other objects are ignored.
441 void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
442 if (FLAG_trace_deopt) {
443 PrintF("[deoptimize global object @ 0x%08" V8PRIxPTR "]\n",
444 reinterpret_cast<intptr_t>(object));
446 if (object->IsJSGlobalProxy()) {
447 Object* proto = object->GetPrototype();
448 ASSERT(proto->IsJSGlobalObject());
449 Context* native_context = GlobalObject::cast(proto)->native_context();
450 MarkAllCodeForContext(native_context);
451 DeoptimizeMarkedCodeForContext(native_context);
452 } else if (object->IsGlobalObject()) {
453 Context* native_context = GlobalObject::cast(object)->native_context();
454 MarkAllCodeForContext(native_context);
455 DeoptimizeMarkedCodeForContext(native_context);
// Marks every code object on the context's optimized-code list for
// deoptimization; actual processing happens later in
// DeoptimizeMarkedCodeForContext.
460 void Deoptimizer::MarkAllCodeForContext(Context* context) {
461 Object* element = context->OptimizedCodeListHead();
462 while (!element->IsUndefined()) {
463 Code* code = Code::cast(element);
464 ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
465 code->set_marked_for_deoptimization(true);
466 element = code->next_code_link();
// Deoptimizes a single function: marks its current optimized code, then
// processes the mark within its native context (optimized code is never
// shared across native contexts, so one context suffices).
471 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
472 Code* code = function->code();
473 if (code->kind() == Code::OPTIMIZED_FUNCTION) {
474 // Mark the code for deoptimization and unlink any functions that also
475 // refer to that code. The code cannot be shared across native contexts,
476 // so we only need to search one.
477 code->set_marked_for_deoptimization(true);
478 DeoptimizeMarkedCodeForContext(function->context()->native_context());
// Static trampoline used by generated code and the debugger path.
483 void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
484 deoptimizer->DoComputeOutputFrames();
// Decides whether deopt tracing is enabled for this bailout/frame-type
// combination (stub frames use FLAG_trace_stub_failures; other cases
// are elided from this view).
488 bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type,
489 StackFrame::Type frame_type) {
490 switch (deopt_type) {
495 return (frame_type == StackFrame::STUB)
496 ? FLAG_trace_stub_failures
// Human-readable name for a bailout type, used in trace output.
504 const char* Deoptimizer::MessageFor(BailoutType type) {
506 case EAGER: return "eager";
507 case SOFT: return "soft";
508 case LAZY: return "lazy";
509 case DEBUGGER: return "debugger";
// Captures everything needed to translate the optimized (input) frame into
// unoptimized (output) frames: bailout id, fp-to-sp delta, the compiled
// code, and bookkeeping for objects/heap numbers to materialize later.
// Allocates the input FrameDescription, then disallows further heap
// allocation for the duration of the translation.
516 Deoptimizer::Deoptimizer(Isolate* isolate,
517 JSFunction* function,
522 Code* optimized_code)
525 bailout_id_(bailout_id),
528 fp_to_sp_delta_(fp_to_sp_delta),
529 has_alignment_padding_(0),
534 deferred_objects_tagged_values_(0),
535 deferred_objects_double_values_(0),
536 deferred_objects_(0),
537 deferred_heap_numbers_(0),
538 jsframe_functions_(0),
539 jsframe_has_adapted_arguments_(0),
540 materialized_values_(NULL),
541 materialized_objects_(NULL),
542 materialization_value_index_(0),
543 materialization_object_index_(0),
545 // For COMPILED_STUBs called from builtins, the function pointer is a SMI
546 // indicating an internal frame.
547 if (function->IsSmi()) {
550 ASSERT(from != NULL);
551 if (function != NULL && function->IsOptimized()) {
552 function->shared()->increment_deopt_count();
553 if (bailout_type_ == Deoptimizer::SOFT) {
554 isolate->counters()->soft_deopts_executed()->Increment();
555 // Soft deopts shouldn't count against the overall re-optimization count
556 // that can eventually lead to disabling optimization for a function.
557 int opt_count = function->shared()->opt_count();
558 if (opt_count > 0) opt_count--;
559 function->shared()->set_opt_count(opt_count);
562 compiled_code_ = FindOptimizedCode(function, optimized_code);
565 ASSERT(compiled_code_ != NULL);
566 if (type == EAGER || type == SOFT || type == LAZY) {
567 ASSERT(compiled_code_->kind() != Code::FUNCTION);
// NULL function means we are deoptimizing a stub, not a JS frame.
571 StackFrame::Type frame_type = function == NULL
573 : StackFrame::JAVA_SCRIPT;
574 trace_ = TraceEnabledFor(type, frame_type);
// From here on, GC must not move anything we hold raw pointers to.
576 CHECK(AllowHeapAllocation::IsAllowed());
577 disallow_heap_allocation_ = new DisallowHeapAllocation();
579 unsigned size = ComputeInputFrameSize();
580 input_ = new(size) FrameDescription(size, function);
581 input_->SetFrameType(frame_type);
// Resolves the Code object being deoptimized. For normal bailouts it is
// looked up from the return address (from_); for debugger-initiated
// deopts the caller supplies it directly.
585 Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
586 Code* optimized_code) {
587 switch (bailout_type_) {
588 case Deoptimizer::SOFT:
589 case Deoptimizer::EAGER:
590 case Deoptimizer::LAZY: {
591 Code* compiled_code = FindDeoptimizingCode(from_);
// Fall back to a heap code lookup when the code is not yet on the
// deoptimized-code list (the fallback expression is elided here).
592 return (compiled_code == NULL)
593 ? static_cast<Code*>(isolate_->FindCodeObject(from_))
596 case Deoptimizer::DEBUGGER:
597 ASSERT(optimized_code->contains(from_));
598 return optimized_code;
// Trace helper: prints the JS function's name, or the code kind when
// deoptimizing a stub (no JSFunction available).
605 void Deoptimizer::PrintFunctionName() {
606 if (function_->IsJSFunction()) {
607 function_->PrintName();
609 PrintF("%s", Code::Kind2String(compiled_code_->kind()));
// Frame descriptions and the DisallowHeapAllocation guard must have been
// released via DeleteFrameDescriptions before destruction.
614 Deoptimizer::~Deoptimizer() {
615 ASSERT(input_ == NULL && output_ == NULL);
616 ASSERT(disallow_heap_allocation_ == NULL);
// Frees the input/output FrameDescriptions (guarding against the case
// where an output slot aliases the input) and drops the allocation guard,
// re-enabling heap allocation.
620 void Deoptimizer::DeleteFrameDescriptions() {
622 for (int i = 0; i < output_count_; ++i) {
623 if (output_[i] != input_) delete output_[i];
629 CHECK(!AllowHeapAllocation::IsAllowed());
630 CHECK(disallow_heap_allocation_ != NULL);
631 delete disallow_heap_allocation_;
632 disallow_heap_allocation_ = NULL;
// Returns the address of deopt table entry |id| for the given bailout
// type, optionally generating the table first (ENSURE_ENTRY_CODE).
// Returns NULL for out-of-range ids.
637 Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
642 if (id >= kMaxNumberOfEntries) return NULL;
643 if (mode == ENSURE_ENTRY_CODE) {
644 EnsureCodeForDeoptimizationEntry(isolate, type, id);
646 ASSERT(mode == CALCULATE_ENTRY_ADDRESS);
648 DeoptimizerData* data = isolate->deoptimizer_data();
649 ASSERT(type < kBailoutTypesWithCodeEntry);
650 MemoryChunk* base = data->deopt_entry_code_[type];
// Entries are laid out contiguously at fixed stride table_entry_size_.
651 return base->area_start() + (id * table_entry_size_);
// Inverse of GetDeoptimizationEntry: maps an address inside the deopt
// table back to its entry id, or kNotDeoptimizationEntry if |addr| is
// outside the table. The address must land exactly on an entry boundary.
655 int Deoptimizer::GetDeoptimizationId(Isolate* isolate,
658 DeoptimizerData* data = isolate->deoptimizer_data();
659 MemoryChunk* base = data->deopt_entry_code_[type];
660 Address start = base->area_start();
663 addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
664 return kNotDeoptimizationEntry;
667 static_cast<int>(addr - start) % table_entry_size_);
668 return static_cast<int>(addr - start) / table_entry_size_;
// Looks up the packed pc-offset/state word for AST node |id| in the
// unoptimized code's deoptimization output data. Failure to find the id
// is fatal and dumps diagnostics (name and source) first.
672 int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
674 SharedFunctionInfo* shared) {
675 // TODO(kasperl): For now, we do a simple linear search for the PC
676 // offset associated with the given node id. This should probably be
677 // changed to a binary search.
678 int length = data->DeoptPoints();
679 for (int i = 0; i < length; i++) {
680 if (data->AstId(i) == id) {
681 return data->PcAndState(i)->value();
684 PrintF("[couldn't find pc offset for node=%d]\n", id.ToInt());
685 PrintF("[method: %s]\n", *shared->DebugName()->ToCString());
686 // Print the source code if available.
687 HeapStringAllocator string_allocator;
688 StringStream stream(&string_allocator);
689 shared->SourceCodePrint(&stream, -1);
690 PrintF("[source:\n%s\n]", *stream.ToCString());
692 FATAL("unable to find pc offset during deoptimization");
// Counts code objects on the deoptimized-code lists of all native
// contexts (the counter increment itself is elided from this view).
697 int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
699 // Count all entries in the deoptimizing code list of every context.
700 Object* context = isolate->heap()->native_contexts_list();
701 while (!context->IsUndefined()) {
702 Context* native_context = Context::cast(context);
703 Object* element = native_context->DeoptimizedCodeListHead();
704 while (!element->IsUndefined()) {
705 Code* code = Code::cast(element);
706 ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
708 element = code->next_code_link();
710 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
716 // We rely on this function not causing a GC. It is called from generated code
717 // without having a real stack frame in place.
// Core of deoptimization: decodes the translation stream recorded at the
// bailout point and builds one output FrameDescription per (possibly
// inlined) frame, dispatching on the frame kind of each translation entry.
718 void Deoptimizer::DoComputeOutputFrames() {
719 // Print some helpful diagnostic information.
720 if (FLAG_log_timer_events &&
721 compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
722 LOG(isolate(), CodeDeoptEvent(compiled_code_));
727 PrintF("[deoptimizing (DEOPT %s): begin 0x%08" V8PRIxPTR " ",
728 MessageFor(bailout_type_),
729 reinterpret_cast<intptr_t>(function_));
731 PrintF(" @%d, FP to SP delta: %d]\n", bailout_id_, fp_to_sp_delta_);
732 if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
733 compiled_code_->PrintDeoptLocation(bailout_id_);
737 // Determine basic deoptimization information. The optimized frame is
738 // described by the input data.
739 DeoptimizationInputData* input_data =
740 DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
741 BailoutId node_id = input_data->AstId(bailout_id_);
742 ByteArray* translations = input_data->TranslationByteArray();
743 unsigned translation_index =
744 input_data->TranslationIndex(bailout_id_)->value();
746 // Do the input frame to output frame(s) translation.
747 TranslationIterator iterator(translations, translation_index);
748 Translation::Opcode opcode =
749 static_cast<Translation::Opcode>(iterator.Next());
750 ASSERT(Translation::BEGIN == opcode);
752 // Read the number of output frames and allocate an array for their
754 int count = iterator.Next();
755 iterator.Next(); // Drop JS frames count.
756 ASSERT(output_ == NULL);
757 output_ = new FrameDescription*[count];
758 for (int i = 0; i < count; ++i) {
761 output_count_ = count;
763 // Translate each output frame.
764 for (int i = 0; i < count; ++i) {
765 // Read the ast node id, function, and frame height for this output frame.
766 Translation::Opcode opcode =
767 static_cast<Translation::Opcode>(iterator.Next());
769 case Translation::JS_FRAME:
770 DoComputeJSFrame(&iterator, i);
773 case Translation::ARGUMENTS_ADAPTOR_FRAME:
774 DoComputeArgumentsAdaptorFrame(&iterator, i);
776 case Translation::CONSTRUCT_STUB_FRAME:
777 DoComputeConstructStubFrame(&iterator, i);
779 case Translation::GETTER_STUB_FRAME:
780 DoComputeAccessorStubFrame(&iterator, i, false);
782 case Translation::SETTER_STUB_FRAME:
783 DoComputeAccessorStubFrame(&iterator, i, true);
785 case Translation::COMPILED_STUB_FRAME:
786 DoComputeCompiledStubFrame(&iterator, i);
// Value-level opcodes are invalid at frame level; presumably this
// falls through to an UNREACHABLE (elided from this view).
788 case Translation::BEGIN:
789 case Translation::REGISTER:
790 case Translation::INT32_REGISTER:
791 case Translation::UINT32_REGISTER:
792 case Translation::DOUBLE_REGISTER:
793 case Translation::STACK_SLOT:
794 case Translation::INT32_STACK_SLOT:
795 case Translation::UINT32_STACK_SLOT:
796 case Translation::DOUBLE_STACK_SLOT:
797 case Translation::LITERAL:
798 case Translation::ARGUMENTS_OBJECT:
805 // Print some helpful diagnostic information.
807 double ms = timer.Elapsed().InMillisecondsF();
808 int index = output_count_ - 1; // Index of the topmost frame.
809 JSFunction* function = output_[index]->GetFunction();
810 PrintF("[deoptimizing (%s): end 0x%08" V8PRIxPTR " ",
811 MessageFor(bailout_type_),
812 reinterpret_cast<intptr_t>(function));
814 PrintF(" @%d => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
818 output_[index]->GetPc(),
819 FullCodeGenerator::State2String(
820 static_cast<FullCodeGenerator::State>(
821 output_[index]->GetState()->value())),
822 has_alignment_padding_ ? "with padding" : "no padding",
// Materializes one JavaScript output frame from the translation stream:
// translated parameters, synthesized caller pc/fp, context and function
// slots, translated locals/expression stack, and finally the pc/state in
// the unoptimized code plus the continuation builtin for the topmost frame.
828 void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
830 BailoutId node_id = BailoutId(iterator->Next());
831 JSFunction* function;
// Inlined frames carry their function as a literal; the outermost frame
// refers to the function being deoptimized (kSelfLiteralId).
832 if (frame_index != 0) {
833 function = JSFunction::cast(ComputeLiteral(iterator->Next()));
835 int closure_id = iterator->Next();
837 ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
838 function = function_;
840 unsigned height = iterator->Next();
841 unsigned height_in_bytes = height * kPointerSize;
843 PrintF(" translating ");
844 function->PrintName();
845 PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
848 // The 'fixed' part of the frame consists of the incoming parameters and
849 // the part described by JavaScriptFrameConstants.
850 unsigned fixed_frame_size = ComputeFixedSize(function);
851 unsigned input_frame_size = input_->GetFrameSize();
852 unsigned output_frame_size = height_in_bytes + fixed_frame_size;
854 // Allocate and store the output frame description.
855 FrameDescription* output_frame =
856 new(output_frame_size) FrameDescription(output_frame_size, function);
857 output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
859 bool is_bottommost = (0 == frame_index);
860 bool is_topmost = (output_count_ - 1 == frame_index);
861 ASSERT(frame_index >= 0 && frame_index < output_count_);
862 ASSERT(output_[frame_index] == NULL);
863 output_[frame_index] = output_frame;
865 // The top address for the bottommost output frame can be computed from
866 // the input frame pointer and the output frame's height. For all
867 // subsequent output frames, it can be computed from the previous one's
868 // top address and the current frame's size.
869 Register fp_reg = JavaScriptFrame::fp_register();
870 intptr_t top_address;
872 // Determine whether the input frame contains alignment padding.
873 has_alignment_padding_ = HasAlignmentPadding(function) ? 1 : 0;
874 // 2 = context and function in the frame.
875 // If the optimized frame had alignment padding, adjust the frame pointer
876 // to point to the new position of the old frame pointer after padding
877 // is removed. Subtract 2 * kPointerSize for the context and function slots.
878 top_address = input_->GetRegister(fp_reg.code()) - (2 * kPointerSize) -
879 height_in_bytes + has_alignment_padding_ * kPointerSize;
881 top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
883 output_frame->SetTop(top_address);
885 // Compute the incoming parameter translation.
// +1 for the receiver.
886 int parameter_count = function->shared()->formal_parameter_count() + 1;
887 unsigned output_offset = output_frame_size;
888 unsigned input_offset = input_frame_size;
889 for (int i = 0; i < parameter_count; ++i) {
890 output_offset -= kPointerSize;
891 DoTranslateCommand(iterator, frame_index, output_offset);
893 input_offset -= (parameter_count * kPointerSize);
895 // There are no translation commands for the caller's pc and fp, the
896 // context, and the function. Synthesize their values and set them up
899 // The caller's pc for the bottommost output frame is the same as in the
900 // input frame. For all subsequent output frames, it can be read from the
901 // previous one. This frame's pc can be computed from the non-optimized
902 // function code and AST id of the bailout.
903 output_offset -= kPCOnStackSize;
904 input_offset -= kPCOnStackSize;
907 value = input_->GetFrameSlot(input_offset);
909 value = output_[frame_index - 1]->GetPc();
911 output_frame->SetCallerPc(output_offset, value);
913 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
914 V8PRIxPTR " ; caller's pc\n",
915 top_address + output_offset, output_offset, value);
918 // The caller's frame pointer for the bottommost output frame is the same
919 // as in the input frame. For all subsequent output frames, it can be
920 // read from the previous one. Also compute and set this frame's frame
922 output_offset -= kFPOnStackSize;
923 input_offset -= kFPOnStackSize;
925 value = input_->GetFrameSlot(input_offset);
927 value = output_[frame_index - 1]->GetFp();
929 output_frame->SetCallerFp(output_offset, value);
930 intptr_t fp_value = top_address + output_offset;
931 ASSERT(!is_bottommost || (input_->GetRegister(fp_reg.code()) +
932 has_alignment_padding_ * kPointerSize) == fp_value);
933 output_frame->SetFp(fp_value);
// Only the topmost frame's registers are restored on re-entry.
934 if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value);
936 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
937 V8PRIxPTR " ; caller's fp\n",
938 fp_value, output_offset, value);
940 ASSERT(!is_bottommost || !has_alignment_padding_ ||
941 (fp_value & kPointerSize) != 0);
943 // For the bottommost output frame the context can be gotten from the input
944 // frame. For all subsequent output frames it can be gotten from the function
945 // so long as we don't inline functions that need local contexts.
946 Register context_reg = JavaScriptFrame::context_register();
947 output_offset -= kPointerSize;
948 input_offset -= kPointerSize;
950 value = input_->GetFrameSlot(input_offset);
952 value = reinterpret_cast<intptr_t>(function->context());
954 output_frame->SetFrameSlot(output_offset, value);
955 output_frame->SetContext(value);
956 if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
958 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
959 V8PRIxPTR "; context\n",
960 top_address + output_offset, output_offset, value);
963 // The function was mentioned explicitly in the BEGIN_FRAME.
964 output_offset -= kPointerSize;
965 input_offset -= kPointerSize;
966 value = reinterpret_cast<intptr_t>(function);
967 // The function for the bottommost output frame should also agree with the
969 ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
970 output_frame->SetFrameSlot(output_offset, value);
972 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
973 V8PRIxPTR "; function\n",
974 top_address + output_offset, output_offset, value);
977 // Translate the rest of the frame.
978 for (unsigned i = 0; i < height; ++i) {
979 output_offset -= kPointerSize;
980 DoTranslateCommand(iterator, frame_index, output_offset);
// Every slot of the output frame must have been filled exactly once.
982 ASSERT(0 == output_offset);
984 // Compute this frame's PC, state, and continuation.
985 Code* non_optimized_code = function->shared()->code();
986 FixedArray* raw_data = non_optimized_code->deoptimization_data();
987 DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
988 Address start = non_optimized_code->instruction_start();
989 unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
990 unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
991 intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
992 output_frame->SetPc(pc_value);
994 FullCodeGenerator::State state =
995 FullCodeGenerator::StateField::decode(pc_and_state);
996 output_frame->SetState(Smi::FromInt(state));
998 // Set the continuation for the topmost frame.
// The continuation builtin notifies the runtime of the deopt kind after
// the unoptimized frame is re-entered. DEBUGGER deopts never resume.
999 if (is_topmost && bailout_type_ != DEBUGGER) {
1000 Builtins* builtins = isolate_->builtins();
1001 Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
1002 if (bailout_type_ == LAZY) {
1003 continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
1004 } else if (bailout_type_ == SOFT) {
1005 continuation = builtins->builtin(Builtins::kNotifySoftDeoptimized);
1007 ASSERT(bailout_type_ == EAGER);
1009 output_frame->SetContinuation(
1010 reinterpret_cast<intptr_t>(continuation->entry()));
// Reconstructs an arguments adaptor frame that the optimizing compiler
// elided. Reads the callee function and the frame height (in slots) from
// the translation stream, then fills the output frame top-down with the
// translated parameters, the caller's PC and FP, an ARGUMENTS_ADAPTOR
// sentinel in the context slot, the function, and the argument count,
// finally pointing the PC into the arguments adaptor trampoline at the
// recorded deopt offset.
// NOTE(review): this excerpt elides some lines (e.g. trace guards and the
// remainder of the parameter list); comments describe the visible logic.
void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
  unsigned height = iterator->Next();
  unsigned height_in_bytes = height * kPointerSize;
  PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
  // Allocate and store the output frame description.
  FrameDescription* output_frame =
      new(output_frame_size) FrameDescription(output_frame_size, function);
  output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
  // Arguments adaptor can not be topmost or bottommost.
  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
  ASSERT(output_[frame_index] == NULL);
  output_[frame_index] = output_frame;
  // The top address of the frame is computed from the previous
  // frame's top and this frame's size.
  intptr_t top_address;
  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  output_frame->SetTop(top_address);
  // Compute the incoming parameter translation.
  int parameter_count = height;
  unsigned output_offset = output_frame_size;
  for (int i = 0; i < parameter_count; ++i) {
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  // Read caller's PC from the previous frame.
  output_offset -= kPCOnStackSize;
  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
  output_frame->SetCallerPc(output_offset, callers_pc);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; caller's pc\n",
         top_address + output_offset, output_offset, callers_pc);
  // Read caller's FP from the previous frame, and set this frame's FP.
  output_offset -= kFPOnStackSize;
  intptr_t value = output_[frame_index - 1]->GetFp();
  output_frame->SetCallerFp(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  output_frame->SetFp(fp_value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; caller's fp\n",
         fp_value, output_offset, value);
  // A marker value is used in place of the context.
  output_offset -= kPointerSize;
  intptr_t context = reinterpret_cast<intptr_t>(
      Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  output_frame->SetFrameSlot(output_offset, context);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; context (adaptor sentinel)\n",
         top_address + output_offset, output_offset, context);
  // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(function);
  output_frame->SetFrameSlot(output_offset, value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; function\n",
         top_address + output_offset, output_offset, value);
  // Number of incoming arguments.
  // height - 1: the trace below labels this "argc", so the height
  // apparently includes one extra slot (presumably the receiver) --
  // TODO(review): confirm against the translation encoder.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
  output_frame->SetFrameSlot(output_offset, value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; argc (%d)\n",
         top_address + output_offset, output_offset, value, height - 1);
  // Every slot of the frame must have been written exactly once.
  ASSERT(0 == output_offset);
  // Resume execution inside the arguments adaptor trampoline, at the
  // deopt PC offset the heap recorded for it.
  Builtins* builtins = isolate_->builtins();
  Code* adaptor_trampoline =
      builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
  intptr_t pc_value = reinterpret_cast<intptr_t>(
      adaptor_trampoline->instruction_start() +
      isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
  output_frame->SetPc(pc_value);
// Reconstructs a construct stub frame (JSConstructStubGeneric) for an
// inlined constructor call that was optimized away. Lays the frame out
// top-down: translated parameters (the first of which is the allocated
// receiver), caller PC/FP, context from the previous frame, a CONSTRUCT
// sentinel in the function slot, the construct stub code object, the
// argument count, optionally the constructor function, and finally the
// allocated receiver again, before pointing the PC at the stub's deopt
// offset.
// NOTE(review): this excerpt elides some lines (trace guards, closing
// braces, remainder of the parameter list); comments describe the visible
// logic.
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
  Builtins* builtins = isolate_->builtins();
  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
  unsigned height = iterator->Next();
  unsigned height_in_bytes = height * kPointerSize;
  PrintF(" translating construct stub => height=%d\n", height_in_bytes);
  unsigned fixed_frame_size = ConstructFrameConstants::kFrameSize;
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
  // Allocate and store the output frame description.
  FrameDescription* output_frame =
      new(output_frame_size) FrameDescription(output_frame_size, function);
  output_frame->SetFrameType(StackFrame::CONSTRUCT);
  // Construct stub can not be topmost or bottommost.
  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
  ASSERT(output_[frame_index] == NULL);
  output_[frame_index] = output_frame;
  // The top address of the frame is computed from the previous
  // frame's top and this frame's size.
  intptr_t top_address;
  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  output_frame->SetTop(top_address);
  // Compute the incoming parameter translation.
  int parameter_count = height;
  unsigned output_offset = output_frame_size;
  for (int i = 0; i < parameter_count; ++i) {
    output_offset -= kPointerSize;
    // Remember how many deferred objects exist before this command so we
    // can tell whether the command below deferred a captured object.
    int deferred_object_index = deferred_objects_.length();
    DoTranslateCommand(iterator, frame_index, output_offset);
    // The allocated receiver of a construct stub frame is passed as the
    // receiver parameter through the translation. It might be encoding
    // a captured object, patch the slot address for a captured object.
    if (i == 0 && deferred_objects_.length() > deferred_object_index) {
      ASSERT(!deferred_objects_[deferred_object_index].is_arguments());
      deferred_objects_[deferred_object_index].patch_slot_address(top_address);
  // Read caller's PC from the previous frame.
  output_offset -= kPCOnStackSize;
  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
  output_frame->SetCallerPc(output_offset, callers_pc);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; caller's pc\n",
         top_address + output_offset, output_offset, callers_pc);
  // Read caller's FP from the previous frame, and set this frame's FP.
  output_offset -= kFPOnStackSize;
  intptr_t value = output_[frame_index - 1]->GetFp();
  output_frame->SetCallerFp(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  output_frame->SetFp(fp_value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; caller's fp\n",
         fp_value, output_offset, value);
  // The context can be gotten from the previous frame.
  output_offset -= kPointerSize;
  value = output_[frame_index - 1]->GetContext();
  output_frame->SetFrameSlot(output_offset, value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; context\n",
         top_address + output_offset, output_offset, value);
  // A marker value is used in place of the function.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
  output_frame->SetFrameSlot(output_offset, value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; function (construct sentinel)\n",
         top_address + output_offset, output_offset, value);
  // The output frame reflects a JSConstructStubGeneric frame.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(construct_stub);
  output_frame->SetFrameSlot(output_offset, value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; code object\n",
         top_address + output_offset, output_offset, value);
  // Number of incoming arguments.
  // height - 1: traced as "argc" below, so height apparently includes an
  // extra slot (presumably the receiver) -- TODO(review): confirm.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
  output_frame->SetFrameSlot(output_offset, value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; argc (%d)\n",
         top_address + output_offset, output_offset, value, height - 1);
  // Constructor function being invoked by the stub (only present on some
  // architectures, indicated by kConstructorOffset).
  if (ConstructFrameConstants::kConstructorOffset != kMinInt) {
    output_offset -= kPointerSize;
    value = reinterpret_cast<intptr_t>(function);
    output_frame->SetFrameSlot(output_offset, value);
    PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
           V8PRIxPTR " ; constructor function\n",
           top_address + output_offset, output_offset, value);
  // The newly allocated object was passed as receiver in the artificial
  // constructor stub environment created by HEnvironment::CopyForInlining().
  output_offset -= kPointerSize;
  value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
  output_frame->SetFrameSlot(output_offset, value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; allocated receiver\n",
         top_address + output_offset, output_offset, value);
  // Every slot of the frame must have been written exactly once.
  ASSERT(0 == output_offset);
  // Resume inside the construct stub at its recorded deopt PC offset.
  intptr_t pc = reinterpret_cast<intptr_t>(
      construct_stub->instruction_start() +
      isolate_->heap()->construct_stub_deopt_pc_offset()->value());
  output_frame->SetPc(pc);
// Reconstructs an internal frame for an inlined getter or setter call.
// The frame has height 0 (receiver and implicit return value travel in
// registers for the LoadIC/StoreIC), so it consists only of the fixed
// slots: caller PC/FP, context, an INTERNAL sentinel in the function
// slot, and the deopt-variant accessor stub code object. For setters one
// extra slot holds the translated implicit return value. The PC is set
// to the getter/setter stub's recorded deopt offset.
// NOTE(review): this excerpt elides some lines (trace guards, part of
// the parameter list, parts of PrintF format strings).
void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
                                             bool is_setter_stub_frame) {
  JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
  // The receiver (and the implicit return value, if any) are expected in
  // registers by the LoadIC/StoreIC, so they don't belong to the output stack
  // frame. This means that we have to use a height of 0.
  unsigned height = 0;
  unsigned height_in_bytes = height * kPointerSize;
  const char* kind = is_setter_stub_frame ? "setter" : "getter";
  PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
  // We need 1 stack entry for the return address + 4 stack entries from
  // StackFrame::INTERNAL (FP, context, frame type, code object, see
  // MacroAssembler::EnterFrame). For a setter stub frame we need one additional
  // entry for the implicit return value, see
  // StoreStubCompiler::CompileStoreViaSetter.
  unsigned fixed_frame_entries = (kPCOnStackSize / kPointerSize) +
                                 (kFPOnStackSize / kPointerSize) + 3 +
                                 (is_setter_stub_frame ? 1 : 0);
  unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
  // Allocate and store the output frame description.
  FrameDescription* output_frame =
      new(output_frame_size) FrameDescription(output_frame_size, accessor);
  output_frame->SetFrameType(StackFrame::INTERNAL);
  // A frame for an accessor stub can not be the topmost or bottommost one.
  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
  ASSERT(output_[frame_index] == NULL);
  output_[frame_index] = output_frame;
  // The top address of the frame is computed from the previous frame's top and
  // this frame's size.
  intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  output_frame->SetTop(top_address);
  unsigned output_offset = output_frame_size;
  // Read caller's PC from the previous frame.
  output_offset -= kPCOnStackSize;
  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
  output_frame->SetCallerPc(output_offset, callers_pc);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
         top_address + output_offset, output_offset, callers_pc);
  // Read caller's FP from the previous frame, and set this frame's FP.
  output_offset -= kFPOnStackSize;
  intptr_t value = output_[frame_index - 1]->GetFp();
  output_frame->SetCallerFp(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  output_frame->SetFp(fp_value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
         fp_value, output_offset, value);
  // The context can be gotten from the previous frame.
  output_offset -= kPointerSize;
  value = output_[frame_index - 1]->GetContext();
  output_frame->SetFrameSlot(output_offset, value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
         top_address + output_offset, output_offset, value);
  // A marker value is used in place of the function.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
  output_frame->SetFrameSlot(output_offset, value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
         " ; function (%s sentinel)\n",
         top_address + output_offset, output_offset, value, kind);
  // Get Code object from accessor stub.
  output_offset -= kPointerSize;
  Builtins::Name name = is_setter_stub_frame ?
      Builtins::kStoreIC_Setter_ForDeopt :
      Builtins::kLoadIC_Getter_ForDeopt;
  Code* accessor_stub = isolate_->builtins()->builtin(name);
  value = reinterpret_cast<intptr_t>(accessor_stub);
  output_frame->SetFrameSlot(output_offset, value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
         top_address + output_offset, output_offset, value);
  // Skip the next translation command entirely (its operands are
  // consumed but not written to the frame; presumably this is the
  // receiver, which lives in a register -- TODO(review): confirm).
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());
  iterator->Skip(Translation::NumberOfOperandsFor(opcode));
  if (is_setter_stub_frame) {
    // The implicit return value was part of the artificial setter stub
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  // Every slot of the frame must have been written exactly once.
  ASSERT(0 == output_offset);
  // Resume inside the accessor stub at the getter/setter deopt offset.
  Smi* offset = is_setter_stub_frame ?
      isolate_->heap()->setter_stub_deopt_pc_offset() :
      isolate_->heap()->getter_stub_deopt_pc_offset();
  intptr_t pc = reinterpret_cast<intptr_t>(
      accessor_stub->instruction_start() + offset->value());
  output_frame->SetPc(pc);
// Builds the single StubFailureTrampolineStub output frame used when a
// crankshafted code stub (not an optimized JSFunction) deoptimizes. The
// frame contains the caller PC/FP, the context, a STUB_FAILURE_TRAMPOLINE
// sentinel, an Arguments object (pointer/length/arguments triple) for the
// caller's parameters, and the stub's register parameters; registers are
// then filled so the trampoline can call the stub's failure handler.
// NOTE(review): this excerpt elides some lines (trace guards, closing
// braces, some argument lines); comments describe the visible logic.
void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
  // | .... | | .... |
  // +-------------------------+ +-------------------------+
  // | JSFunction continuation | | JSFunction continuation |
  // +-------------------------+ +-------------------------+
  // | | saved frame (FP) | | saved frame (FP) |
  // | +=========================+<-fpreg +=========================+<-fpreg
  // | | JSFunction context | | JSFunction context |
  // v +-------------------------+ +-------------------------|
  // | COMPILED_STUB marker | | STUB_FAILURE marker |
  // +-------------------------+ +-------------------------+
  // | | | caller args.arguments_ |
  // | ... | +-------------------------+
  // | | | caller args.length_ |
  // |-------------------------|<-spreg +-------------------------+
  // | caller args pointer |
  // +-------------------------+
  // | caller stack param 1 |
  // parameters in registers +-------------------------+
  // and spilled to stack | .... |
  // +-------------------------+
  // | caller stack param n |
  // +-------------------------+<-spreg
  // reg = number of parameters
  // reg = failure handler address
  // reg = saved frame
  // reg = JSFunction context
  ASSERT(compiled_code_->is_crankshafted() &&
         compiled_code_->kind() != Code::OPTIMIZED_FUNCTION);
  int major_key = compiled_code_->major_key();
  CodeStubInterfaceDescriptor* descriptor =
      isolate_->code_stub_interface_descriptor(major_key);
  // The output frame must have room for all pushed register parameters
  // and the standard stack frame slots. Include space for an argument
  // object to the callee and optionally the space to pass the argument
  // object to the stub failure handler.
  ASSERT(descriptor->register_param_count_ >= 0);
  int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
      sizeof(Arguments) + kPointerSize;
  int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
  int input_frame_size = input_->GetFrameSize();
  int output_frame_size = height_in_bytes + fixed_frame_size;
  PrintF(" translating %s => StubFailureTrampolineStub, height=%d\n",
         CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
  // The stub failure trampoline is a single frame.
  FrameDescription* output_frame =
      new(output_frame_size) FrameDescription(output_frame_size, NULL);
  output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
  ASSERT(frame_index == 0);
  output_[frame_index] = output_frame;
  // The top address for the output frame can be computed from the input
  // frame pointer and the output frame's height. Subtract space for the
  // context and function slots.
  Register fp_reg = StubFailureTrampolineFrame::fp_register();
  intptr_t top_address = input_->GetRegister(fp_reg.code()) -
      (2 * kPointerSize) - height_in_bytes;
  output_frame->SetTop(top_address);
  // Read caller's PC (JSFunction continuation) from the input frame.
  unsigned input_frame_offset = input_frame_size - kPCOnStackSize;
  unsigned output_frame_offset = output_frame_size - kFPOnStackSize;
  intptr_t value = input_->GetFrameSlot(input_frame_offset);
  output_frame->SetCallerPc(output_frame_offset, value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; caller's pc\n",
         top_address + output_frame_offset, output_frame_offset, value);
  // Read caller's FP from the input frame, and set this frame's FP.
  input_frame_offset -= kFPOnStackSize;
  value = input_->GetFrameSlot(input_frame_offset);
  output_frame_offset -= kFPOnStackSize;
  output_frame->SetCallerFp(output_frame_offset, value);
  // The trampoline reuses the input frame's FP both as a register value
  // and as this frame's FP.
  intptr_t frame_ptr = input_->GetRegister(fp_reg.code());
  output_frame->SetRegister(fp_reg.code(), frame_ptr);
  output_frame->SetFp(frame_ptr);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; caller's fp\n",
         top_address + output_frame_offset, output_frame_offset, value);
  // The context can be gotten from the input frame.
  Register context_reg = StubFailureTrampolineFrame::context_register();
  input_frame_offset -= kPointerSize;
  value = input_->GetFrameSlot(input_frame_offset);
  output_frame->SetRegister(context_reg.code(), value);
  output_frame_offset -= kPointerSize;
  output_frame->SetFrameSlot(output_frame_offset, value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; context\n",
         top_address + output_frame_offset, output_frame_offset, value);
  // A marker value is used in place of the function.
  output_frame_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(
      Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
  output_frame->SetFrameSlot(output_frame_offset, value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; function (stub failure sentinel)\n",
         top_address + output_frame_offset, output_frame_offset, value);
  // If the stub's descriptor carries a stack parameter count register the
  // argument count is only known after translating it; until then the
  // slots below are filled with the-hole.
  intptr_t caller_arg_count = 0;
  bool arg_count_known = !descriptor->stack_parameter_count_.is_valid();
  // Build the Arguments object for the caller's parameters and a pointer to it.
  output_frame_offset -= kPointerSize;
  int args_arguments_offset = output_frame_offset;
  intptr_t the_hole = reinterpret_cast<intptr_t>(
      isolate_->heap()->the_hole_value());
  if (arg_count_known) {
    value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
        (caller_arg_count - 1) * kPointerSize;
  output_frame->SetFrameSlot(args_arguments_offset, value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; args.arguments %s\n",
         top_address + args_arguments_offset, args_arguments_offset, value,
         arg_count_known ? "" : "(the hole)");
  output_frame_offset -= kPointerSize;
  int length_frame_offset = output_frame_offset;
  value = arg_count_known ? caller_arg_count : the_hole;
  output_frame->SetFrameSlot(length_frame_offset, value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; args.length %s\n",
         top_address + length_frame_offset, length_frame_offset, value,
         arg_count_known ? "" : "(the hole)");
  // Pointer to the Arguments object just written above.
  output_frame_offset -= kPointerSize;
  value = frame_ptr + StandardFrameConstants::kCallerSPOffset -
      (output_frame_size - output_frame_offset) + kPointerSize;
  output_frame->SetFrameSlot(output_frame_offset, value);
  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
         V8PRIxPTR " ; args*\n",
         top_address + output_frame_offset, output_frame_offset, value);
  // Copy the register parameters to the failure frame.
  for (int i = 0; i < descriptor->register_param_count_; ++i) {
    output_frame_offset -= kPointerSize;
    DoTranslateCommand(iterator, 0, output_frame_offset);
  if (!arg_count_known) {
    // Now that the stack parameter count has been translated, back-patch
    // the args.arguments slot that was left as the-hole above.
    DoTranslateCommand(iterator, 0, length_frame_offset,
                       TRANSLATED_VALUE_IS_NATIVE);
    caller_arg_count = output_frame->GetFrameSlot(length_frame_offset);
    value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
        (caller_arg_count - 1) * kPointerSize;
    output_frame->SetFrameSlot(args_arguments_offset, value);
    PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
           V8PRIxPTR " ; args.arguments\n",
           top_address + args_arguments_offset, args_arguments_offset, value);
  // Every slot of the frame must have been written exactly once.
  ASSERT(0 == output_frame_offset);
  // Copy the double registers from the input into the output frame.
  CopyDoubleRegisters(output_frame);
  // Fill registers containing handler and number of parameters.
  SetPlatformCompiledStubRegisters(output_frame, descriptor);
  // Compute this frame's PC, state, and continuation.
  Code* trampoline = NULL;
  StubFunctionMode function_mode = descriptor->function_mode_;
  StubFailureTrampolineStub(function_mode).FindCodeInCache(&trampoline,
  ASSERT(trampoline != NULL);
  output_frame->SetPc(reinterpret_cast<intptr_t>(
      trampoline->instruction_start()));
  output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
  Code* notify_failure =
      isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
  output_frame->SetContinuation(
      reinterpret_cast<intptr_t>(notify_failure->entry()));
// Materializes the next deferred heap object described by
// deferred_objects_ and appends it to materialized_objects_, consuming
// the corresponding entries of the value stream via
// MaterializeNextValue(). Handles four cases: a de-duplicated reference
// to an already-materialized object, an arguments object backed by an
// adapted frame, a freshly built arguments object, and a generic object
// dispatched on its (generalized) map's instance type. Returns the
// materialized object (for duplicates, the original instance).
// NOTE(review): this excerpt elides some lines (closing braces, case
// braces/breaks); comments describe the visible logic.
Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
  int object_index = materialization_object_index_++;
  ObjectMaterializationDescriptor desc = deferred_objects_[object_index];
  const int length = desc.object_length();
  if (desc.duplicate_object() >= 0) {
    // Found a previously materialized object by de-duplication.
    object_index = desc.duplicate_object();
    // Placeholder entry keeps materialized_objects_ aligned with the
    // object index stream.
    materialized_objects_->Add(Handle<Object>());
  } else if (desc.is_arguments() && ArgumentsObjectIsAdapted(object_index)) {
    // Use the arguments adapter frame we just built to materialize the
    // arguments object. FunctionGetArguments can't throw an exception.
    Handle<JSFunction> function = ArgumentsObjectFunction(object_index);
    Handle<JSObject> arguments = Handle<JSObject>::cast(
        Accessors::FunctionGetArguments(function));
    materialized_objects_->Add(arguments);
    // The values are consumed implicitly by the adapted frame, so just
    // advance past them in the value stream.
    materialization_value_index_ += length;
  } else if (desc.is_arguments()) {
    // Construct an arguments object and copy the parameters to a newly
    // allocated arguments object backing store.
    Handle<JSFunction> function = ArgumentsObjectFunction(object_index);
    Handle<JSObject> arguments =
        isolate_->factory()->NewArgumentsObject(function, length);
    Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length);
    ASSERT(array->length() == length);
    arguments->set_elements(*array);
    materialized_objects_->Add(arguments);
    for (int i = 0; i < length; ++i) {
      Handle<Object> value = MaterializeNextValue();
      array->set(i, *value);
    // Dispatch on the instance type of the object to be materialized.
    // We also need to make sure that the representation of all fields
    // in the given object are general enough to hold a tagged value.
    Handle<Map> map = Map::GeneralizeAllFieldRepresentations(
        Handle<Map>::cast(MaterializeNextValue()), Representation::Tagged());
    switch (map->instance_type()) {
      case HEAP_NUMBER_TYPE: {
        Handle<HeapNumber> object = isolate_->factory()->NewHeapNumber(0.0);
        materialized_objects_->Add(object);
        Handle<Object> number = MaterializeNextValue();
        object->set_value(number->Number());
        // A double occupies kDoubleSize / kPointerSize value slots; one
        // was already consumed by MaterializeNextValue above.
        materialization_value_index_ += kDoubleSize / kPointerSize - 1;
      case JS_OBJECT_TYPE: {
        Handle<JSObject> object =
            isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED, false);
        materialized_objects_->Add(object);
        Handle<Object> properties = MaterializeNextValue();
        Handle<Object> elements = MaterializeNextValue();
        object->set_properties(FixedArray::cast(*properties));
        object->set_elements(FixedArrayBase::cast(*elements));
        // length - 3: the map, properties and elements values above were
        // already consumed; the remainder are in-object properties.
        for (int i = 0; i < length - 3; ++i) {
          Handle<Object> value = MaterializeNextValue();
          object->FastPropertyAtPut(i, *value);
      case JS_ARRAY_TYPE: {
        Handle<JSArray> object =
            isolate_->factory()->NewJSArray(0, map->elements_kind());
        materialized_objects_->Add(object);
        Handle<Object> properties = MaterializeNextValue();
        Handle<Object> elements = MaterializeNextValue();
        Handle<Object> length = MaterializeNextValue();
        object->set_properties(FixedArray::cast(*properties));
        object->set_elements(FixedArrayBase::cast(*elements));
        object->set_length(*length);
      // Unsupported instance type: trace and fall through without
      // materializing a new object.
      PrintF("[couldn't handle instance type %d]\n", map->instance_type());
  return materialized_objects_->at(object_index);
// Consumes and returns the next entry of the translated value stream.
// If the slot holds the arguments-marker sentinel, the value is really a
// deferred object, so it is materialized (possibly recursively) via
// MaterializeNextHeapObject instead.
Handle<Object> Deoptimizer::MaterializeNextValue() {
  int value_index = materialization_value_index_++;
  Handle<Object> value = materialized_values_->at(value_index);
  if (*value == isolate_->heap()->arguments_marker()) {
    value = MaterializeNextHeapObject();
// After the output frames are built, allocates every heap object the
// translation deferred: records the JS frames seen by the iterator,
// handlifies the raw tagged values collected during translation,
// materializes deferred heap numbers directly into their frame slots,
// resolves deferred double values into the handlified value list, and
// finally materializes the arguments/captured objects themselves,
// writing each into its recorded slot address. Not used for DEBUGGER
// bailouts (see the ASSERT).
// NOTE(review): this excerpt elides some lines (closing braces, trace
// guards, some PrintF argument lines); comments describe the visible
// logic.
void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
  ASSERT_NE(DEBUGGER, bailout_type_);
  // Walk all JavaScript output frames with the given frame iterator.
  for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
    if (frame_index != 0) it->Advance();
    JavaScriptFrame* frame = it->frame();
    jsframe_functions_.Add(handle(frame->function(), isolate_));
    jsframe_has_adapted_arguments_.Add(frame->has_adapted_arguments());
  // Handlify all tagged object values before triggering any allocation.
  List<Handle<Object> > values(deferred_objects_tagged_values_.length());
  for (int i = 0; i < deferred_objects_tagged_values_.length(); ++i) {
    values.Add(Handle<Object>(deferred_objects_tagged_values_[i], isolate_));
  // Play it safe and clear all unhandlified values before we continue.
  deferred_objects_tagged_values_.Clear();
  // Materialize all heap numbers before looking at arguments because when the
  // output frames are used to materialize arguments objects later on they need
  // to already contain valid heap numbers.
  for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
    HeapNumberMaterializationDescriptor<Address> d = deferred_heap_numbers_[i];
    Handle<Object> num = isolate_->factory()->NewNumber(d.value());
    PrintF("Materialized a new heap number %p [%e] in slot %p\n",
           reinterpret_cast<void*>(*num),
    Memory::Object_at(d.destination()) = *num;
  // Materialize all heap numbers required for arguments/captured objects.
  for (int i = 0; i < deferred_objects_double_values_.length(); i++) {
    HeapNumberMaterializationDescriptor<int> d =
        deferred_objects_double_values_[i];
    Handle<Object> num = isolate_->factory()->NewNumber(d.value());
    PrintF("Materialized a new heap number %p [%e] for object at %d\n",
           reinterpret_cast<void*>(*num),
    ASSERT(values.at(d.destination())->IsTheHole());
    values.Set(d.destination(), num);
  // Play it safe and clear all object double values before we continue.
  deferred_objects_double_values_.Clear();
  // Materialize arguments/captured objects.
  if (!deferred_objects_.is_empty()) {
    List<Handle<Object> > materialized_objects(deferred_objects_.length());
    materialized_objects_ = &materialized_objects;
    materialized_values_ = &values;
    while (materialization_object_index_ < deferred_objects_.length()) {
      int object_index = materialization_object_index_;
      ObjectMaterializationDescriptor descriptor =
          deferred_objects_.at(object_index);
      // Find a previously materialized object by de-duplication or
      // materialize a new instance of the object if necessary. Store
      // the materialized object into the frame slot.
      Handle<Object> object = MaterializeNextHeapObject();
      Memory::Object_at(descriptor.slot_address()) = *object;
      if (descriptor.is_arguments()) {
        PrintF("Materialized %sarguments object of length %d for %p: ",
               ArgumentsObjectIsAdapted(object_index) ? "(adapted) " : "",
               Handle<JSObject>::cast(object)->elements()->length(),
               reinterpret_cast<void*>(descriptor.slot_address()));
        PrintF("Materialized captured object of size %d for %p: ",
               Handle<HeapObject>::cast(object)->Size(),
               reinterpret_cast<void*>(descriptor.slot_address()));
      object->ShortPrint();
    // Both streams must have been fully consumed.
    ASSERT(materialization_object_index_ == materialized_objects_->length());
    ASSERT(materialization_value_index_ == materialized_values_->length());
1767 #ifdef ENABLE_DEBUGGER_SUPPORT
// Debugger-only variant of heap number materialization: for each deferred
// heap number whose destination slot falls inside the given parameter or
// expression region of the inspected frame, allocates the number and
// stores it into the corresponding DeoptimizedFrameInfo entry instead of
// into the frame memory. Slots outside both regions are ignored.
// NOTE(review): this excerpt elides some lines (trace guards, some PrintF
// argument lines, closing braces).
void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
    Address parameters_top,
    uint32_t parameters_size,
    Address expressions_top,
    uint32_t expressions_size,
    DeoptimizedFrameInfo* info) {
  ASSERT_EQ(DEBUGGER, bailout_type_);
  Address parameters_bottom = parameters_top + parameters_size;
  Address expressions_bottom = expressions_top + expressions_size;
  for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
    HeapNumberMaterializationDescriptor<Address> d = deferred_heap_numbers_[i];
    // Check if the heap number to materialize actually belongs to the
    // frame being extracted.
    Address slot = d.destination();
    if (parameters_top <= slot && slot < parameters_bottom) {
      Handle<Object> num = isolate_->factory()->NewNumber(d.value());
      // Slots grow downwards, so the index counts from the far end.
      int index = (info->parameters_count() - 1) -
          static_cast<int>(slot - parameters_top) / kPointerSize;
      PrintF("Materializing a new heap number %p [%e] in slot %p"
             "for parameter slot #%d\n",
             reinterpret_cast<void*>(*num),
      info->SetParameter(index, *num);
    } else if (expressions_top <= slot && slot < expressions_bottom) {
      Handle<Object> num = isolate_->factory()->NewNumber(d.value());
      // Same downward-growing indexing for the expression stack region.
      int index = info->expression_count() - 1 -
          static_cast<int>(slot - expressions_top) / kPointerSize;
      PrintF("Materializing a new heap number %p [%e] in slot %p"
             "for expression slot #%d\n",
             reinterpret_cast<void*>(*num),
      info->SetExpression(index, *num);
// Returns a human-readable label for a translated value's representation,
// used by the deoptimizer's tracing output. The visible branches cover
// the smi discrimination and the "heap number" fallback; the is_native
// branch (and its return) is elided from this excerpt -- presumably it
// returns a "native"-style label when is_native is set -- TODO(review):
// confirm against the complete source.
static const char* TraceValueType(bool is_smi, bool is_native = false) {
  } else if (is_smi) {
  return "heap number";
// Translates one field of an object to be materialized (an arguments object
// or a captured object): decodes the next command from the translation byte
// stream and records the field's value on the side via AddObjectTaggedValue /
// AddObjectDoubleValue, so the object can be materialized after the
// deoptimized frames have been built.
// NOTE(review): the embedded listing numbers are non-contiguous; the switch
// header, UNREACHABLE() defaults, trace guards and closing braces are elided
// from this excerpt.
1832 void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
1835 disasm::NameConverter converter;
1836 Address object_slot = deferred_objects_[object_index].slot_address();
1838 Translation::Opcode opcode =
1839 static_cast<Translation::Opcode>(iterator->Next());
// Frame-descriptor opcodes can never appear inside an object description.
1842 case Translation::BEGIN:
1843 case Translation::JS_FRAME:
1844 case Translation::ARGUMENTS_ADAPTOR_FRAME:
1845 case Translation::CONSTRUCT_STUB_FRAME:
1846 case Translation::GETTER_STUB_FRAME:
1847 case Translation::SETTER_STUB_FRAME:
1848 case Translation::COMPILED_STUB_FRAME:
// Tagged value currently held in a CPU register.
1852 case Translation::REGISTER: {
1853 int input_reg = iterator->Next();
1854 intptr_t input_value = input_->GetRegister(input_reg);
1856 PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
1857 reinterpret_cast<intptr_t>(object_slot),
1859 PrintF("0x%08" V8PRIxPTR " ; %s ", input_value,
1860 converter.NameOfCPURegister(input_reg));
1861 reinterpret_cast<Object*>(input_value)->ShortPrint();
1864 AddObjectTaggedValue(input_value);
// Untagged int32 in a register: re-tag as Smi if it fits, otherwise defer
// a heap-number allocation.
1868 case Translation::INT32_REGISTER: {
1869 int input_reg = iterator->Next();
1870 intptr_t value = input_->GetRegister(input_reg);
1871 bool is_smi = Smi::IsValid(value);
1873 PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
1874 reinterpret_cast<intptr_t>(object_slot),
1876 PrintF("%" V8PRIdPTR " ; %s (%s)\n", value,
1877 converter.NameOfCPURegister(input_reg),
1878 TraceValueType(is_smi));
1881 intptr_t tagged_value =
1882 reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
1883 AddObjectTaggedValue(tagged_value);
1885 double double_value = static_cast<double>(static_cast<int32_t>(value));
1886 AddObjectDoubleValue(double_value);
// Same as INT32_REGISTER but with an unsigned interpretation of the bits.
1891 case Translation::UINT32_REGISTER: {
1892 int input_reg = iterator->Next();
1893 uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
1894 bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
1896 PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
1897 reinterpret_cast<intptr_t>(object_slot),
1899 PrintF("%" V8PRIdPTR " ; uint %s (%s)\n", value,
1900 converter.NameOfCPURegister(input_reg),
1901 TraceValueType(is_smi));
1904 intptr_t tagged_value =
1905 reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
1906 AddObjectTaggedValue(tagged_value);
1908 double double_value = static_cast<double>(static_cast<uint32_t>(value));
1909 AddObjectDoubleValue(double_value);
// Double in a floating-point register: always becomes a deferred
// heap-number value.
1914 case Translation::DOUBLE_REGISTER: {
1915 int input_reg = iterator->Next();
1916 double value = input_->GetDoubleRegister(input_reg);
1918 PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
1919 reinterpret_cast<intptr_t>(object_slot),
1921 PrintF("%e ; %s\n", value,
1922 DoubleRegister::AllocationIndexToString(input_reg));
1924 AddObjectDoubleValue(value);
// Stack-slot variants mirror the register cases above, reading from the
// input frame instead of a register.
1928 case Translation::STACK_SLOT: {
1929 int input_slot_index = iterator->Next();
1930 unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
1931 intptr_t input_value = input_->GetFrameSlot(input_offset);
1933 PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
1934 reinterpret_cast<intptr_t>(object_slot),
1936 PrintF("0x%08" V8PRIxPTR " ; [sp + %d] ", input_value, input_offset);
1937 reinterpret_cast<Object*>(input_value)->ShortPrint();
1940 AddObjectTaggedValue(input_value);
1944 case Translation::INT32_STACK_SLOT: {
1945 int input_slot_index = iterator->Next();
1946 unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
1947 intptr_t value = input_->GetFrameSlot(input_offset);
1948 bool is_smi = Smi::IsValid(value);
1950 PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
1951 reinterpret_cast<intptr_t>(object_slot),
1953 PrintF("%" V8PRIdPTR " ; [sp + %d] (%s)\n",
1954 value, input_offset, TraceValueType(is_smi));
1957 intptr_t tagged_value =
1958 reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
1959 AddObjectTaggedValue(tagged_value);
1961 double double_value = static_cast<double>(static_cast<int32_t>(value));
1962 AddObjectDoubleValue(double_value);
1967 case Translation::UINT32_STACK_SLOT: {
1968 int input_slot_index = iterator->Next();
1969 unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
1971 static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
1972 bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
1974 PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
1975 reinterpret_cast<intptr_t>(object_slot),
1977 PrintF("%" V8PRIdPTR " ; [sp + %d] (uint %s)\n",
1978 value, input_offset, TraceValueType(is_smi));
1981 intptr_t tagged_value =
1982 reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
1983 AddObjectTaggedValue(tagged_value);
1985 double double_value = static_cast<double>(static_cast<uint32_t>(value));
1986 AddObjectDoubleValue(double_value);
1991 case Translation::DOUBLE_STACK_SLOT: {
1992 int input_slot_index = iterator->Next();
1993 unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
1994 double value = input_->GetDoubleFrameSlot(input_offset);
1996 PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
1997 reinterpret_cast<intptr_t>(object_slot),
1999 PrintF("%e ; [sp + %d]\n", value, input_offset);
2001 AddObjectDoubleValue(value);
// Constant from the deoptimization literal array.
2005 case Translation::LITERAL: {
2006 Object* literal = ComputeLiteral(iterator->Next());
2008 PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
2009 reinterpret_cast<intptr_t>(object_slot),
2011 literal->ShortPrint();
2012 PrintF(" ; literal\n");
2014 intptr_t value = reinterpret_cast<intptr_t>(literal);
2015 AddObjectTaggedValue(value);
// Reference to an object already described earlier in the stream.
2019 case Translation::DUPLICATED_OBJECT: {
2020 int object_index = iterator->Next();
2022 PrintF(" nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
2023 reinterpret_cast<intptr_t>(object_slot),
2025 isolate_->heap()->arguments_marker()->ShortPrint();
2026 PrintF(" ; duplicate of object #%d\n", object_index);
2028 // Use the materialization marker value as a sentinel and fill in
2029 // the object after the deoptimized frame is built.
2030 intptr_t value = reinterpret_cast<intptr_t>(
2031 isolate_->heap()->arguments_marker());
2032 AddObjectDuplication(0, object_index);
2033 AddObjectTaggedValue(value);
// Nested object: record a start marker and recurse over its fields.
2037 case Translation::ARGUMENTS_OBJECT:
2038 case Translation::CAPTURED_OBJECT: {
2039 int length = iterator->Next();
2040 bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
2042 PrintF(" nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
2043 reinterpret_cast<intptr_t>(object_slot),
2045 isolate_->heap()->arguments_marker()->ShortPrint();
2046 PrintF(" ; object (length = %d, is_args = %d)\n", length, is_args);
2048 // Use the materialization marker value as a sentinel and fill in
2049 // the object after the deoptimized frame is built.
2050 intptr_t value = reinterpret_cast<intptr_t>(
2051 isolate_->heap()->arguments_marker());
2052 AddObjectStart(0, length, is_args);
2053 AddObjectTaggedValue(value);
2054 // We save the object values on the side and materialize the actual
2055 // object after the deoptimized frame is built.
2056 int object_index = deferred_objects_.length() - 1;
2057 for (int i = 0; i < length; i++) {
2058 DoTranslateObject(iterator, object_index, i);
// Translates one slot of an output frame: decodes the next command from the
// translation byte stream and writes the resulting value into
// output_[frame_index] at output_offset. Values that cannot be represented
// as a tagged pointer yet (unboxed doubles, non-Smi int32/uint32 in TAGGED
// context) get a GC-safe Smi placeholder in the frame and are recorded via
// AddDoubleValue for materialization later.
// NOTE(review): the embedded listing numbers are non-contiguous; the switch
// header, UNREACHABLE() defaults, trace guards and closing braces are elided
// from this excerpt.
2066 void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
2068 unsigned output_offset,
2069 DeoptimizerTranslatedValueType value_type) {
2070 disasm::NameConverter converter;
2071 // A GC-safe temporary placeholder that we can put in the output frame.
2072 const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));
2073 bool is_native = value_type == TRANSLATED_VALUE_IS_NATIVE;
2075 Translation::Opcode opcode =
2076 static_cast<Translation::Opcode>(iterator->Next());
// Frame-descriptor opcodes are handled elsewhere; they never describe a
// single slot value.
2079 case Translation::BEGIN:
2080 case Translation::JS_FRAME:
2081 case Translation::ARGUMENTS_ADAPTOR_FRAME:
2082 case Translation::CONSTRUCT_STUB_FRAME:
2083 case Translation::GETTER_STUB_FRAME:
2084 case Translation::SETTER_STUB_FRAME:
2085 case Translation::COMPILED_STUB_FRAME:
// Tagged value in a CPU register: copy it straight into the frame slot.
2089 case Translation::REGISTER: {
2090 int input_reg = iterator->Next();
2091 intptr_t input_value = input_->GetRegister(input_reg);
2094 " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
2095 output_[frame_index]->GetTop() + output_offset,
2098 converter.NameOfCPURegister(input_reg));
2099 reinterpret_cast<Object*>(input_value)->ShortPrint();
2102 output_[frame_index]->SetFrameSlot(output_offset, input_value);
// Untagged int32 in a register: Smi-tag if it fits, store raw if NATIVE,
// otherwise defer a heap-number allocation and store a placeholder.
2106 case Translation::INT32_REGISTER: {
2107 int input_reg = iterator->Next();
2108 intptr_t value = input_->GetRegister(input_reg);
2109 bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
2110 Smi::IsValid(value);
2113 " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
2114 output_[frame_index]->GetTop() + output_offset,
2117 converter.NameOfCPURegister(input_reg),
2118 TraceValueType(is_smi, is_native));
2121 intptr_t tagged_value =
2122 reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
2123 output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
2124 } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
2125 output_[frame_index]->SetFrameSlot(output_offset, value);
2127 // We save the untagged value on the side and store a GC-safe
2128 // temporary placeholder in the frame.
2129 ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
2130 AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
2131 static_cast<double>(static_cast<int32_t>(value)));
2132 output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
// Same as INT32_REGISTER with an unsigned interpretation of the bits.
2137 case Translation::UINT32_REGISTER: {
2138 int input_reg = iterator->Next();
2139 uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
2140 bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
2141 (value <= static_cast<uintptr_t>(Smi::kMaxValue));
2144 " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIuPTR
2145 " ; uint %s (%s)\n",
2146 output_[frame_index]->GetTop() + output_offset,
2149 converter.NameOfCPURegister(input_reg),
2150 TraceValueType(is_smi, is_native));
2153 intptr_t tagged_value =
2154 reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
2155 output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
2156 } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
2157 output_[frame_index]->SetFrameSlot(output_offset, value);
2159 // We save the untagged value on the side and store a GC-safe
2160 // temporary placeholder in the frame.
2161 ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
2162 AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
2163 static_cast<double>(static_cast<uint32_t>(value)));
2164 output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
// Double in a floating-point register: always deferred to a heap number.
2169 case Translation::DOUBLE_REGISTER: {
2170 int input_reg = iterator->Next();
2171 double value = input_->GetDoubleRegister(input_reg);
2173 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
2174 output_[frame_index]->GetTop() + output_offset,
2177 DoubleRegister::AllocationIndexToString(input_reg));
2179 // We save the untagged value on the side and store a GC-safe
2180 // temporary placeholder in the frame.
2181 AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
2182 output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
// Stack-slot variants mirror the register cases, reading from the input
// frame instead of a register.
2186 case Translation::STACK_SLOT: {
2187 int input_slot_index = iterator->Next();
2188 unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
2189 intptr_t input_value = input_->GetFrameSlot(input_offset);
2191 PrintF(" 0x%08" V8PRIxPTR ": ",
2192 output_[frame_index]->GetTop() + output_offset);
2193 PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
2197 reinterpret_cast<Object*>(input_value)->ShortPrint();
2200 output_[frame_index]->SetFrameSlot(output_offset, input_value);
2204 case Translation::INT32_STACK_SLOT: {
2205 int input_slot_index = iterator->Next();
2206 unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
2207 intptr_t value = input_->GetFrameSlot(input_offset);
2208 bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
2209 Smi::IsValid(value);
2211 PrintF(" 0x%08" V8PRIxPTR ": ",
2212 output_[frame_index]->GetTop() + output_offset);
2213 PrintF("[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
2217 TraceValueType(is_smi, is_native));
2220 intptr_t tagged_value =
2221 reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
2222 output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
2223 } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
2224 output_[frame_index]->SetFrameSlot(output_offset, value);
2226 // We save the untagged value on the side and store a GC-safe
2227 // temporary placeholder in the frame.
2228 ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
2229 AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
2230 static_cast<double>(static_cast<int32_t>(value)));
2231 output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
2236 case Translation::UINT32_STACK_SLOT: {
2237 int input_slot_index = iterator->Next();
2238 unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
2240 static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
2241 bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
2242 (value <= static_cast<uintptr_t>(Smi::kMaxValue));
2244 PrintF(" 0x%08" V8PRIxPTR ": ",
2245 output_[frame_index]->GetTop() + output_offset);
2246 PrintF("[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n",
2250 TraceValueType(is_smi, is_native));
2253 intptr_t tagged_value =
2254 reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
2255 output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
2256 } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
2257 output_[frame_index]->SetFrameSlot(output_offset, value);
2259 // We save the untagged value on the side and store a GC-safe
2260 // temporary placeholder in the frame.
2261 ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
2262 AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
2263 static_cast<double>(static_cast<uint32_t>(value)));
2264 output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
2269 case Translation::DOUBLE_STACK_SLOT: {
2270 int input_slot_index = iterator->Next();
2271 unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
2272 double value = input_->GetDoubleFrameSlot(input_offset);
2274 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
2275 output_[frame_index]->GetTop() + output_offset,
2280 // We save the untagged value on the side and store a GC-safe
2281 // temporary placeholder in the frame.
2282 AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
2283 output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
// Constant from the deoptimization literal array.
2287 case Translation::LITERAL: {
2288 Object* literal = ComputeLiteral(iterator->Next());
2290 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
2291 output_[frame_index]->GetTop() + output_offset,
2293 literal->ShortPrint();
2294 PrintF(" ; literal\n");
2296 intptr_t value = reinterpret_cast<intptr_t>(literal);
2297 output_[frame_index]->SetFrameSlot(output_offset, value);
// Reference to an object already described earlier in the stream.
2301 case Translation::DUPLICATED_OBJECT: {
2302 int object_index = iterator->Next();
2304 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
2305 output_[frame_index]->GetTop() + output_offset,
2307 isolate_->heap()->arguments_marker()->ShortPrint();
2308 PrintF(" ; duplicate of object #%d\n", object_index);
2310 // Use the materialization marker value as a sentinel and fill in
2311 // the object after the deoptimized frame is built.
2312 intptr_t value = reinterpret_cast<intptr_t>(
2313 isolate_->heap()->arguments_marker());
2314 AddObjectDuplication(output_[frame_index]->GetTop() + output_offset,
2316 output_[frame_index]->SetFrameSlot(output_offset, value);
// Object to be materialized: record a start marker and translate each of
// its fields recursively via DoTranslateObject.
2320 case Translation::ARGUMENTS_OBJECT:
2321 case Translation::CAPTURED_OBJECT: {
2322 int length = iterator->Next();
2323 bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
2325 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
2326 output_[frame_index]->GetTop() + output_offset,
2328 isolate_->heap()->arguments_marker()->ShortPrint();
2329 PrintF(" ; object (length = %d, is_args = %d)\n", length, is_args);
2331 // Use the materialization marker value as a sentinel and fill in
2332 // the object after the deoptimized frame is built.
2333 intptr_t value = reinterpret_cast<intptr_t>(
2334 isolate_->heap()->arguments_marker());
2335 AddObjectStart(output_[frame_index]->GetTop() + output_offset,
2337 output_[frame_index]->SetFrameSlot(output_offset, value);
2338 // We save the object values on the side and materialize the actual
2339 // object after the deoptimized frame is built.
2340 int object_index = deferred_objects_.length() - 1;
2341 for (int i = 0; i < length; i++) {
2342 DoTranslateObject(iterator, object_index, i);
// Computes the size in bytes of the input (optimized) frame from the fixed
// frame part plus the fp-to-sp delta recorded at the deopt point.
// NOTE(review): the return statement and closing brace are elided from this
// excerpt (listing jumps past 2359).
2350 unsigned Deoptimizer::ComputeInputFrameSize() const {
2351 unsigned fixed_size = ComputeFixedSize(function_);
2352 // The fp-to-sp delta already takes the context and the function
2353 // into account so we have to avoid double counting them (-2).
2354 unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
// For optimized JS code, cross-check against the statically known layout.
2356 if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
2357 unsigned stack_slots = compiled_code_->stack_slots();
2358 unsigned outgoing_size = ComputeOutgoingArgumentSize();
2359 ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
// Size in bytes of the fixed portion of a frame for |function|: incoming
// arguments (including receiver) plus the standard frame header.
2366 unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
2367 // The fixed part of the frame consists of the return address, frame
2368 // pointer, function, context, and all the incoming arguments.
2369 return ComputeIncomingArgumentSize(function) +
2370 StandardFrameConstants::kFixedFrameSize;
// Size in bytes of the incoming arguments area (formal parameters plus the
// receiver). A Smi instead of a JSFunction marks a STUB frame; its early
// return value is elided from this excerpt (listing jumps 2378 -> 2381).
2374 unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
2375 // The incoming arguments is the values for formal parameters and
2376 // the receiver. Every slot contains a pointer.
2377 if (function->IsSmi()) {
2378 ASSERT(Smi::cast(function) == Smi::FromInt(StackFrame::STUB));
// +1 accounts for the receiver slot.
2381 unsigned arguments = function->shared()->formal_parameter_count() + 1;
2382 return arguments * kPointerSize;
// Size in bytes of the outgoing arguments area at the current bailout point,
// read from the deoptimization input data of the optimized code.
2386 unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
2387 DeoptimizationInputData* data = DeoptimizationInputData::cast(
2388 compiled_code_->deoptimization_data());
2389 unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
2390 return height * kPointerSize;
// Fetches literal #index from the literal array stored in the optimized
// code's deoptimization input data.
2394 Object* Deoptimizer::ComputeLiteral(int index) const {
2395 DeoptimizationInputData* data = DeoptimizationInputData::cast(
2396 compiled_code_->deoptimization_data());
2397 FixedArray* literals = data->LiteralArray();
2398 return literals->get(index);
// Records the start of an object to be materialized later. |slot| is the
// frame address that will receive the object, |length| its field count,
// |is_args| whether it is an arguments object (-1 duplicate index = none).
2402 void Deoptimizer::AddObjectStart(intptr_t slot, int length, bool is_args) {
2403 ObjectMaterializationDescriptor object_desc(
2404 reinterpret_cast<Address>(slot), jsframe_count_, length, -1, is_args);
2405 deferred_objects_.Add(object_desc);
// Records that |slot| should receive a duplicate of the previously described
// object #object_index (length -1 marks a duplication descriptor).
2409 void Deoptimizer::AddObjectDuplication(intptr_t slot, int object_index) {
2410 ObjectMaterializationDescriptor object_desc(
2411 reinterpret_cast<Address>(slot), jsframe_count_, -1, object_index, false);
2412 deferred_objects_.Add(object_desc);
// Appends an already-tagged field value to the deferred-object value list.
2416 void Deoptimizer::AddObjectTaggedValue(intptr_t value) {
2417 deferred_objects_tagged_values_.Add(reinterpret_cast<Object*>(value));
// Appends a double field value: a hole placeholder goes into the tagged list
// (to keep indices aligned) and the actual double is recorded, keyed by that
// placeholder's index, for heap-number materialization later.
2421 void Deoptimizer::AddObjectDoubleValue(double value) {
2422 deferred_objects_tagged_values_.Add(isolate()->heap()->the_hole_value());
2423 HeapNumberMaterializationDescriptor<int> value_desc(
2424 deferred_objects_tagged_values_.length() - 1, value);
2425 deferred_objects_double_values_.Add(value_desc);
// Records that the frame slot at |slot_address| must be filled with a newly
// allocated HeapNumber holding |value| once allocation is safe again.
2429 void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
2430 HeapNumberMaterializationDescriptor<Address> value_desc(
2431 reinterpret_cast<Address>(slot_address), value);
2432 deferred_heap_numbers_.Add(value_desc);
// Ensures the deopt entry table for |type| contains at least max_entry_id + 1
// entries, regenerating the whole table (doubling its entry count) into the
// preallocated code chunk when it is too small.
// NOTE(review): listing numbers are non-contiguous here (2449 -> 2451, ...);
// some lines (e.g. the CodeDesc declaration before GetCode) are elided.
2436 void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
2439 // We cannot run this if the serializer is enabled because this will
2440 // cause us to emit relocation information for the external
2441 // references. This is fine because the deoptimizer's code section
2442 // isn't meant to be serialized at all.
2443 ASSERT(type == EAGER || type == SOFT || type == LAZY);
2444 DeoptimizerData* data = isolate->deoptimizer_data();
2445 int entry_count = data->deopt_entry_code_entries_[type];
// Fast path: the table is already large enough.
2446 if (max_entry_id < entry_count) return;
2447 entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries);
2448 while (max_entry_id >= entry_count) entry_count *= 2;
2449 ASSERT(entry_count <= Deoptimizer::kMaxNumberOfEntries);
// Assemble the new entry table into a scratch buffer.
2451 MacroAssembler masm(isolate, NULL, 16 * KB);
2452 masm.set_emit_debug_code(false);
2453 GenerateDeoptimizationEntries(&masm, entry_count, type);
2455 masm.GetCode(&desc);
2456 ASSERT(!RelocInfo::RequiresRelocation(desc));
// Copy the generated code into the reserved chunk and flush the i-cache.
2458 MemoryChunk* chunk = data->deopt_entry_code_[type];
2459 ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
2461 chunk->CommitArea(desc.instr_size);
2462 CopyBytes(chunk->area_start(), desc.buffer,
2463 static_cast<size_t>(desc.instr_size));
2464 CPU::FlushICache(chunk->area_start(), desc.instr_size);
2466 data->deopt_entry_code_entries_[type] = entry_count;
// Constructs a frame description of |frame_size| bytes for |function|,
// zapping all registers and slots with kZapUint32 so uninitialized reads are
// recognizable. (Some initializer-list members are elided from this excerpt;
// listing jumps 2473 -> 2477.)
2470 FrameDescription::FrameDescription(uint32_t frame_size,
2471 JSFunction* function)
2472 : frame_size_(frame_size),
2473 function_(function),
2477 context_(kZapUint32) {
2478 // Zap all the registers.
2479 for (int r = 0; r < Register::kNumRegisters; r++) {
2480 SetRegister(r, kZapUint32);
2483 // Zap all the slots.
2484 for (unsigned o = 0; o < frame_size; o += kPointerSize) {
2485 SetFrameSlot(o, kZapUint32);
// Fixed-size portion of this frame: standard header plus parameters and
// receiver (+1).
2490 int FrameDescription::ComputeFixedSize() {
2491 return StandardFrameConstants::kFixedFrameSize +
2492 (ComputeParametersCount() + 1) * kPointerSize;
// Maps a translation slot index to a byte offset within this frame.
// Non-negative indices are locals/spills (counted down from just below the
// fixed part); negative indices are incoming parameters (counted down from
// just below the arguments area).
2496 unsigned FrameDescription::GetOffsetFromSlotIndex(int slot_index) {
2497 if (slot_index >= 0) {
2498 // Local or spill slots. Skip the fixed part of the frame
2499 // including all arguments.
2500 unsigned base = GetFrameSize() - ComputeFixedSize();
2501 return base - ((slot_index + 1) * kPointerSize);
2503 // Incoming parameter.
2504 int arg_size = (ComputeParametersCount() + 1) * kPointerSize;
2505 unsigned base = GetFrameSize() - arg_size;
2506 return base - ((slot_index + 1) * kPointerSize);
// Number of formal parameters for this frame, depending on the frame type
// (the switch header and UNREACHABLE default are elided from this excerpt).
2511 int FrameDescription::ComputeParametersCount() {
2513 case StackFrame::JAVA_SCRIPT:
2514 return function_->shared()->formal_parameter_count();
2515 case StackFrame::ARGUMENTS_ADAPTOR: {
2516 // Last slot contains the number of incoming arguments as a smi.
2517 // Can't use GetExpression(0) because it would cause infinite recursion.
2518 return reinterpret_cast<Smi*>(*GetFrameSlotPointer(0))->value();
2520 case StackFrame::STUB:
2521 return -1; // Minus receiver.
// Reads incoming parameter #index from this frame's slots.
2529 Object* FrameDescription::GetParameter(int index) {
2531 ASSERT(index < ComputeParametersCount());
2532 // The slot indexes for incoming arguments are negative.
2533 unsigned offset = GetOffsetFromSlotIndex(index - ComputeParametersCount());
2534 return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
// Number of expression-stack slots in this frame (JS frames only): the space
// left after the fixed part, in pointer-sized units.
2538 unsigned FrameDescription::GetExpressionCount() {
2539 ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
2540 unsigned size = GetFrameSize() - ComputeFixedSize();
2541 return size / kPointerSize;
// Reads expression-stack slot #index from this frame (JS frames only).
2545 Object* FrameDescription::GetExpression(int index) {
2546 ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
2547 unsigned offset = GetOffsetFromSlotIndex(index);
2548 return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
// Appends a signed 32-bit value to the translation byte stream using a
// zigzag-style sign encoding plus a 7-bit variable-length byte encoding
// (LSB of each byte = "more bytes follow"). The do/while header is elided
// from this excerpt (listing jumps 2558 -> 2560).
2552 void TranslationBuffer::Add(int32_t value, Zone* zone) {
2553 // Encode the sign bit in the least significant bit.
2554 bool is_negative = (value < 0);
2555 uint32_t bits = ((is_negative ? -value : value) << 1) |
2556 static_cast<int32_t>(is_negative);
2557 // Encode the individual bytes using the least significant bit of
2558 // each byte to indicate whether or not more bytes follow.
2560 uint32_t next = bits >> 7;
2561 contents_.Add(((bits << 1) & 0xFF) | (next != 0), zone);
2563 } while (bits != 0);
// Decodes the next variable-length signed value written by
// TranslationBuffer::Add, advancing index_. (The accumulator initialization
// is elided from this excerpt; listing jumps 2569 -> 2571.)
2567 int32_t TranslationIterator::Next() {
2568 // Run through the bytes until we reach one with a least significant
2569 // bit of zero (marks the end).
2571 for (int i = 0; true; i += 7) {
2573 uint8_t next = buffer_->get(index_++);
2574 bits |= (next >> 1) << i;
2575 if ((next & 1) == 0) break;
2577 // The bits encode the sign in the least significant bit.
2578 bool is_negative = (bits & 1) == 1;
2579 int32_t result = bits >> 1;
2580 return is_negative ? -result : result;
// Copies the accumulated translation bytes into a new tenured ByteArray.
2584 Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) {
2585 int length = contents_.length();
2586 Handle<ByteArray> result = factory->NewByteArray(length, TENURED);
2588 result->GetDataStartAddress(), contents_.ToVector().start(), length);
// Emits a CONSTRUCT_STUB_FRAME marker with its literal id and frame height.
2593 void Translation::BeginConstructStubFrame(int literal_id, unsigned height) {
2594 buffer_->Add(CONSTRUCT_STUB_FRAME, zone());
2595 buffer_->Add(literal_id, zone());
2596 buffer_->Add(height, zone());
// Emits a GETTER_STUB_FRAME marker with its literal id.
2600 void Translation::BeginGetterStubFrame(int literal_id) {
2601 buffer_->Add(GETTER_STUB_FRAME, zone());
2602 buffer_->Add(literal_id, zone());
// Emits a SETTER_STUB_FRAME marker with its literal id.
2606 void Translation::BeginSetterStubFrame(int literal_id) {
2607 buffer_->Add(SETTER_STUB_FRAME, zone());
2608 buffer_->Add(literal_id, zone());
// Emits an ARGUMENTS_ADAPTOR_FRAME marker with its literal id and height.
2612 void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
2613 buffer_->Add(ARGUMENTS_ADAPTOR_FRAME, zone());
2614 buffer_->Add(literal_id, zone());
2615 buffer_->Add(height, zone());
// Emits a JS_FRAME marker with its bailout node id, literal id and height.
// (Intermediate signature parameters are elided; listing jumps 2619 -> 2622.)
2619 void Translation::BeginJSFrame(BailoutId node_id,
2622 buffer_->Add(JS_FRAME, zone());
2623 buffer_->Add(node_id.ToInt(), zone());
2624 buffer_->Add(literal_id, zone());
2625 buffer_->Add(height, zone());
// Emits a COMPILED_STUB_FRAME marker (no operands).
2629 void Translation::BeginCompiledStubFrame() {
2630 buffer_->Add(COMPILED_STUB_FRAME, zone());
// Emits an ARGUMENTS_OBJECT marker followed by its field count.
2634 void Translation::BeginArgumentsObject(int args_length) {
2635 buffer_->Add(ARGUMENTS_OBJECT, zone());
2636 buffer_->Add(args_length, zone());
// Emits a CAPTURED_OBJECT marker followed by its field count.
2640 void Translation::BeginCapturedObject(int length) {
2641 buffer_->Add(CAPTURED_OBJECT, zone());
2642 buffer_->Add(length, zone());
// Emits a DUPLICATED_OBJECT reference to a previously described object.
2646 void Translation::DuplicateObject(int object_index) {
2647 buffer_->Add(DUPLICATED_OBJECT, zone());
2648 buffer_->Add(object_index, zone());
// Records a tagged value living in CPU register |reg|.
2652 void Translation::StoreRegister(Register reg) {
2653 buffer_->Add(REGISTER, zone());
2654 buffer_->Add(reg.code(), zone());
// Records an untagged int32 value living in CPU register |reg|.
2658 void Translation::StoreInt32Register(Register reg) {
2659 buffer_->Add(INT32_REGISTER, zone());
2660 buffer_->Add(reg.code(), zone());
// Records an untagged uint32 value living in CPU register |reg|.
2664 void Translation::StoreUint32Register(Register reg) {
2665 buffer_->Add(UINT32_REGISTER, zone());
2666 buffer_->Add(reg.code(), zone());
// Records a double living in floating-point register |reg| (encoded via its
// allocation index).
2670 void Translation::StoreDoubleRegister(DoubleRegister reg) {
2671 buffer_->Add(DOUBLE_REGISTER, zone());
2672 buffer_->Add(DoubleRegister::ToAllocationIndex(reg), zone());
// Records a tagged value living in stack slot |index|.
2676 void Translation::StoreStackSlot(int index) {
2677 buffer_->Add(STACK_SLOT, zone());
2678 buffer_->Add(index, zone());
// Records an untagged int32 value living in stack slot |index|.
2682 void Translation::StoreInt32StackSlot(int index) {
2683 buffer_->Add(INT32_STACK_SLOT, zone());
2684 buffer_->Add(index, zone());
// Records an untagged uint32 value living in stack slot |index|.
2688 void Translation::StoreUint32StackSlot(int index) {
2689 buffer_->Add(UINT32_STACK_SLOT, zone());
2690 buffer_->Add(index, zone());
// Records a double living in stack slot |index|.
2694 void Translation::StoreDoubleStackSlot(int index) {
2695 buffer_->Add(DOUBLE_STACK_SLOT, zone());
2696 buffer_->Add(index, zone());
// Records a constant by its index into the deoptimization literal array.
2700 void Translation::StoreLiteral(int literal_id) {
2701 buffer_->Add(LITERAL, zone());
2702 buffer_->Add(literal_id, zone());
// Records an arguments object descriptor (known/unknown args, start index,
// length). (Intermediate signature parameters are elided from this excerpt;
// listing jumps 2706 -> 2709.)
2706 void Translation::StoreArgumentsObject(bool args_known,
2709 buffer_->Add(ARGUMENTS_OBJECT, zone());
2710 buffer_->Add(args_known, zone());
2711 buffer_->Add(args_index, zone());
2712 buffer_->Add(args_length, zone());
// Returns the number of operands that follow |opcode| in the translation
// byte stream, used for skipping commands without decoding them.
// NOTE(review): the switch header, the grouped return statements and some
// case labels are elided from this excerpt (listing jumps 2722 -> 2724, ...),
// so the per-group operand counts are not visible here.
2716 int Translation::NumberOfOperandsFor(Opcode opcode) {
2718 case GETTER_STUB_FRAME:
2719 case SETTER_STUB_FRAME:
2720 case DUPLICATED_OBJECT:
2721 case ARGUMENTS_OBJECT:
2722 case CAPTURED_OBJECT:
2724 case INT32_REGISTER:
2725 case UINT32_REGISTER:
2726 case DOUBLE_REGISTER:
2728 case INT32_STACK_SLOT:
2729 case UINT32_STACK_SLOT:
2730 case DOUBLE_STACK_SLOT:
2732 case COMPILED_STUB_FRAME:
2735 case ARGUMENTS_ADAPTOR_FRAME:
2736 case CONSTRUCT_STUB_FRAME:
2746 #if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
// Debug-only: maps a translation opcode to its name via the X-macro list.
2748 const char* Translation::StringFor(Opcode opcode) {
2749 #define TRANSLATION_OPCODE_CASE(item) case item: return #item;
2751 TRANSLATION_OPCODE_LIST(TRANSLATION_OPCODE_CASE)
2753 #undef TRANSLATION_OPCODE_CASE
2761 // We can't intermix stack decoding and allocations because the
2762 // deoptimization infrastructure is not GC safe.
2763 // Thus we build a temporary structure in malloced space.
// Decodes the next translation command into a SlotRef describing where an
// inlined function's argument lives (stack slot of some type, or a literal).
// Register commands and object commands cannot occur for argument slots; the
// UNREACHABLE()/return-default lines for those are elided from this excerpt.
2764 SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
2765 DeoptimizationInputData* data,
2766 JavaScriptFrame* frame) {
2767 Translation::Opcode opcode =
2768 static_cast<Translation::Opcode>(iterator->Next());
2771 case Translation::BEGIN:
2772 case Translation::JS_FRAME:
2773 case Translation::ARGUMENTS_ADAPTOR_FRAME:
2774 case Translation::CONSTRUCT_STUB_FRAME:
2775 case Translation::GETTER_STUB_FRAME:
2776 case Translation::SETTER_STUB_FRAME:
2777 // Peeled off before getting here.
2780 case Translation::DUPLICATED_OBJECT:
2781 case Translation::ARGUMENTS_OBJECT:
2782 case Translation::CAPTURED_OBJECT:
2783 // This can be only emitted for local slots not for argument slots.
2786 case Translation::REGISTER:
2787 case Translation::INT32_REGISTER:
2788 case Translation::UINT32_REGISTER:
2789 case Translation::DOUBLE_REGISTER:
2790 // We are at safepoint which corresponds to call. All registers are
2791 // saved by caller so there would be no live registers at this
2792 // point. Thus these translation commands should not be used.
2795 case Translation::STACK_SLOT: {
2796 int slot_index = iterator->Next();
2797 Address slot_addr = SlotAddress(frame, slot_index);
2798 return SlotRef(slot_addr, SlotRef::TAGGED);
2801 case Translation::INT32_STACK_SLOT: {
2802 int slot_index = iterator->Next();
2803 Address slot_addr = SlotAddress(frame, slot_index);
2804 return SlotRef(slot_addr, SlotRef::INT32);
2807 case Translation::UINT32_STACK_SLOT: {
2808 int slot_index = iterator->Next();
2809 Address slot_addr = SlotAddress(frame, slot_index);
2810 return SlotRef(slot_addr, SlotRef::UINT32);
2813 case Translation::DOUBLE_STACK_SLOT: {
2814 int slot_index = iterator->Next();
2815 Address slot_addr = SlotAddress(frame, slot_index);
2816 return SlotRef(slot_addr, SlotRef::DOUBLE);
2819 case Translation::LITERAL: {
2820 int literal_index = iterator->Next();
2821 return SlotRef(data->GetIsolate(),
2822 data->LiteralArray()->get(literal_index));
2825 case Translation::COMPILED_STUB_FRAME:
// Fills |args_slots| with one SlotRef per argument of the inlined frame the
// iterator is positioned at, skipping the receiver's command first.
2835 void SlotRef::ComputeSlotsForArguments(Vector<SlotRef>* args_slots,
2836 TranslationIterator* it,
2837 DeoptimizationInputData* data,
2838 JavaScriptFrame* frame) {
2839 // Process the translation commands for the arguments.
2841 // Skip the translation command for the receiver.
2842 it->Skip(Translation::NumberOfOperandsFor(
2843 static_cast<Translation::Opcode>(it->Next())));
2845 // Compute slots for arguments.
2846 for (int i = 0; i < args_slots->length(); ++i) {
2847 (*args_slots)[i] = ComputeSlotForNextArgument(it, data, frame);
// Walks the translation for |frame| until it reaches the frame at
// inlined_jsframe_index, then returns a freshly allocated vector of SlotRefs
// for that frame's arguments. An arguments-adaptor frame supplies the actual
// argument count (height - 1); a plain JS frame uses the formal parameter
// count. Must not allocate on the V8 heap (DisallowHeapAllocation).
// NOTE(review): loop headers, returns of args_slots, and the jsframes_to_skip
// decrements are elided from this excerpt (non-contiguous listing numbers).
2852 Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
2853 JavaScriptFrame* frame,
2854 int inlined_jsframe_index,
2855 int formal_parameter_count) {
2856 DisallowHeapAllocation no_gc;
2857 int deopt_index = Safepoint::kNoDeoptimizationIndex;
2858 DeoptimizationInputData* data =
2859 static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
2860 TranslationIterator it(data->TranslationByteArray(),
2861 data->TranslationIndex(deopt_index)->value());
2862 Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
2863 ASSERT(opcode == Translation::BEGIN);
2864 it.Next(); // Drop frame count.
2865 int jsframe_count = it.Next();
2867 ASSERT(jsframe_count > inlined_jsframe_index);
2868 int jsframes_to_skip = inlined_jsframe_index;
2870 opcode = static_cast<Translation::Opcode>(it.Next());
2871 if (opcode == Translation::ARGUMENTS_ADAPTOR_FRAME) {
2872 if (jsframes_to_skip == 0) {
2873 ASSERT(Translation::NumberOfOperandsFor(opcode) == 2);
2875 it.Skip(1); // literal id
2876 int height = it.Next();
2878 // We reached the arguments adaptor frame corresponding to the
2879 // inlined function in question. Number of arguments is height - 1.
2880 Vector<SlotRef> args_slots =
2881 Vector<SlotRef>::New(height - 1); // Minus receiver.
2882 ComputeSlotsForArguments(&args_slots, &it, data, frame);
2885 } else if (opcode == Translation::JS_FRAME) {
2886 if (jsframes_to_skip == 0) {
2887 // Skip over operands to advance to the next opcode.
2888 it.Skip(Translation::NumberOfOperandsFor(opcode));
2890 // We reached the frame corresponding to the inlined function
2891 // in question. Process the translation commands for the
2892 // arguments. Number of arguments is equal to the formal
2893 // parameter count.
2894 Vector<SlotRef> args_slots =
2895 Vector<SlotRef>::New(formal_parameter_count);
2896 ComputeSlotsForArguments(&args_slots, &it, data, frame);
2902 // Skip over operands to advance to the next opcode.
2903 it.Skip(Translation::NumberOfOperandsFor(opcode));
2907 return Vector<SlotRef>();
2910 #ifdef ENABLE_DEBUGGER_SUPPORT
2912 DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
2914 bool has_arguments_adaptor,
2915 bool has_construct_stub) {
2916 FrameDescription* output_frame = deoptimizer->output_[frame_index];
2917 function_ = output_frame->GetFunction();
2918 has_construct_stub_ = has_construct_stub;
2919 expression_count_ = output_frame->GetExpressionCount();
2920 expression_stack_ = new Object*[expression_count_];
2921 // Get the source position using the unoptimized code.
2922 Address pc = reinterpret_cast<Address>(output_frame->GetPc());
2923 Code* code = Code::cast(deoptimizer->isolate()->FindCodeObject(pc));
2924 source_position_ = code->SourcePosition(pc);
2926 for (int i = 0; i < expression_count_; i++) {
2927 SetExpression(i, output_frame->GetExpression(i));
2930 if (has_arguments_adaptor) {
2931 output_frame = deoptimizer->output_[frame_index - 1];
2932 ASSERT(output_frame->GetFrameType() == StackFrame::ARGUMENTS_ADAPTOR);
2935 parameters_count_ = output_frame->ComputeParametersCount();
2936 parameters_ = new Object*[parameters_count_];
2937 for (int i = 0; i < parameters_count_; i++) {
2938 SetParameter(i, output_frame->GetParameter(i));
2943 DeoptimizedFrameInfo::~DeoptimizedFrameInfo() {
2944 delete[] expression_stack_;
2945 delete[] parameters_;
2949 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
2950 v->VisitPointer(BitCast<Object**>(&function_));
2951 v->VisitPointers(parameters_, parameters_ + parameters_count_);
2952 v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
2955 #endif // ENABLE_DEBUGGER_SUPPORT
2957 } } // namespace v8::internal