diff --git a/src/v8/src/deoptimizer.cc b/src/v8/src/deoptimizer.cc
index 7050161..1df7df8 100644
--- a/src/v8/src/deoptimizer.cc
+++ b/src/v8/src/deoptimizer.cc
@@ -19,7 +19,7 @@ namespace internal {
 static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
   return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
-                                  OS::CommitPageSize(),
+                                  base::OS::CommitPageSize(),
 #if defined(__native_client__)
   // The Native Client port of V8 uses an interpreter,
   // so code pages don't need PROT_EXEC.
@@ -101,7 +101,7 @@ static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
 size_t Deoptimizer::GetMaxDeoptTableSize() {
   int entries_size =
       Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
-  int commit_page_size = static_cast<int>(OS::CommitPageSize());
+  int commit_page_size = static_cast<int>(base::OS::CommitPageSize());
   int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
                     commit_page_size) + 1;
   return static_cast<size_t>(commit_page_size * page_count);
@@ -352,8 +352,11 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
     }
     SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
     int deopt_index = safepoint.deoptimization_index();
-    bool safe_to_deopt = deopt_index != Safepoint::kNoDeoptimizationIndex;
-    CHECK(topmost_optimized_code == NULL || safe_to_deopt);
+    // Turbofan deopt is checked when we are patching addresses on stack.
+    bool turbofanned = code->is_turbofanned();
+    bool safe_to_deopt =
+        deopt_index != Safepoint::kNoDeoptimizationIndex || turbofanned;
+    CHECK(topmost_optimized_code == NULL || safe_to_deopt || turbofanned);
     if (topmost_optimized_code == NULL) {
       topmost_optimized_code = code;
       safe_to_deopt_topmost_optimized_code = safe_to_deopt;
@@ -374,6 +377,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
     Code* code = Code::cast(element);
     CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
     Object* next = code->next_code_link();
+
     if (code->marked_for_deoptimization()) {
       // Put the code into the list for later patching.
       codes.Add(code, &zone);
@@ -396,6 +400,10 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
     element = next;
   }
 
+  if (FLAG_turbo_deoptimization) {
+    PatchStackForMarkedCode(isolate);
+  }
+
   // TODO(titzer): we need a handle scope only because of the macro assembler,
   // which is only used in EnsureCodeForDeoptimizationEntry.
   HandleScope scope(isolate);
@@ -404,17 +412,81 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
   for (int i = 0; i < codes.length(); i++) {
 #ifdef DEBUG
     if (codes[i] == topmost_optimized_code) {
-      ASSERT(safe_to_deopt_topmost_optimized_code);
+      DCHECK(safe_to_deopt_topmost_optimized_code);
     }
 #endif
     // It is finally time to die, code object.
+
+    // Remove the code from optimized code map.
+    DeoptimizationInputData* deopt_data =
+        DeoptimizationInputData::cast(codes[i]->deoptimization_data());
+    SharedFunctionInfo* shared =
+        SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+    shared->EvictFromOptimizedCodeMap(codes[i], "deoptimized code");
+
     // Do platform-specific patching to force any activations to lazy deopt.
-    PatchCodeForDeoptimization(isolate, codes[i]);
+    //
+    // We skip patching Turbofan code - we patch return addresses on stack.
+    // TODO(jarin) We should still zap the code object (but we have to
+    // be careful not to zap the deoptimization block).
+    if (!codes[i]->is_turbofanned()) {
+      PatchCodeForDeoptimization(isolate, codes[i]);
 
-    // We might be in the middle of incremental marking with compaction.
-    // Tell collector to treat this code object in a special way and
-    // ignore all slots that might have been recorded on it.
-    isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
+      // We might be in the middle of incremental marking with compaction.
+      // Tell collector to treat this code object in a special way and
+      // ignore all slots that might have been recorded on it.
+      isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
+    }
+  }
+}
+
+
+static int FindPatchAddressForReturnAddress(Code* code, int pc) {
+  DeoptimizationInputData* input_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+  int patch_count = input_data->ReturnAddressPatchCount();
+  for (int i = 0; i < patch_count; i++) {
+    int return_pc = input_data->ReturnAddressPc(i)->value();
+    int patch_pc = input_data->PatchedAddressPc(i)->value();
+    // If the supplied pc matches the return pc or if the address
+    // has been already patched, return the patch pc.
+    if (pc == return_pc || pc == patch_pc) {
+      return patch_pc;
+    }
+  }
+  return -1;
+}
+
+
+// For all marked Turbofanned code on stack, change the return address to go
+// to the deoptimization block.
+void Deoptimizer::PatchStackForMarkedCode(Isolate* isolate) {
+  // TODO(jarin) We should tolerate missing patch entry for the topmost frame.
+  for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
+       it.Advance()) {
+    StackFrame::Type type = it.frame()->type();
+    if (type == StackFrame::OPTIMIZED) {
+      Code* code = it.frame()->LookupCode();
+      if (code->is_turbofanned() && code->marked_for_deoptimization()) {
+        JSFunction* function =
+            static_cast<OptimizedFrame*>(it.frame())->function();
+        Address* pc_address = it.frame()->pc_address();
+        int pc_offset =
+            static_cast<int>(*pc_address - code->instruction_start());
+        int new_pc_offset = FindPatchAddressForReturnAddress(code, pc_offset);
+
+        if (FLAG_trace_deopt) {
+          CodeTracer::Scope scope(isolate->GetCodeTracer());
+          PrintF(scope.file(), "[patching stack address for function: ");
+          function->PrintName(scope.file());
+          PrintF(scope.file(), " (Pc offset %i -> %i)]\n", pc_offset,
+                 new_pc_offset);
+        }
+
+        CHECK_LE(0, new_pc_offset);
+        *pc_address += new_pc_offset - pc_offset;
+      }
+    }
   }
 }
@@ -459,9 +531,11 @@ void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
                    reinterpret_cast<intptr_t>(object));
   }
   if (object->IsJSGlobalProxy()) {
-    Object* proto = object->GetPrototype();
-    CHECK(proto->IsJSGlobalObject());
-    Context* native_context = GlobalObject::cast(proto)->native_context();
+    PrototypeIterator iter(object->GetIsolate(), object);
+    // TODO(verwaest): This CHECK will be hit if the global proxy is detached.
+    CHECK(iter.GetCurrent()->IsJSGlobalObject());
+    Context* native_context =
+        GlobalObject::cast(iter.GetCurrent())->native_context();
     MarkAllCodeForContext(native_context);
     DeoptimizeMarkedCodeForContext(native_context);
   } else if (object->IsGlobalObject()) {
@@ -562,7 +636,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
   if (function->IsSmi()) {
     function = NULL;
   }
-  ASSERT(from != NULL);
+  DCHECK(from != NULL);
   if (function != NULL && function->IsOptimized()) {
     function->shared()->increment_deopt_count();
     if (bailout_type_ == Deoptimizer::SOFT) {
@@ -577,9 +651,9 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
   compiled_code_ = FindOptimizedCode(function, optimized_code);
 #if DEBUG
-  ASSERT(compiled_code_ != NULL);
+  DCHECK(compiled_code_ != NULL);
   if (type == EAGER || type == SOFT || type == LAZY) {
-    ASSERT(compiled_code_->kind() != Code::FUNCTION);
+    DCHECK(compiled_code_->kind() != Code::FUNCTION);
   }
 #endif
@@ -610,7 +684,7 @@ Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
                  : compiled_code;
     }
     case Deoptimizer::DEBUGGER:
-      ASSERT(optimized_code->contains(from_));
+      DCHECK(optimized_code->contains(from_));
       return optimized_code;
   }
   FATAL("Could not find code for optimized function");
@@ -629,8 +703,8 @@ void Deoptimizer::PrintFunctionName() {
 Deoptimizer::~Deoptimizer() {
-  ASSERT(input_ == NULL && output_ == NULL);
-  ASSERT(disallow_heap_allocation_ == NULL);
+  DCHECK(input_ == NULL && output_ == NULL);
+  DCHECK(disallow_heap_allocation_ == NULL);
   delete trace_scope_;
 }
@@ -681,7 +755,7 @@ int Deoptimizer::GetDeoptimizationId(Isolate* isolate,
       addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
     return kNotDeoptimizationEntry;
   }
-  ASSERT_EQ(0,
+  DCHECK_EQ(0,
       static_cast<int>(addr - start) % table_entry_size_);
   return static_cast<int>(addr - start) / table_entry_size_;
 }
@@ -699,13 +773,10 @@ int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
       return data->PcAndState(i)->value();
     }
   }
-  PrintF(stderr, "[couldn't find pc offset for node=%d]\n", id.ToInt());
-  PrintF(stderr, "[method: %s]\n", shared->DebugName()->ToCString().get());
-  // Print the source code if available.
-  HeapStringAllocator string_allocator;
-  StringStream stream(&string_allocator);
-  shared->SourceCodePrint(&stream, -1);
-  PrintF(stderr, "[source:\n%s\n]", stream.ToCString().get());
+  OFStream os(stderr);
+  os << "[couldn't find pc offset for node=" << id.ToInt() << "]\n"
+     << "[method: " << shared->DebugName()->ToCString().get() << "]\n"
+     << "[source:\n" << SourceCodeOf(shared) << "\n]" << endl;
   FATAL("unable to find pc offset during deoptimization");
   return -1;
@@ -721,7 +792,7 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
   Object* element = native_context->DeoptimizedCodeListHead();
   while (!element->IsUndefined()) {
     Code* code = Code::cast(element);
-    ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+    DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
     length++;
     element = code->next_code_link();
   }
@@ -739,7 +810,7 @@ void Deoptimizer::DoComputeOutputFrames() {
       compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
     LOG(isolate(), CodeDeoptEvent(compiled_code_));
   }
-  ElapsedTimer timer;
+  base::ElapsedTimer timer;
   // Determine basic deoptimization information.  The optimized frame is
   // described by the input data.
@@ -758,7 +829,8 @@ void Deoptimizer::DoComputeOutputFrames() {
           input_data->OptimizationId()->value(),
           bailout_id_,
           fp_to_sp_delta_);
-    if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
+    if (bailout_type_ == EAGER || bailout_type_ == SOFT ||
+        (compiled_code_->is_hydrogen_stub())) {
       compiled_code_->PrintDeoptLocation(trace_scope_->file(), bailout_id_);
     }
   }
@@ -772,13 +844,13 @@ void Deoptimizer::DoComputeOutputFrames() {
   TranslationIterator iterator(translations, translation_index);
   Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator.Next());
-  ASSERT(Translation::BEGIN == opcode);
+  DCHECK(Translation::BEGIN == opcode);
   USE(opcode);
   // Read the number of output frames and allocate an array for their
   // descriptions.
   int count = iterator.Next();
   iterator.Next();  // Drop JS frames count.
-  ASSERT(output_ == NULL);
+  DCHECK(output_ == NULL);
   output_ = new FrameDescription*[count];
   for (int i = 0; i < count; ++i) {
     output_[i] = NULL;
@@ -903,7 +975,10 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
   intptr_t top_address;
   if (is_bottommost) {
     // Determine whether the input frame contains alignment padding.
-    has_alignment_padding_ = HasAlignmentPadding(function) ? 1 : 0;
+    has_alignment_padding_ =
+        (!compiled_code_->is_turbofanned() && HasAlignmentPadding(function))
+            ? 1
+            : 0;
     // 2 = context and function in the frame.
     // If the optimized frame had alignment padding, adjust the frame pointer
     // to point to the new position of the old frame pointer after padding
@@ -963,7 +1038,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
   }
   output_frame->SetCallerFp(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
-  ASSERT(!is_bottommost || (input_->GetRegister(fp_reg.code()) +
+  DCHECK(!is_bottommost || (input_->GetRegister(fp_reg.code()) +
      has_alignment_padding_ * kPointerSize) == fp_value);
   output_frame->SetFp(fp_value);
   if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value);
@@ -973,7 +1048,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
            V8PRIxPTR " ; caller's fp\n",
            fp_value, output_offset, value);
   }
-  ASSERT(!is_bottommost || !has_alignment_padding_ ||
+  DCHECK(!is_bottommost || !has_alignment_padding_ ||
          (fp_value & kPointerSize) != 0);
   if (FLAG_enable_ool_constant_pool) {
@@ -1022,7 +1097,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
   value = reinterpret_cast<intptr_t>(function);
   // The function for the bottommost output frame should also agree with the
   // input frame.
-  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+  DCHECK(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
   output_frame->SetFrameSlot(output_offset, value);
   if (trace_scope_ != NULL) {
     PrintF(trace_scope_->file(),
@@ -1188,7 +1263,7 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
            top_address + output_offset, output_offset, value, height - 1);
   }
-  ASSERT(0 == output_offset);
+  DCHECK(0 == output_offset);
   Builtins* builtins = isolate_->builtins();
   Code* adaptor_trampoline =
@@ -1226,8 +1301,8 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
   output_frame->SetFrameType(StackFrame::CONSTRUCT);
   // Construct stub can not be topmost or bottommost.
-  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
-  ASSERT(output_[frame_index] == NULL);
+  DCHECK(frame_index > 0 && frame_index < output_count_ - 1);
+  DCHECK(output_[frame_index] == NULL);
   output_[frame_index] = output_frame;
   // The top address of the frame is computed from the previous
@@ -1548,19 +1623,23 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
   //                                         reg = JSFunction context
   //
-  CHECK(compiled_code_->is_crankshafted() &&
-        compiled_code_->kind() != Code::OPTIMIZED_FUNCTION);
-  int major_key = compiled_code_->major_key();
+  CHECK(compiled_code_->is_hydrogen_stub());
+  int major_key = CodeStub::GetMajorKey(compiled_code_);
   CodeStubInterfaceDescriptor* descriptor =
       isolate_->code_stub_interface_descriptor(major_key);
+  // Check that there is a matching descriptor to the major key.
+  // This will fail if there has not been one installed to the isolate.
+  DCHECK_EQ(descriptor->MajorKey(), major_key);
   // The output frame must have room for all pushed register parameters
   // and the standard stack frame slots.  Include space for an argument
   // object to the callee and optionally the space to pass the argument
   // object to the stub failure handler.
-  CHECK_GE(descriptor->register_param_count_, 0);
-  int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
-      sizeof(Arguments) + kPointerSize;
+  int param_count = descriptor->GetEnvironmentParameterCount();
+  CHECK_GE(param_count, 0);
+
+  int height_in_bytes = kPointerSize * param_count + sizeof(Arguments) +
+      kPointerSize;
   int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
   int input_frame_size = input_->GetFrameSize();
   int output_frame_size = height_in_bytes + fixed_frame_size;
@@ -1654,7 +1733,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
   }
   intptr_t caller_arg_count = 0;
-  bool arg_count_known = !descriptor->stack_parameter_count_.is_valid();
+  bool arg_count_known = !descriptor->stack_parameter_count().is_valid();
   // Build the Arguments object for the caller's parameters and a pointer to it.
   output_frame_offset -= kPointerSize;
@@ -1702,11 +1781,12 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
   // Copy the register parameters to the failure frame.
   int arguments_length_offset = -1;
-  for (int i = 0; i < descriptor->register_param_count_; ++i) {
+  for (int i = 0; i < param_count; ++i) {
     output_frame_offset -= kPointerSize;
     DoTranslateCommand(iterator, 0, output_frame_offset);
-    if (!arg_count_known && descriptor->IsParameterCountRegister(i)) {
+    if (!arg_count_known &&
+        descriptor->IsEnvironmentParameterCountRegister(i)) {
       arguments_length_offset = output_frame_offset;
     }
   }
@@ -1742,17 +1822,17 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
   }
   // Copy the double registers from the input into the output frame.
-  CopySIMD128Registers(output_frame);
+  CopyDoubleRegisters(output_frame);
   // Fill registers containing handler and number of parameters.
   SetPlatformCompiledStubRegisters(output_frame, descriptor);
   // Compute this frame's PC, state, and continuation.
   Code* trampoline = NULL;
-  StubFunctionMode function_mode = descriptor->function_mode_;
+  StubFunctionMode function_mode = descriptor->function_mode();
   StubFailureTrampolineStub(isolate_,
                             function_mode).FindCodeInCache(&trampoline);
-  ASSERT(trampoline != NULL);
+  DCHECK(trampoline != NULL);
   output_frame->SetPc(reinterpret_cast<intptr_t>(
       trampoline->instruction_start()));
   if (FLAG_enable_ool_constant_pool) {
@@ -1799,7 +1879,7 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
       Handle<JSObject> arguments =
          isolate_->factory()->NewArgumentsObject(function, length);
       Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length);
-      ASSERT_EQ(array->length(), length);
+      DCHECK_EQ(array->length(), length);
       arguments->set_elements(*array);
       materialized_objects_->Add(arguments);
       for (int i = 0; i < length; ++i) {
@@ -1813,9 +1893,11 @@
     Handle<Map> map = Map::GeneralizeAllFieldRepresentations(
         Handle<Map>::cast(MaterializeNextValue()));
     switch (map->instance_type()) {
+      case MUTABLE_HEAP_NUMBER_TYPE:
       case HEAP_NUMBER_TYPE: {
         // Reuse the HeapNumber value directly as it is already properly
-        // tagged and skip materializing the HeapNumber explicitly.
+        // tagged and skip materializing the HeapNumber explicitly. Turn mutable
+        // heap numbers immutable.
         Handle<Object> object = MaterializeNextValue();
         if (object_index < prev_materialized_count_) {
           materialized_objects_->Add(Handle<Object>(
@@ -1877,6 +1959,9 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
 Handle<Object> Deoptimizer::MaterializeNextValue() {
   int value_index = materialization_value_index_++;
   Handle<Object> value = materialized_values_->at(value_index);
+  if (value->IsMutableHeapNumber()) {
+    HeapNumber::cast(*value)->set_map(isolate_->heap()->heap_number_map());
+  }
   if (*value == isolate_->heap()->arguments_marker()) {
     value = MaterializeNextHeapObject();
   }
@@ -1885,7 +1970,7 @@
 void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
-  ASSERT_NE(DEBUGGER, bailout_type_);
+  DCHECK_NE(DEBUGGER, bailout_type_);
   MaterializedObjectStore* materialized_store =
       isolate_->materialized_object_store();
@@ -1926,61 +2011,6 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
     Memory::Object_at(d.destination()) = *num;
   }
 
-  // Materialize all float32x4 before looking at arguments because when the
-  // output frames are used to materialize arguments objects later on they need
-  // to already contain valid float32x4 values.
-  for (int i = 0; i < deferred_float32x4s_.length(); i++) {
-    SIMD128MaterializationDescriptor<Address> d = deferred_float32x4s_[i];
-    float32x4_value_t x4 = d.value().f4;
-    Handle<Object> float32x4 = isolate_->factory()->NewFloat32x4(x4);
-    if (trace_scope_ != NULL) {
-      PrintF(trace_scope_->file(),
-             "Materialized a new float32x4 %p "
-             "[float32x4(%e, %e, %e, %e)] in slot %p\n",
-             reinterpret_cast<void*>(*float32x4),
-             x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
-             d.destination());
-    }
-    Memory::Object_at(d.destination()) = *float32x4;
-  }
-
-  // Materialize all float64x2 before looking at arguments because when the
-  // output frames are used to materialize arguments objects later on they need
-  // to already contain valid float64x2 values.
-  for (int i = 0; i < deferred_float64x2s_.length(); i++) {
-    SIMD128MaterializationDescriptor<Address> d = deferred_float64x2s_[i];
-    float64x2_value_t x2 = d.value().d2;
-    Handle<Object> float64x2 = isolate_->factory()->NewFloat64x2(x2);
-    if (trace_scope_ != NULL) {
-      PrintF(trace_scope_->file(),
-             "Materialized a new float64x2 %p "
-             "[float64x2(%e, %e)] in slot %p\n",
-             reinterpret_cast<void*>(*float64x2),
-             x2.storage[0], x2.storage[1],
-             d.destination());
-    }
-    Memory::Object_at(d.destination()) = *float64x2;
-  }
-
-  // Materialize all int32x4 before looking at arguments because when the
-  // output frames are used to materialize arguments objects later on they need
-  // to already contain valid int32x4 values.
-  for (int i = 0; i < deferred_int32x4s_.length(); i++) {
-    SIMD128MaterializationDescriptor<Address> d = deferred_int32x4s_[i];
-    int32x4_value_t x4 = d.value().i4;
-    Handle<Object> int32x4 = isolate_->factory()->NewInt32x4(x4);
-    if (trace_scope_ != NULL) {
-      PrintF(trace_scope_->file(),
-             "Materialized a new int32x4 %p "
-             "[int32x4(%u, %u, %u, %u)] in slot %p\n",
-             reinterpret_cast<void*>(*int32x4),
-             x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
-             d.destination());
-    }
-    Memory::Object_at(d.destination()) = *int32x4;
-  }
-
-  // Materialize all heap numbers required for arguments/captured objects.
   for (int i = 0; i < deferred_objects_double_values_.length(); i++) {
     HeapNumberMaterializationDescriptor<int> d =
         deferred_objects_double_values_[i];
@@ -1993,76 +2023,13 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
           d.value(),
          d.destination());
     }
-    ASSERT(values.at(d.destination())->IsTheHole());
+    DCHECK(values.at(d.destination())->IsTheHole());
     values.Set(d.destination(), num);
   }
 
   // Play it safe and clear all object double values before we continue.
   deferred_objects_double_values_.Clear();
 
-  // Materialize all float32x4 values required for arguments/captured objects.
-  for (int i = 0; i < deferred_objects_float32x4_values_.length(); i++) {
-    SIMD128MaterializationDescriptor<int> d =
-        deferred_objects_float32x4_values_[i];
-    float32x4_value_t x4 = d.value().f4;
-    Handle<Object> float32x4 = isolate_->factory()->NewFloat32x4(x4);
-    if (trace_scope_ != NULL) {
-      PrintF(trace_scope_->file(),
-             "Materialized a new float32x4 %p "
-             "[float32x4(%e, %e, %e, %e)] for object at %d\n",
-             reinterpret_cast<void*>(*float32x4),
-             x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
-             d.destination());
-    }
-    ASSERT(values.at(d.destination())->IsTheHole());
-    values.Set(d.destination(), float32x4);
-  }
-
-  // Play it safe and clear all object float32x4 values before we continue.
-  deferred_objects_float32x4_values_.Clear();
-
-  // Materialize all float64x2 values required for arguments/captured objects.
-  for (int i = 0; i < deferred_objects_float64x2_values_.length(); i++) {
-    SIMD128MaterializationDescriptor<int> d =
-        deferred_objects_float64x2_values_[i];
-    float64x2_value_t x2 = d.value().d2;
-    Handle<Object> float64x2 = isolate_->factory()->NewFloat64x2(x2);
-    if (trace_scope_ != NULL) {
-      PrintF(trace_scope_->file(),
-             "Materialized a new float64x2 %p "
-             "[float64x2(%e, %e)] for object at %d\n",
-             reinterpret_cast<void*>(*float64x2),
-             x2.storage[0], x2.storage[1],
-             d.destination());
-    }
-    ASSERT(values.at(d.destination())->IsTheHole());
-    values.Set(d.destination(), float64x2);
-  }
-
-  // Play it safe and clear all object float64x2 values before we continue.
-  deferred_objects_float64x2_values_.Clear();
-
-  // Materialize all int32x4 values required for arguments/captured objects.
-  for (int i = 0; i < deferred_objects_int32x4_values_.length(); i++) {
-    SIMD128MaterializationDescriptor<int> d =
-        deferred_objects_int32x4_values_[i];
-    int32x4_value_t x4 = d.value().i4;
-    Handle<Object> int32x4 = isolate_->factory()->NewInt32x4(x4);
-    if (trace_scope_ != NULL) {
-      PrintF(trace_scope_->file(),
-             "Materialized a new int32x4 %p "
-             "[int32x4(%u, %u, %u, %u)] for object at %d\n",
-             reinterpret_cast<void*>(*int32x4),
-             x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
-             d.destination());
-    }
-    ASSERT(values.at(d.destination())->IsTheHole());
-    values.Set(d.destination(), int32x4);
-  }
-
-  // Play it safe and clear all object int32x4 values before we continue.
-  deferred_objects_int32x4_values_.Clear();
-
   // Materialize arguments/captured objects.
   if (!deferred_objects_.is_empty()) {
     List<Handle<Object> > materialized_objects(deferred_objects_.length());
@@ -2192,16 +2159,10 @@ void Deoptimizer::DoTranslateObjectAndSkip(TranslationIterator* iterator) {
     case Translation::INT32_REGISTER:
     case Translation::UINT32_REGISTER:
     case Translation::DOUBLE_REGISTER:
-    case Translation::FLOAT32x4_REGISTER:
-    case Translation::FLOAT64x2_REGISTER:
-    case Translation::INT32x4_REGISTER:
     case Translation::STACK_SLOT:
     case Translation::INT32_STACK_SLOT:
     case Translation::UINT32_STACK_SLOT:
     case Translation::DOUBLE_STACK_SLOT:
-    case Translation::FLOAT32x4_STACK_SLOT:
-    case Translation::FLOAT64x2_STACK_SLOT:
-    case Translation::INT32x4_STACK_SLOT:
     case Translation::LITERAL: {
       // The value is not part of any materialized object, so we can ignore it.
       iterator->Skip(Translation::NumberOfOperandsFor(opcode));
@@ -2351,49 +2312,6 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
       return;
     }
 
-    case Translation::FLOAT32x4_REGISTER:
-    case Translation::FLOAT64x2_REGISTER:
-    case Translation::INT32x4_REGISTER: {
-      int input_reg = iterator->Next();
-      simd128_value_t value = input_->GetSIMD128Register(input_reg);
-      if (trace_scope_ != NULL) {
-        if (opcode == Translation::FLOAT32x4_REGISTER) {
-          float32x4_value_t x4 = value.f4;
-          PrintF(trace_scope_->file(),
-                 "    object @0x%08" V8PRIxPTR ": [field #%d] <- ",
-                 reinterpret_cast<intptr_t>(object_slot),
-                 field_index);
-          PrintF(trace_scope_->file(),
-                 "float32x4(%e, %e, %e, %e) ; %s\n",
-                 x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
-                 SIMD128Register::AllocationIndexToString(input_reg));
-        } else if (opcode == Translation::FLOAT64x2_REGISTER) {
-          float64x2_value_t x2 = value.d2;
-          PrintF(trace_scope_->file(),
-                 "    object @0x%08" V8PRIxPTR ": [field #%d] <- ",
-                 reinterpret_cast<intptr_t>(object_slot),
-                 field_index);
-          PrintF(trace_scope_->file(),
-                 "float64x2(%e, %e) ; %s\n",
-                 x2.storage[0], x2.storage[1],
-                 SIMD128Register::AllocationIndexToString(input_reg));
-        } else {
-          ASSERT(opcode == Translation::INT32x4_REGISTER);
-          int32x4_value_t x4 = value.i4;
-          PrintF(trace_scope_->file(),
-                 "    object @0x%08" V8PRIxPTR ": [field #%d] <- ",
-                 reinterpret_cast<intptr_t>(object_slot),
-                 field_index);
-          PrintF(trace_scope_->file(),
-                 "int32x4(%u, %u, %u, %u) ; %s\n",
-                 x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
-                 SIMD128Register::AllocationIndexToString(input_reg));
-        }
-      }
-      AddObjectSIMD128Value(value, opcode);
-      return;
-    }
-
     case Translation::STACK_SLOT: {
       int input_slot_index = iterator->Next();
       unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
@@ -2481,50 +2399,6 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
       return;
     }
 
-    case Translation::FLOAT32x4_STACK_SLOT:
-    case Translation::FLOAT64x2_STACK_SLOT:
-    case Translation::INT32x4_STACK_SLOT: {
-      int input_slot_index = iterator->Next();
-      unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
-      simd128_value_t value = input_->GetSIMD128FrameSlot(input_offset);
-      if (trace_scope_ != NULL) {
-        if (opcode == Translation::FLOAT32x4_STACK_SLOT) {
-          float32x4_value_t x4 = value.f4;
-          PrintF(trace_scope_->file(),
-                 "    object @0x%08" V8PRIxPTR ": [field #%d] <- ",
-                 reinterpret_cast<intptr_t>(object_slot),
-                 field_index);
-          PrintF(trace_scope_->file(),
-                 "float32x4(%e, %e, %e, %e) ; [sp + %d]\n",
-                 x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
-                 input_offset);
-        } else if (opcode == Translation::FLOAT64x2_STACK_SLOT) {
-          float64x2_value_t x2 = value.d2;
-          PrintF(trace_scope_->file(),
-                 "    object @0x%08" V8PRIxPTR ": [field #%d] <- ",
-                 reinterpret_cast<intptr_t>(object_slot),
-                 field_index);
-          PrintF(trace_scope_->file(),
-                 "float64x2(%e, %e) ; [sp + %d]\n",
-                 x2.storage[0], x2.storage[1],
-                 input_offset);
-        } else {
-          ASSERT(opcode == Translation::INT32x4_STACK_SLOT);
-          int32x4_value_t x4 = value.i4;
-          PrintF(trace_scope_->file(),
-                 "    object @0x%08" V8PRIxPTR ": [field #%d] <- ",
-                 reinterpret_cast<intptr_t>(object_slot),
-                 field_index);
-          PrintF(trace_scope_->file(),
-                 "int32x4(%u, %u, %u, %u) ; [sp + %d]\n",
-                 x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
-                 input_offset);
-        }
-      }
-      AddObjectSIMD128Value(value, opcode);
-      return;
-    }
-
     case Translation::LITERAL: {
       Object* literal = ComputeLiteral(iterator->Next());
       if (trace_scope_ != NULL) {
@@ -2709,50 +2583,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
       return;
     }
 
-    case Translation::FLOAT32x4_REGISTER:
-    case Translation::FLOAT64x2_REGISTER:
-    case Translation::INT32x4_REGISTER: {
-      int input_reg = iterator->Next();
-      simd128_value_t value = input_->GetSIMD128Register(input_reg);
-      if (trace_scope_ != NULL) {
-        if (opcode == Translation::FLOAT32x4_REGISTER) {
-          float32x4_value_t x4 = value.f4;
-          PrintF(trace_scope_->file(),
-                 "    0x%08" V8PRIxPTR ":"
-                 " [top + %d] <- float32x4(%e, %e, %e, %e) ; %s\n",
-                 output_[frame_index]->GetTop() + output_offset,
-                 output_offset,
-                 x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
-                 SIMD128Register::AllocationIndexToString(input_reg));
-        } else if (opcode == Translation::FLOAT64x2_REGISTER) {
-          float64x2_value_t x2 = value.d2;
-          PrintF(trace_scope_->file(),
-                 "    0x%08" V8PRIxPTR ":"
-                 " [top + %d] <- float64x2(%e, %e) ; %s\n",
-                 output_[frame_index]->GetTop() + output_offset,
-                 output_offset,
-                 x2.storage[0], x2.storage[1],
-                 SIMD128Register::AllocationIndexToString(input_reg));
-        } else {
-          ASSERT(opcode == Translation::INT32x4_REGISTER);
-          int32x4_value_t x4 = value.i4;
-          PrintF(trace_scope_->file(),
-                 "    0x%08" V8PRIxPTR ":"
-                 " [top + %d] <- int32x4(%u, %u, %u, %u) ; %s\n",
-                 output_[frame_index]->GetTop() + output_offset,
-                 output_offset,
-                 x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
-                 SIMD128Register::AllocationIndexToString(input_reg));
-        }
-      }
-      // We save the untagged value on the side and store a GC-safe
-      // temporary placeholder in the frame.
-      AddSIMD128Value(output_[frame_index]->GetTop() + output_offset, value,
-                      opcode);
-      output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
-      return;
-    }
-
     case Translation::STACK_SLOT: {
       int input_slot_index = iterator->Next();
       unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
@@ -2854,51 +2684,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
       return;
     }
 
-    case Translation::FLOAT32x4_STACK_SLOT:
-    case Translation::FLOAT64x2_STACK_SLOT:
-    case Translation::INT32x4_STACK_SLOT: {
-      int input_slot_index = iterator->Next();
-      unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
-      simd128_value_t value = input_->GetSIMD128FrameSlot(input_offset);
-      if (trace_scope_ != NULL) {
-        if (opcode == Translation::FLOAT32x4_STACK_SLOT) {
-          float32x4_value_t x4 = value.f4;
-          PrintF(trace_scope_->file(),
-                 "    0x%08" V8PRIxPTR ": "
-                 "[top + %d] <- float32x4(%e, %e, %e, %e) ; [sp + %d]\n",
-                 output_[frame_index]->GetTop() + output_offset,
-                 output_offset,
-                 x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
-                 input_offset);
-        } else if (opcode == Translation::FLOAT64x2_STACK_SLOT) {
-          float64x2_value_t x2 = value.d2;
-          PrintF(trace_scope_->file(),
-                 "    0x%08" V8PRIxPTR ": "
-                 "[top + %d] <- float64x2(%e, %e) ; [sp + %d]\n",
-                 output_[frame_index]->GetTop() + output_offset,
-                 output_offset,
-                 x2.storage[0], x2.storage[1],
-                 input_offset);
-        } else {
-          ASSERT(opcode == Translation::INT32x4_STACK_SLOT);
-          int32x4_value_t x4 = value.i4;
-          PrintF(trace_scope_->file(),
-                 "    0x%08" V8PRIxPTR ": "
-                 "[top + %d] <- int32x4(%u, %u, %u, %u) ; [sp + %d]\n",
-                 output_[frame_index]->GetTop() + output_offset,
-                 output_offset,
-                 x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
-                 input_offset);
-        }
-      }
-      // We save the untagged value on the side and store a GC-safe
-      // temporary placeholder in the frame.
-      AddSIMD128Value(output_[frame_index]->GetTop() + output_offset, value,
-                      opcode);
-      output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
-      return;
-    }
-
     case Translation::LITERAL: {
       Object* literal = ComputeLiteral(iterator->Next());
      if (trace_scope_ != NULL) {
@@ -3045,27 +2830,6 @@ void Deoptimizer::AddObjectDoubleValue(double value) {
 }
 
 
-void Deoptimizer::AddObjectSIMD128Value(simd128_value_t value,
-                                        int translation_opcode) {
-  deferred_objects_tagged_values_.Add(isolate()->heap()->the_hole_value());
-  SIMD128MaterializationDescriptor<int> value_desc(
-      deferred_objects_tagged_values_.length() - 1, value);
-  Translation::Opcode opcode =
-      static_cast<Translation::Opcode>(translation_opcode);
-  if (opcode == Translation::FLOAT32x4_REGISTER ||
-      opcode == Translation::FLOAT32x4_STACK_SLOT) {
-    deferred_objects_float32x4_values_.Add(value_desc);
-  } else if (opcode == Translation::FLOAT64x2_REGISTER ||
-             opcode == Translation::FLOAT64x2_STACK_SLOT) {
-    deferred_objects_float64x2_values_.Add(value_desc);
-  } else {
-    ASSERT(opcode == Translation::INT32x4_REGISTER ||
-           opcode == Translation::INT32x4_STACK_SLOT);
-    deferred_objects_int32x4_values_.Add(value_desc);
-  }
-}
-
-
 void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
   HeapNumberMaterializationDescriptor<Address> value_desc(
       reinterpret_cast<Address>(slot_address), value);
@@ -3073,27 +2837,6 @@ void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
 }
 
 
-void Deoptimizer::AddSIMD128Value(intptr_t slot_address,
-                                  simd128_value_t value,
-                                  int translation_opcode) {
-  SIMD128MaterializationDescriptor<Address> value_desc(
-      reinterpret_cast<Address>(slot_address), value);
-  Translation::Opcode opcode =
-      static_cast<Translation::Opcode>(translation_opcode);
-  if (opcode == Translation::FLOAT32x4_REGISTER ||
-      opcode == Translation::FLOAT32x4_STACK_SLOT) {
-    deferred_float32x4s_.Add(value_desc);
-  } else if (opcode == Translation::FLOAT64x2_REGISTER ||
-             opcode == Translation::FLOAT64x2_STACK_SLOT) {
-    deferred_float64x2s_.Add(value_desc);
-  } else {
-    ASSERT(opcode == Translation::INT32x4_REGISTER ||
-           opcode == Translation::INT32x4_STACK_SLOT);
-    deferred_int32x4s_.Add(value_desc);
-  }
-}
-
-
 void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
                                                    BailoutType type,
                                                    int max_entry_id) {
@@ -3114,7 +2857,7 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
   GenerateDeoptimizationEntries(&masm, entry_count, type);
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
   MemoryChunk* chunk = data->deopt_entry_code_[type];
   CHECK(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
         desc.instr_size);
   chunk->CommitArea(desc.instr_size);
   CopyBytes(chunk->area_start(), desc.buffer,
             static_cast<size_t>(desc.instr_size));
-  CPU::FlushICache(chunk->area_start(), desc.instr_size);
+  CpuFeatures::FlushICache(chunk->area_start(), desc.instr_size);
   data->deopt_entry_code_entries_[type] = entry_count;
 }
@@ -3208,7 +2951,7 @@ unsigned FrameDescription::GetExpressionCount() {
 Object* FrameDescription::GetExpression(int index) {
-  ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
+  DCHECK_EQ(StackFrame::JAVA_SCRIPT, type_);
   unsigned offset = GetOffsetFromSlotIndex(index);
   return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
 }
@@ -3234,7 +2977,7 @@ int32_t TranslationIterator::Next() {
   // bit of zero (marks the end).
   uint32_t bits = 0;
   for (int i = 0; true; i += 7) {
-    ASSERT(HasNext());
+    DCHECK(HasNext());
     uint8_t next = buffer_->get(index_++);
     bits |= (next >> 1) << i;
     if ((next & 1) == 0) break;
   }
@@ -3337,12 +3080,6 @@ void Translation::StoreDoubleRegister(DoubleRegister reg) {
 }
 
 
-void Translation::StoreSIMD128Register(SIMD128Register reg, Opcode opcode) {
-  buffer_->Add(opcode, zone());
-  buffer_->Add(SIMD128Register::ToAllocationIndex(reg), zone());
-}
-
-
 void Translation::StoreStackSlot(int index) {
   buffer_->Add(STACK_SLOT, zone());
   buffer_->Add(index, zone());
@@ -3367,12 +3104,6 @@ void Translation::StoreDoubleStackSlot(int index) {
 }
 
 
-void Translation::StoreSIMD128StackSlot(int index, Opcode opcode) {
-  buffer_->Add(opcode, zone());
-  buffer_->Add(index, zone());
-}
-
-
 void Translation::StoreLiteral(int literal_id) {
   buffer_->Add(LITERAL, zone());
   buffer_->Add(literal_id, zone());
@@ -3400,16 +3131,10 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
     case INT32_REGISTER:
     case UINT32_REGISTER:
     case DOUBLE_REGISTER:
-    case FLOAT32x4_REGISTER:
-    case FLOAT64x2_REGISTER:
-    case INT32x4_REGISTER:
     case STACK_SLOT:
     case INT32_STACK_SLOT:
     case UINT32_STACK_SLOT:
    case DOUBLE_STACK_SLOT:
-    case FLOAT32x4_STACK_SLOT:
-    case FLOAT64x2_STACK_SLOT:
-    case INT32x4_STACK_SLOT:
     case LITERAL:
     case COMPILED_STUB_FRAME:
       return 1;
@@ -3473,9 +3198,6 @@ SlotRef SlotRefValueBuilder::ComputeSlotForNextArgument(
     case Translation::INT32_REGISTER:
     case Translation::UINT32_REGISTER:
    case Translation::DOUBLE_REGISTER:
-    case Translation::FLOAT32x4_REGISTER:
-    case Translation::FLOAT64x2_REGISTER:
-    case Translation::INT32x4_REGISTER:
      // We are at safepoint which corresponds to call.  All registers are
      // saved by caller so there would be no live registers at this
      // point. Thus these translation commands should not be used.
@@ -3505,24 +3227,6 @@ SlotRef SlotRefValueBuilder::ComputeSlotForNextArgument(
       return SlotRef(slot_addr, SlotRef::DOUBLE);
     }
 
-    case Translation::FLOAT32x4_STACK_SLOT: {
-      int slot_index = iterator->Next();
-      Address slot_addr = SlotAddress(frame, slot_index);
-      return SlotRef(slot_addr, SlotRef::FLOAT32x4);
-    }
-
-    case Translation::FLOAT64x2_STACK_SLOT: {
-      int slot_index = iterator->Next();
-      Address slot_addr = SlotAddress(frame, slot_index);
-      return SlotRef(slot_addr, SlotRef::FLOAT64x2);
-    }
-
-    case Translation::INT32x4_STACK_SLOT: {
-      int slot_index = iterator->Next();
-      Address slot_addr = SlotAddress(frame, slot_index);
-      return SlotRef(slot_addr, SlotRef::INT32x4);
-    }
-
     case Translation::LITERAL: {
       int literal_index = iterator->Next();
      return SlotRef(data->GetIsolate(),
@@ -3642,7 +3346,11 @@ Handle<Object> SlotRef::GetValue(Isolate* isolate) {
       return Handle<Object>(Memory::Object_at(addr_), isolate);
 
     case INT32: {
+#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT
+      int value = Memory::int32_at(addr_ + kIntSize);
+#else
       int value = Memory::int32_at(addr_);
+#endif
       if (Smi::IsValid(value)) {
         return Handle<Object>(Smi::FromInt(value), isolate);
       } else {
@@ -3651,7 +3359,11 @@ Handle<Object> SlotRef::GetValue(Isolate* isolate) {
     case UINT32: {
+#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT
+      uint32_t value = Memory::uint32_at(addr_ + kIntSize);
+#else
      uint32_t value = Memory::uint32_at(addr_);
+#endif
      if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
        return Handle<Object>(Smi::FromInt(static_cast<int>(value)), isolate);
      } else {
@@ -3664,15 +3376,6 @@ Handle<Object> SlotRef::GetValue(Isolate* isolate) {
       return isolate->factory()->NewNumber(value);
     }
 
-    case FLOAT32x4:
-      return isolate->factory()->NewFloat32x4(read_simd128_value(addr_).f4);
-
-    case FLOAT64x2:
-      return isolate->factory()->NewFloat64x2(read_simd128_value(addr_).d2);
-
-    case INT32x4:
-      return isolate->factory()->NewInt32x4(read_simd128_value(addr_).i4);
-
     case LITERAL:
       return literal_;
 
@@ -3773,6 +3476,7 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
   // TODO(jarin) this should be unified with the code in
   // Deoptimizer::MaterializeNextHeapObject()
   switch (map->instance_type()) {
+    case MUTABLE_HEAP_NUMBER_TYPE:
     case HEAP_NUMBER_TYPE: {
      // Reuse the HeapNumber value directly as it is already properly
      // tagged and skip materializing the HeapNumber explicitly.
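
A minimal standalone model of the stack-patching step this diff introduces, for
readers following the Turbofan path: FindPatchAddressForReturnAddress() maps a
frame's current pc offset to the pc offset of the code's deoptimization block,
and PatchStackForMarkedCode() shifts the saved return address by that delta.
The table and types below are illustrative stand-ins (a sketch, not the V8
API); as in the real lookup, an already-patched pc is accepted, so repeating
the stack walk is idempotent.

#include <cstdint>
#include <cstdio>
#include <vector>

struct ReturnAddressPatch {
  int return_pc;  // pc offset of a call's return site
  int patch_pc;   // pc offset of the matching deoptimization block entry
};

// Mirrors the lookup semantics of FindPatchAddressForReturnAddress(): match
// either the original return pc or an already-patched pc.
static int FindPatchPc(const std::vector<ReturnAddressPatch>& table, int pc) {
  for (const ReturnAddressPatch& entry : table) {
    if (pc == entry.return_pc || pc == entry.patch_pc) return entry.patch_pc;
  }
  return -1;  // no patch entry recorded for this pc
}

int main() {
  // Hypothetical deopt data: two return sites and their patch targets.
  std::vector<ReturnAddressPatch> table = {{0x40, 0x90}, {0x58, 0xa4}};
  const uintptr_t instruction_start = 0x1000;
  uintptr_t return_address = instruction_start + 0x40;  // saved on the stack

  int pc_offset = static_cast<int>(return_address - instruction_start);
  int new_pc_offset = FindPatchPc(table, pc_offset);
  if (new_pc_offset >= 0) {
    // Same arithmetic as PatchStackForMarkedCode(): move the saved return
    // address by the delta between the patched and original offsets.
    return_address += new_pc_offset - pc_offset;
  }
  std::printf("patched return address: 0x%llx\n",
              static_cast<unsigned long long>(return_address));
  return 0;
}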