static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
- OS::CommitPageSize(),
+ base::OS::CommitPageSize(),
#if defined(__native_client__)
// The Native Client port of V8 uses an interpreter,
// so code pages don't need PROT_EXEC.
size_t Deoptimizer::GetMaxDeoptTableSize() {
int entries_size =
Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
- int commit_page_size = static_cast<int>(OS::CommitPageSize());
+ int commit_page_size = static_cast<int>(base::OS::CommitPageSize());
int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
commit_page_size) + 1;
return static_cast<size_t>(commit_page_size * page_count);
}
SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
int deopt_index = safepoint.deoptimization_index();
- bool safe_to_deopt = deopt_index != Safepoint::kNoDeoptimizationIndex;
- CHECK(topmost_optimized_code == NULL || safe_to_deopt);
+ // For turbofanned code, deopt safety is verified later, while patching return addresses on the stack.
+ bool turbofanned = code->is_turbofanned();
+ bool safe_to_deopt =
+ deopt_index != Safepoint::kNoDeoptimizationIndex || turbofanned;
+ CHECK(topmost_optimized_code == NULL || safe_to_deopt || turbofanned);
if (topmost_optimized_code == NULL) {
topmost_optimized_code = code;
safe_to_deopt_topmost_optimized_code = safe_to_deopt;
Code* code = Code::cast(element);
CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
Object* next = code->next_code_link();
+
if (code->marked_for_deoptimization()) {
// Put the code into the list for later patching.
codes.Add(code, &zone);
element = next;
}
+ if (FLAG_turbo_deoptimization) {
+ PatchStackForMarkedCode(isolate);
+ }
+
// TODO(titzer): we need a handle scope only because of the macro assembler,
// which is only used in EnsureCodeForDeoptimizationEntry.
HandleScope scope(isolate);
for (int i = 0; i < codes.length(); i++) {
#ifdef DEBUG
if (codes[i] == topmost_optimized_code) {
- ASSERT(safe_to_deopt_topmost_optimized_code);
+ DCHECK(safe_to_deopt_topmost_optimized_code);
}
#endif
// It is finally time to die, code object.
+
+ // Remove the code from optimized code map.
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(codes[i]->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(codes[i], "deoptimized code");
+
// Do platform-specific patching to force any activations to lazy deopt.
- PatchCodeForDeoptimization(isolate, codes[i]);
+ //
+ // TurboFan code is not patched here; instead, its return addresses on the
+ // stack are patched (see PatchStackForMarkedCode above).
+ // TODO(jarin): We should still zap the code object (but we have to be
+ if (!codes[i]->is_turbofanned()) {
+ PatchCodeForDeoptimization(isolate, codes[i]);
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
+ // We might be in the middle of incremental marking with compaction.
+ // Tell collector to treat this code object in a special way and
+ // ignore all slots that might have been recorded on it.
+ isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
+ }
+ }
+}
+
+
+static int FindPatchAddressForReturnAddress(Code* code, int pc) {
+ DeoptimizationInputData* input_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int patch_count = input_data->ReturnAddressPatchCount();
+ for (int i = 0; i < patch_count; i++) {
+ int return_pc = input_data->ReturnAddressPc(i)->value();
+ int patch_pc = input_data->PatchedAddressPc(i)->value();
+ // If the supplied pc matches the return pc, or if the address
+ // has already been patched, return the patch pc.
+ if (pc == return_pc || pc == patch_pc) {
+ return patch_pc;
+ }
+ }
+ return -1;
+}
+
+
+// For all marked turbofanned code on the stack, redirect the return
+// address to the code's deoptimization block.
+void Deoptimizer::PatchStackForMarkedCode(Isolate* isolate) {
+ // TODO(jarin): We should tolerate a missing patch entry for the topmost frame.
+ for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
+ it.Advance()) {
+ StackFrame::Type type = it.frame()->type();
+ if (type == StackFrame::OPTIMIZED) {
+ Code* code = it.frame()->LookupCode();
+ if (code->is_turbofanned() && code->marked_for_deoptimization()) {
+ JSFunction* function =
+ static_cast<OptimizedFrame*>(it.frame())->function();
+ Address* pc_address = it.frame()->pc_address();
+ int pc_offset =
+ static_cast<int>(*pc_address - code->instruction_start());
+ int new_pc_offset = FindPatchAddressForReturnAddress(code, pc_offset);
+
+ if (FLAG_trace_deopt) {
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[patching stack address for function: ");
+ function->PrintName(scope.file());
+ PrintF(scope.file(), " (Pc offset %i -> %i)]\n", pc_offset,
+ new_pc_offset);
+ }
+
+ CHECK_LE(0, new_pc_offset);
+ *pc_address += new_pc_offset - pc_offset;
+ }
+ }
}
}
reinterpret_cast<intptr_t>(object));
}
if (object->IsJSGlobalProxy()) {
- Object* proto = object->GetPrototype();
- CHECK(proto->IsJSGlobalObject());
- Context* native_context = GlobalObject::cast(proto)->native_context();
+ PrototypeIterator iter(object->GetIsolate(), object);
+ // TODO(verwaest): This CHECK will be hit if the global proxy is detached.
+ CHECK(iter.GetCurrent()->IsJSGlobalObject());
+ Context* native_context =
+ GlobalObject::cast(iter.GetCurrent())->native_context();
MarkAllCodeForContext(native_context);
DeoptimizeMarkedCodeForContext(native_context);
} else if (object->IsGlobalObject()) {
if (function->IsSmi()) {
function = NULL;
}
- ASSERT(from != NULL);
+ DCHECK(from != NULL);
if (function != NULL && function->IsOptimized()) {
function->shared()->increment_deopt_count();
if (bailout_type_ == Deoptimizer::SOFT) {
compiled_code_ = FindOptimizedCode(function, optimized_code);
#if DEBUG
- ASSERT(compiled_code_ != NULL);
+ DCHECK(compiled_code_ != NULL);
if (type == EAGER || type == SOFT || type == LAZY) {
- ASSERT(compiled_code_->kind() != Code::FUNCTION);
+ DCHECK(compiled_code_->kind() != Code::FUNCTION);
}
#endif
: compiled_code;
}
case Deoptimizer::DEBUGGER:
- ASSERT(optimized_code->contains(from_));
+ DCHECK(optimized_code->contains(from_));
return optimized_code;
}
FATAL("Could not find code for optimized function");
Deoptimizer::~Deoptimizer() {
- ASSERT(input_ == NULL && output_ == NULL);
- ASSERT(disallow_heap_allocation_ == NULL);
+ DCHECK(input_ == NULL && output_ == NULL);
+ DCHECK(disallow_heap_allocation_ == NULL);
delete trace_scope_;
}
addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
return kNotDeoptimizationEntry;
}
- ASSERT_EQ(0,
+ DCHECK_EQ(0,
static_cast<int>(addr - start) % table_entry_size_);
return static_cast<int>(addr - start) / table_entry_size_;
}
return data->PcAndState(i)->value();
}
}
- PrintF(stderr, "[couldn't find pc offset for node=%d]\n", id.ToInt());
- PrintF(stderr, "[method: %s]\n", shared->DebugName()->ToCString().get());
- // Print the source code if available.
- HeapStringAllocator string_allocator;
- StringStream stream(&string_allocator);
- shared->SourceCodePrint(&stream, -1);
- PrintF(stderr, "[source:\n%s\n]", stream.ToCString().get());
+ OFStream os(stderr);
+ os << "[couldn't find pc offset for node=" << id.ToInt() << "]\n"
+ << "[method: " << shared->DebugName()->ToCString().get() << "]\n"
+ << "[source:\n" << SourceCodeOf(shared) << "\n]" << endl;
FATAL("unable to find pc offset during deoptimization");
return -1;
Object* element = native_context->DeoptimizedCodeListHead();
while (!element->IsUndefined()) {
Code* code = Code::cast(element);
- ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
length++;
element = code->next_code_link();
}
compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
LOG(isolate(), CodeDeoptEvent(compiled_code_));
}
- ElapsedTimer timer;
+ base::ElapsedTimer timer;
// Determine basic deoptimization information. The optimized frame is
// described by the input data.
input_data->OptimizationId()->value(),
bailout_id_,
fp_to_sp_delta_);
- if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
+ if (bailout_type_ == EAGER || bailout_type_ == SOFT ||
+ (compiled_code_->is_hydrogen_stub())) {
compiled_code_->PrintDeoptLocation(trace_scope_->file(), bailout_id_);
}
}
TranslationIterator iterator(translations, translation_index);
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
+ DCHECK(Translation::BEGIN == opcode);
USE(opcode);
// Read the number of output frames and allocate an array for their
// descriptions.
int count = iterator.Next();
iterator.Next(); // Drop JS frames count.
- ASSERT(output_ == NULL);
+ DCHECK(output_ == NULL);
output_ = new FrameDescription*[count];
for (int i = 0; i < count; ++i) {
output_[i] = NULL;
intptr_t top_address;
if (is_bottommost) {
// Determine whether the input frame contains alignment padding.
- has_alignment_padding_ = HasAlignmentPadding(function) ? 1 : 0;
+ has_alignment_padding_ =
+ (!compiled_code_->is_turbofanned() && HasAlignmentPadding(function))
+ ? 1
+ : 0;
// 2 = context and function in the frame.
// If the optimized frame had alignment padding, adjust the frame pointer
// to point to the new position of the old frame pointer after padding
}
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || (input_->GetRegister(fp_reg.code()) +
+ DCHECK(!is_bottommost || (input_->GetRegister(fp_reg.code()) +
has_alignment_padding_ * kPointerSize) == fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value);
V8PRIxPTR " ; caller's fp\n",
fp_value, output_offset, value);
}
- ASSERT(!is_bottommost || !has_alignment_padding_ ||
+ DCHECK(!is_bottommost || !has_alignment_padding_ ||
(fp_value & kPointerSize) != 0);
if (FLAG_enable_ool_constant_pool) {
value = reinterpret_cast<intptr_t>(function);
// The function for the bottommost output frame should also agree with the
// input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ DCHECK(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
output_frame->SetFrameSlot(output_offset, value);
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(),
top_address + output_offset, output_offset, value, height - 1);
}
- ASSERT(0 == output_offset);
+ DCHECK(0 == output_offset);
Builtins* builtins = isolate_->builtins();
Code* adaptor_trampoline =
output_frame->SetFrameType(StackFrame::CONSTRUCT);
// Construct stub can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
+ DCHECK(frame_index > 0 && frame_index < output_count_ - 1);
+ DCHECK(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous
// reg = JSFunction context
//
- CHECK(compiled_code_->is_crankshafted() &&
- compiled_code_->kind() != Code::OPTIMIZED_FUNCTION);
- int major_key = compiled_code_->major_key();
+ CHECK(compiled_code_->is_hydrogen_stub());
+ int major_key = CodeStub::GetMajorKey(compiled_code_);
CodeStubInterfaceDescriptor* descriptor =
isolate_->code_stub_interface_descriptor(major_key);
+ // Check that there is a descriptor matching the major key.
+ // This fails if no such descriptor has been installed into the isolate.
+ DCHECK_EQ(descriptor->MajorKey(), major_key);
// The output frame must have room for all pushed register parameters
// and the standard stack frame slots. Include space for an argument
// object to the callee and optionally the space to pass the argument
// object to the stub failure handler.
- CHECK_GE(descriptor->register_param_count_, 0);
- int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
- sizeof(Arguments) + kPointerSize;
+ int param_count = descriptor->GetEnvironmentParameterCount();
+ CHECK_GE(param_count, 0);
+
+ int height_in_bytes = kPointerSize * param_count + sizeof(Arguments) +
+ kPointerSize;
int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
int input_frame_size = input_->GetFrameSize();
int output_frame_size = height_in_bytes + fixed_frame_size;
}
intptr_t caller_arg_count = 0;
- bool arg_count_known = !descriptor->stack_parameter_count_.is_valid();
+ bool arg_count_known = !descriptor->stack_parameter_count().is_valid();
// Build the Arguments object for the caller's parameters and a pointer to it.
output_frame_offset -= kPointerSize;
// Copy the register parameters to the failure frame.
int arguments_length_offset = -1;
- for (int i = 0; i < descriptor->register_param_count_; ++i) {
+ for (int i = 0; i < param_count; ++i) {
output_frame_offset -= kPointerSize;
DoTranslateCommand(iterator, 0, output_frame_offset);
- if (!arg_count_known && descriptor->IsParameterCountRegister(i)) {
+ if (!arg_count_known &&
+ descriptor->IsEnvironmentParameterCountRegister(i)) {
arguments_length_offset = output_frame_offset;
}
}
}
// Copy the double registers from the input into the output frame.
- CopySIMD128Registers(output_frame);
+ CopyDoubleRegisters(output_frame);
// Fill registers containing handler and number of parameters.
SetPlatformCompiledStubRegisters(output_frame, descriptor);
// Compute this frame's PC, state, and continuation.
Code* trampoline = NULL;
- StubFunctionMode function_mode = descriptor->function_mode_;
+ StubFunctionMode function_mode = descriptor->function_mode();
StubFailureTrampolineStub(isolate_,
function_mode).FindCodeInCache(&trampoline);
- ASSERT(trampoline != NULL);
+ DCHECK(trampoline != NULL);
output_frame->SetPc(reinterpret_cast<intptr_t>(
trampoline->instruction_start()));
if (FLAG_enable_ool_constant_pool) {
Handle<JSObject> arguments =
isolate_->factory()->NewArgumentsObject(function, length);
Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length);
- ASSERT_EQ(array->length(), length);
+ DCHECK_EQ(array->length(), length);
arguments->set_elements(*array);
materialized_objects_->Add(arguments);
for (int i = 0; i < length; ++i) {
Handle<Map> map = Map::GeneralizeAllFieldRepresentations(
Handle<Map>::cast(MaterializeNextValue()));
switch (map->instance_type()) {
+ case MUTABLE_HEAP_NUMBER_TYPE:
case HEAP_NUMBER_TYPE: {
// Reuse the HeapNumber value directly as it is already properly
- // tagged and skip materializing the HeapNumber explicitly.
+ // tagged and skip materializing the HeapNumber explicitly. Mutable heap
+ // numbers are made immutable here.
Handle<Object> object = MaterializeNextValue();
if (object_index < prev_materialized_count_) {
materialized_objects_->Add(Handle<Object>(
Handle<Object> Deoptimizer::MaterializeNextValue() {
int value_index = materialization_value_index_++;
Handle<Object> value = materialized_values_->at(value_index);
+ if (value->IsMutableHeapNumber()) {
+ HeapNumber::cast(*value)->set_map(isolate_->heap()->heap_number_map());
+ }
if (*value == isolate_->heap()->arguments_marker()) {
value = MaterializeNextHeapObject();
}
void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
- ASSERT_NE(DEBUGGER, bailout_type_);
+ DCHECK_NE(DEBUGGER, bailout_type_);
MaterializedObjectStore* materialized_store =
isolate_->materialized_object_store();
Memory::Object_at(d.destination()) = *num;
}
- // Materialize all float32x4 before looking at arguments because when the
- // output frames are used to materialize arguments objects later on they need
- // to already contain valid float32x4 values.
- for (int i = 0; i < deferred_float32x4s_.length(); i++) {
- SIMD128MaterializationDescriptor<Address> d = deferred_float32x4s_[i];
- float32x4_value_t x4 = d.value().f4;
- Handle<Object> float32x4 = isolate_->factory()->NewFloat32x4(x4);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- "Materialized a new float32x4 %p "
- "[float32x4(%e, %e, %e, %e)] in slot %p\n",
- reinterpret_cast<void*>(*float32x4),
- x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
- d.destination());
- }
- Memory::Object_at(d.destination()) = *float32x4;
- }
-
- // Materialize all float64x2 before looking at arguments because when the
- // output frames are used to materialize arguments objects later on they need
- // to already contain valid float64x2 values.
- for (int i = 0; i < deferred_float64x2s_.length(); i++) {
- SIMD128MaterializationDescriptor<Address> d = deferred_float64x2s_[i];
- float64x2_value_t x2 = d.value().d2;
- Handle<Object> float64x2 = isolate_->factory()->NewFloat64x2(x2);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- "Materialized a new float64x2 %p "
- "[float64x2(%e, %e)] in slot %p\n",
- reinterpret_cast<void*>(*float64x2),
- x2.storage[0], x2.storage[1],
- d.destination());
- }
- Memory::Object_at(d.destination()) = *float64x2;
- }
-
- // Materialize all int32x4 before looking at arguments because when the
- // output frames are used to materialize arguments objects later on they need
- // to already contain valid int32x4 values.
- for (int i = 0; i < deferred_int32x4s_.length(); i++) {
- SIMD128MaterializationDescriptor<Address> d = deferred_int32x4s_[i];
- int32x4_value_t x4 = d.value().i4;
- Handle<Object> int32x4 = isolate_->factory()->NewInt32x4(x4);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- "Materialized a new int32x4 %p "
- "[int32x4(%u, %u, %u, %u)] in slot %p\n",
- reinterpret_cast<void*>(*int32x4),
- x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
- d.destination());
- }
- Memory::Object_at(d.destination()) = *int32x4;
- }
-
-
// Materialize all heap numbers required for arguments/captured objects.
for (int i = 0; i < deferred_objects_double_values_.length(); i++) {
HeapNumberMaterializationDescriptor<int> d =
d.value(),
d.destination());
}
- ASSERT(values.at(d.destination())->IsTheHole());
+ DCHECK(values.at(d.destination())->IsTheHole());
values.Set(d.destination(), num);
}
// Play it safe and clear all object double values before we continue.
deferred_objects_double_values_.Clear();
- // Materialize all float32x4 values required for arguments/captured objects.
- for (int i = 0; i < deferred_objects_float32x4_values_.length(); i++) {
- SIMD128MaterializationDescriptor<int> d =
- deferred_objects_float32x4_values_[i];
- float32x4_value_t x4 = d.value().f4;
- Handle<Object> float32x4 = isolate_->factory()->NewFloat32x4(x4);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- "Materialized a new float32x4 %p "
- "[float32x4(%e, %e, %e, %e)] for object at %d\n",
- reinterpret_cast<void*>(*float32x4),
- x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
- d.destination());
- }
- ASSERT(values.at(d.destination())->IsTheHole());
- values.Set(d.destination(), float32x4);
- }
-
- // Play it safe and clear all object float32x4 values before we continue.
- deferred_objects_float32x4_values_.Clear();
-
- // Materialize all float64x2 values required for arguments/captured objects.
- for (int i = 0; i < deferred_objects_float64x2_values_.length(); i++) {
- SIMD128MaterializationDescriptor<int> d =
- deferred_objects_float64x2_values_[i];
- float64x2_value_t x2 = d.value().d2;
- Handle<Object> float64x2 = isolate_->factory()->NewFloat64x2(x2);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- "Materialized a new float64x2 %p "
- "[float64x2(%e, %e)] for object at %d\n",
- reinterpret_cast<void*>(*float64x2),
- x2.storage[0], x2.storage[1],
- d.destination());
- }
- ASSERT(values.at(d.destination())->IsTheHole());
- values.Set(d.destination(), float64x2);
- }
-
- // Play it safe and clear all object float64x2 values before we continue.
- deferred_objects_float64x2_values_.Clear();
-
- // Materialize all int32x4 values required for arguments/captured objects.
- for (int i = 0; i < deferred_objects_int32x4_values_.length(); i++) {
- SIMD128MaterializationDescriptor<int> d =
- deferred_objects_int32x4_values_[i];
- int32x4_value_t x4 = d.value().i4;
- Handle<Object> int32x4 = isolate_->factory()->NewInt32x4(x4);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- "Materialized a new int32x4 %p "
- "[int32x4(%u, %u, %u, %u)] for object at %d\n",
- reinterpret_cast<void*>(*int32x4),
- x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
- d.destination());
- }
- ASSERT(values.at(d.destination())->IsTheHole());
- values.Set(d.destination(), int32x4);
- }
-
- // Play it safe and clear all object int32x4 values before we continue.
- deferred_objects_int32x4_values_.Clear();
-
// Materialize arguments/captured objects.
if (!deferred_objects_.is_empty()) {
List<Handle<Object> > materialized_objects(deferred_objects_.length());
case Translation::INT32_REGISTER:
case Translation::UINT32_REGISTER:
case Translation::DOUBLE_REGISTER:
- case Translation::FLOAT32x4_REGISTER:
- case Translation::FLOAT64x2_REGISTER:
- case Translation::INT32x4_REGISTER:
case Translation::STACK_SLOT:
case Translation::INT32_STACK_SLOT:
case Translation::UINT32_STACK_SLOT:
case Translation::DOUBLE_STACK_SLOT:
- case Translation::FLOAT32x4_STACK_SLOT:
- case Translation::FLOAT64x2_STACK_SLOT:
- case Translation::INT32x4_STACK_SLOT:
case Translation::LITERAL: {
// The value is not part of any materialized object, so we can ignore it.
iterator->Skip(Translation::NumberOfOperandsFor(opcode));
return;
}
- case Translation::FLOAT32x4_REGISTER:
- case Translation::FLOAT64x2_REGISTER:
- case Translation::INT32x4_REGISTER: {
- int input_reg = iterator->Next();
- simd128_value_t value = input_->GetSIMD128Register(input_reg);
- if (trace_scope_ != NULL) {
- if (opcode == Translation::FLOAT32x4_REGISTER) {
- float32x4_value_t x4 = value.f4;
- PrintF(trace_scope_->file(),
- " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot),
- field_index);
- PrintF(trace_scope_->file(),
- "float32x4(%e, %e, %e, %e) ; %s\n",
- x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
- SIMD128Register::AllocationIndexToString(input_reg));
- } else if (opcode == Translation::FLOAT64x2_REGISTER) {
- float64x2_value_t x2 = value.d2;
- PrintF(trace_scope_->file(),
- " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot),
- field_index);
- PrintF(trace_scope_->file(),
- "float64x2(%e, %e) ; %s\n",
- x2.storage[0], x2.storage[1],
- SIMD128Register::AllocationIndexToString(input_reg));
- } else {
- ASSERT(opcode == Translation::INT32x4_REGISTER);
- int32x4_value_t x4 = value.i4;
- PrintF(trace_scope_->file(),
- " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot),
- field_index);
- PrintF(trace_scope_->file(),
- "int32x4(%u, %u, %u, %u) ; %s\n",
- x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
- SIMD128Register::AllocationIndexToString(input_reg));
- }
- }
- AddObjectSIMD128Value(value, opcode);
- return;
- }
-
case Translation::STACK_SLOT: {
int input_slot_index = iterator->Next();
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
return;
}
- case Translation::FLOAT32x4_STACK_SLOT:
- case Translation::FLOAT64x2_STACK_SLOT:
- case Translation::INT32x4_STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
- simd128_value_t value = input_->GetSIMD128FrameSlot(input_offset);
- if (trace_scope_ != NULL) {
- if (opcode == Translation::FLOAT32x4_STACK_SLOT) {
- float32x4_value_t x4 = value.f4;
- PrintF(trace_scope_->file(),
- " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot),
- field_index);
- PrintF(trace_scope_->file(),
- "float32x4(%e, %e, %e, %e) ; [sp + %d]\n",
- x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
- input_offset);
- } else if (opcode == Translation::FLOAT64x2_STACK_SLOT) {
- float64x2_value_t x2 = value.d2;
- PrintF(trace_scope_->file(),
- " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot),
- field_index);
- PrintF(trace_scope_->file(),
- "float64x2(%e, %e) ; [sp + %d]\n",
- x2.storage[0], x2.storage[1],
- input_offset);
- } else {
- ASSERT(opcode == Translation::INT32x4_STACK_SLOT);
- int32x4_value_t x4 = value.i4;
- PrintF(trace_scope_->file(),
- " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot),
- field_index);
- PrintF(trace_scope_->file(),
- "int32x4(%u, %u, %u, %u) ; [sp + %d]\n",
- x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
- input_offset);
- }
- }
- AddObjectSIMD128Value(value, opcode);
- return;
- }
-
case Translation::LITERAL: {
Object* literal = ComputeLiteral(iterator->Next());
if (trace_scope_ != NULL) {
return;
}
- case Translation::FLOAT32x4_REGISTER:
- case Translation::FLOAT64x2_REGISTER:
- case Translation::INT32x4_REGISTER: {
- int input_reg = iterator->Next();
- simd128_value_t value = input_->GetSIMD128Register(input_reg);
- if (trace_scope_ != NULL) {
- if (opcode == Translation::FLOAT32x4_REGISTER) {
- float32x4_value_t x4 = value.f4;
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ":"
- " [top + %d] <- float32x4(%e, %e, %e, %e) ; %s\n",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
- SIMD128Register::AllocationIndexToString(input_reg));
- } else if (opcode == Translation::FLOAT64x2_REGISTER) {
- float64x2_value_t x2 = value.d2;
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ":"
- " [top + %d] <- float64x2(%e, %e) ; %s\n",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- x2.storage[0], x2.storage[1],
- SIMD128Register::AllocationIndexToString(input_reg));
- } else {
- ASSERT(opcode == Translation::INT32x4_REGISTER);
- int32x4_value_t x4 = value.i4;
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ":"
- " [top + %d] <- int32x4(%u, %u, %u, %u) ; %s\n",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
- SIMD128Register::AllocationIndexToString(input_reg));
- }
- }
- // We save the untagged value on the side and store a GC-safe
- // temporary placeholder in the frame.
- AddSIMD128Value(output_[frame_index]->GetTop() + output_offset, value,
- opcode);
- output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
- return;
- }
-
case Translation::STACK_SLOT: {
int input_slot_index = iterator->Next();
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
return;
}
- case Translation::FLOAT32x4_STACK_SLOT:
- case Translation::FLOAT64x2_STACK_SLOT:
- case Translation::INT32x4_STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
- simd128_value_t value = input_->GetSIMD128FrameSlot(input_offset);
- if (trace_scope_ != NULL) {
- if (opcode == Translation::FLOAT32x4_STACK_SLOT) {
- float32x4_value_t x4 = value.f4;
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": "
- "[top + %d] <- float32x4(%e, %e, %e, %e) ; [sp + %d]\n",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
- input_offset);
- } else if (opcode == Translation::FLOAT64x2_STACK_SLOT) {
- float64x2_value_t x2 = value.d2;
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": "
- "[top + %d] <- float64x2(%e, %e) ; [sp + %d]\n",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- x2.storage[0], x2.storage[1],
- input_offset);
- } else {
- ASSERT(opcode == Translation::INT32x4_STACK_SLOT);
- int32x4_value_t x4 = value.i4;
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": "
- "[top + %d] <- int32x4(%u, %u, %u, %u) ; [sp + %d]\n",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
- input_offset);
- }
- }
- // We save the untagged value on the side and store a GC-safe
- // temporary placeholder in the frame.
- AddSIMD128Value(output_[frame_index]->GetTop() + output_offset, value,
- opcode);
- output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
- return;
- }
-
case Translation::LITERAL: {
Object* literal = ComputeLiteral(iterator->Next());
if (trace_scope_ != NULL) {
}
-void Deoptimizer::AddObjectSIMD128Value(simd128_value_t value,
- int translation_opcode) {
- deferred_objects_tagged_values_.Add(isolate()->heap()->the_hole_value());
- SIMD128MaterializationDescriptor<int> value_desc(
- deferred_objects_tagged_values_.length() - 1, value);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(translation_opcode);
- if (opcode == Translation::FLOAT32x4_REGISTER ||
- opcode == Translation::FLOAT32x4_STACK_SLOT) {
- deferred_objects_float32x4_values_.Add(value_desc);
- } else if (opcode == Translation::FLOAT64x2_REGISTER ||
- opcode == Translation::FLOAT64x2_STACK_SLOT) {
- deferred_objects_float64x2_values_.Add(value_desc);
- } else {
- ASSERT(opcode == Translation::INT32x4_REGISTER ||
- opcode == Translation::INT32x4_STACK_SLOT);
- deferred_objects_int32x4_values_.Add(value_desc);
- }
-}
-
-
void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
HeapNumberMaterializationDescriptor<Address> value_desc(
reinterpret_cast<Address>(slot_address), value);
}
-void Deoptimizer::AddSIMD128Value(intptr_t slot_address,
- simd128_value_t value,
- int translation_opcode) {
- SIMD128MaterializationDescriptor<Address> value_desc(
- reinterpret_cast<Address>(slot_address), value);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(translation_opcode);
- if (opcode == Translation::FLOAT32x4_REGISTER ||
- opcode == Translation::FLOAT32x4_STACK_SLOT) {
- deferred_float32x4s_.Add(value_desc);
- } else if (opcode == Translation::FLOAT64x2_REGISTER ||
- opcode == Translation::FLOAT64x2_STACK_SLOT) {
- deferred_float64x2s_.Add(value_desc);
- } else {
- ASSERT(opcode == Translation::INT32x4_REGISTER ||
- opcode == Translation::INT32x4_STACK_SLOT);
- deferred_int32x4s_.Add(value_desc);
- }
-}
-
-
void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
BailoutType type,
int max_entry_id) {
GenerateDeoptimizationEntries(&masm, entry_count, type);
CodeDesc desc;
masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
MemoryChunk* chunk = data->deopt_entry_code_[type];
CHECK(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
chunk->CommitArea(desc.instr_size);
CopyBytes(chunk->area_start(), desc.buffer,
static_cast<size_t>(desc.instr_size));
- CPU::FlushICache(chunk->area_start(), desc.instr_size);
+ CpuFeatures::FlushICache(chunk->area_start(), desc.instr_size);
data->deopt_entry_code_entries_[type] = entry_count;
}
Object* FrameDescription::GetExpression(int index) {
- ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
+ DCHECK_EQ(StackFrame::JAVA_SCRIPT, type_);
unsigned offset = GetOffsetFromSlotIndex(index);
return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
}
// bit of zero (marks the end).
uint32_t bits = 0;
for (int i = 0; true; i += 7) {
- ASSERT(HasNext());
+ DCHECK(HasNext());
uint8_t next = buffer_->get(index_++);
bits |= (next >> 1) << i;
if ((next & 1) == 0) break;
}
-void Translation::StoreSIMD128Register(SIMD128Register reg, Opcode opcode) {
- buffer_->Add(opcode, zone());
- buffer_->Add(SIMD128Register::ToAllocationIndex(reg), zone());
-}
-
-
void Translation::StoreStackSlot(int index) {
buffer_->Add(STACK_SLOT, zone());
buffer_->Add(index, zone());
}
-void Translation::StoreSIMD128StackSlot(int index, Opcode opcode) {
- buffer_->Add(opcode, zone());
- buffer_->Add(index, zone());
-}
-
-
void Translation::StoreLiteral(int literal_id) {
buffer_->Add(LITERAL, zone());
buffer_->Add(literal_id, zone());
case INT32_REGISTER:
case UINT32_REGISTER:
case DOUBLE_REGISTER:
- case FLOAT32x4_REGISTER:
- case FLOAT64x2_REGISTER:
- case INT32x4_REGISTER:
case STACK_SLOT:
case INT32_STACK_SLOT:
case UINT32_STACK_SLOT:
case DOUBLE_STACK_SLOT:
- case FLOAT32x4_STACK_SLOT:
- case FLOAT64x2_STACK_SLOT:
- case INT32x4_STACK_SLOT:
case LITERAL:
case COMPILED_STUB_FRAME:
return 1;
case Translation::INT32_REGISTER:
case Translation::UINT32_REGISTER:
case Translation::DOUBLE_REGISTER:
- case Translation::FLOAT32x4_REGISTER:
- case Translation::FLOAT64x2_REGISTER:
- case Translation::INT32x4_REGISTER:
// We are at safepoint which corresponds to call. All registers are
// saved by caller so there would be no live registers at this
// point. Thus these translation commands should not be used.
return SlotRef(slot_addr, SlotRef::DOUBLE);
}
- case Translation::FLOAT32x4_STACK_SLOT: {
- int slot_index = iterator->Next();
- Address slot_addr = SlotAddress(frame, slot_index);
- return SlotRef(slot_addr, SlotRef::FLOAT32x4);
- }
-
- case Translation::FLOAT64x2_STACK_SLOT: {
- int slot_index = iterator->Next();
- Address slot_addr = SlotAddress(frame, slot_index);
- return SlotRef(slot_addr, SlotRef::FLOAT64x2);
- }
-
- case Translation::INT32x4_STACK_SLOT: {
- int slot_index = iterator->Next();
- Address slot_addr = SlotAddress(frame, slot_index);
- return SlotRef(slot_addr, SlotRef::INT32x4);
- }
-
case Translation::LITERAL: {
int literal_index = iterator->Next();
return SlotRef(data->GetIsolate(),
return Handle<Object>(Memory::Object_at(addr_), isolate);
case INT32: {
+#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT
+ int value = Memory::int32_at(addr_ + kIntSize);
+#else
int value = Memory::int32_at(addr_);
+#endif
if (Smi::IsValid(value)) {
return Handle<Object>(Smi::FromInt(value), isolate);
} else {
}
case UINT32: {
+#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT
+ uint32_t value = Memory::uint32_at(addr_ + kIntSize);
+#else
uint32_t value = Memory::uint32_at(addr_);
+#endif
if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
return Handle<Object>(Smi::FromInt(static_cast<int>(value)), isolate);
} else {
return isolate->factory()->NewNumber(value);
}
- case FLOAT32x4:
- return isolate->factory()->NewFloat32x4(read_simd128_value(addr_).f4);
-
- case FLOAT64x2:
- return isolate->factory()->NewFloat64x2(read_simd128_value(addr_).d2);
-
- case INT32x4:
- return isolate->factory()->NewInt32x4(read_simd128_value(addr_).i4);
-
case LITERAL:
return literal_;
// TODO(jarin) this should be unified with the code in
// Deoptimizer::MaterializeNextHeapObject()
switch (map->instance_type()) {
+ case MUTABLE_HEAP_NUMBER_TYPE:
case HEAP_NUMBER_TYPE: {
// Reuse the HeapNumber value directly as it is already properly
// tagged and skip materializing the HeapNumber explicitly.