}
+ExternalReference ExternalReference::frame_alignment_marker_location(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->factory()->frame_alignment_marker().location());
+}
+
+
ExternalReference ExternalReference::roots_address(Isolate* isolate) {
return ExternalReference(isolate->heap()->roots_address());
}
// Static variable Factory::arguments_marker.location()
static ExternalReference arguments_marker_location(Isolate* isolate);
+ // Static variable Factory::frame_alignment_marker.location()
+ static ExternalReference frame_alignment_marker_location(Isolate* isolate);
+
// Static variable Heap::roots_address()
static ExternalReference roots_address(Isolate* isolate);
input_(NULL),
output_count_(0),
output_(NULL),
+ frame_alignment_marker_(isolate->heap()->frame_alignment_marker()),
+ has_alignment_padding_(0),
deferred_heap_numbers_(0) {
if (FLAG_trace_deopt && type != OSR) {
if (type == DEBUGGER) {
return OFFSET_OF(Deoptimizer, output_count_);
}
static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
+ static int frame_alignment_marker_offset() {
+ return OFFSET_OF(Deoptimizer, frame_alignment_marker_);
+ }
+ static int has_alignment_padding_offset() {
+ return OFFSET_OF(Deoptimizer, has_alignment_padding_);
+ }
static int GetDeoptimizedCodeCount(Isolate* isolate);
// Array of output frame descriptions.
FrameDescription** output_;
+ // Frames can be dynamically padded on ia32 to align untagged doubles.
+ Object* frame_alignment_marker_;
+ intptr_t has_alignment_padding_;
+
List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
static const int table_entry_size_;
set_the_hole_value(obj);
{ MaybeObject* maybe_obj = CreateOddball("arguments_marker",
- Smi::FromInt(-4),
+ Smi::FromInt(-2),
Oddball::kArgumentMarker);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_arguments_marker(obj);
{ MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
- Smi::FromInt(-2),
+ Smi::FromInt(-3),
Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_no_interceptor_result_sentinel(obj);
{ MaybeObject* maybe_obj = CreateOddball("termination_exception",
- Smi::FromInt(-3),
+ Smi::FromInt(-4),
Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_termination_exception(obj);
+ { MaybeObject* maybe_obj = CreateOddball("frame_alignment_marker",
+ Smi::FromInt(-5),
+ Oddball::kOther);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_frame_alignment_marker(obj);
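+ // The hidden oddballs above were renumbered so that the new
+ // frame_alignment_marker extends the sequence down to -5, matching
+ // Oddball::kLeastHiddenOddballNumber.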
+ STATIC_ASSERT(Oddball::kLeastHiddenOddballNumber == -5);
+
// Allocate the empty string.
{ MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
V(Object, true_value, TrueValue) \
V(Object, false_value, FalseValue) \
V(Object, arguments_marker, ArgumentsMarker) \
+ V(Object, frame_alignment_marker, FrameAlignmentMarker) \
V(Map, heap_number_map, HeapNumberMap) \
V(Map, global_context_map, GlobalContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
+ // All OSR stack frames are dynamically aligned to an 8-byte boundary.
+ int frame_pointer = input_->GetRegister(ebp.code());
+ if ((frame_pointer & 0x4) == 0) {
+ // Return address at FP + 4 should be aligned, so FP mod 8 should be 4.
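+ // (E.g. an 8-aligned FP ending in 0x8 becomes 0x4, so the return
+ // address slot at FP + 4 lands back on an 8-byte boundary.)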
+ frame_pointer -= kPointerSize;
+ has_alignment_padding_ = 1;
+ }
+ output_[0]->SetRegister(ebp.code(), frame_pointer);
output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
unsigned pc_offset = data->OsrPcOffset()->value();
// top address and the current frame's size.
uint32_t top_address;
if (is_bottommost) {
- // 2 = context and function in the frame.
- top_address =
- input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
+ // If the optimized frame had alignment padding, adjust the frame top to
+ // point to the new position of the old frame pointer after padding is
+ // removed. Subtract 2 * kPointerSize for the context and function slots.
+ top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
+ height_in_bytes + has_alignment_padding_ * kPointerSize;
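+ // (The unoptimized frame is thus laid out directly below the old frame
+ // pointer, with the padding word, when present, reclaimed.)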
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
}
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
+ ASSERT(!is_bottommost ||
+ input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize
+ == fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
if (FLAG_trace_deopt) {
__ cmp(ecx, Operand(esp));
__ j(not_equal, &pop_loop);
+ // If frame was dynamically aligned, pop padding.
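+ // The word just above the copied-off input frame is the padding, if
+ // any: pop it, and push it back only if it is not the marker.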
+ Label sentinel, sentinel_done;
+ __ pop(Operand(ecx));
+ __ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
+ __ j(equal, &sentinel);
+ __ push(Operand(ecx));
+ __ jmp(&sentinel_done);
+ __ bind(&sentinel);
+ __ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+ Immediate(1));
+ __ bind(&sentinel_done);
// Compute the output frame in the deoptimizer.
__ push(eax);
__ PrepareCallCFunction(1, ebx);
}
__ pop(eax);
+ if (type() == OSR) {
+ // If alignment padding was added, push the sentinel.
+ Label no_osr_padding;
+ __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+ Immediate(0));
+ __ j(equal, &no_osr_padding, Label::kNear);
+ __ push(Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
+ __ bind(&no_osr_padding);
+ }
+
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop;
// Outer loop state: eax = current FrameDescription**, edx = one past the
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
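+ // Dynamically realign frames with several double spill slots, and all
+ // OSR frames (the deoptimizer treats OSR frames as always aligned).
+ // The two-slot threshold is presumably a cost heuristic.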
+ dynamic_frame_alignment_ = chunk()->num_double_slots() > 2 ||
+ info()->osr_ast_id() != AstNode::kNoNumber;
+
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
__ bind(&ok);
}
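+ // If dynamic alignment is on and esp is misaligned, shift the incoming
+ // arguments, receiver and return address down one word and store
+ // frame_alignment_marker in the vacated slot so the epilogue and the
+ // deoptimizer can detect the padding.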
+ if (dynamic_frame_alignment_) {
+ Label do_not_pad, align_loop;
+ STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
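+ // A single padding word suffices precisely because a double is two
+ // words wide.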
+ // Align esp to a multiple of 2 * kPointerSize.
+ __ test(esp, Immediate(kPointerSize));
+ __ j(zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ // Copy arguments, receiver, and return address.
+ __ mov(ecx, Immediate(scope()->num_parameters() + 2));
+
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0),
+ Immediate(isolate()->factory()->frame_alignment_marker()));
+
+ __ bind(&do_not_pad);
+ }
+
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
}
__ mov(esp, ebp);
__ pop(ebp);
+ if (dynamic_frame_alignment_) {
+ Label aligned;
+ // The frame alignment marker (padding) is below the arguments and the
+ // receiver, so its return-address-relative offset is
+ // (num_arguments + 2) words.
+ __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
+ Immediate(factory()->frame_alignment_marker()));
+ __ j(not_equal, &aligned);
+ __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
+ __ bind(&aligned);
+ }
__ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
+ dynamic_frame_alignment_(false),
deferred_(8),
osr_pc_offset_(-1),
deoptimization_reloc_size(),
int strict_mode_flag() const {
return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
}
+ bool dynamic_frame_alignment() const { return dynamic_frame_alignment_; }
+ void set_dynamic_frame_alignment(bool value) {
+ dynamic_frame_alignment_ = value;
+ }
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
int inlined_function_count_;
Scope* const scope_;
Status status_;
+ bool dynamic_frame_alignment_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
int LChunk::GetNextSpillIndex(bool is_double) {
// Skip a slot if needed so a double-width slot gets an even index.
- if (is_double) spill_slot_count_++;
+ if (is_double) {
+ spill_slot_count_ |= 1; // Make it odd, so incrementing makes it even.
+ spill_slot_count_++;
+ num_double_slots_++;
+ }
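+ // (E.g. a count of 3 or 4 above becomes 4 or 6, so the index returned
+ // for a double-width slot is always even.)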
return spill_slot_count_++;
}
graph_(graph),
instructions_(32),
pointer_maps_(8),
+ num_double_slots_(0),
inlined_closures_(1) { }
void AddInstruction(LInstruction* instruction, HBasicBlock* block);
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
+ int num_double_slots() const { return num_double_slots_; }
+
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
+ int num_double_slots_;
ZoneList<Handle<JSFunction> > inlined_closures_;
};
} else {
ASSERT(number->IsSmi());
int value = Smi::cast(number)->value();
- // Hidden oddballs have negative smis.
- const int kLeastHiddenOddballNumber = -4;
ASSERT(value <= 1);
+ // Hidden oddballs have negative smis.
ASSERT(value >= kLeastHiddenOddballNumber);
}
}
static const byte kUndefined = 5;
static const byte kOther = 6;
+ // The ToNumber value of a hidden oddball is a negative smi.
+ static const int kLeastHiddenOddballNumber = -5;
+
typedef FixedBodyDescriptor<kToStringOffset,
kToNumberOffset + kPointerSize,
kSize> BodyDescriptor;