// Remember source position and frame to handle step next.
thread_local_.last_statement_position_ =
debug_info->code()->SourceStatementPosition(frame->pc());
- thread_local_.last_fp_ = frame->fp();
+ thread_local_.last_fp_ = frame->UnpaddedFP();
} else {
  // If there's a restarter frame on top of the stack, just get the pointer
  // to the function which is going to be restarted.
// propagated on the next Debug::Break.
thread_local_.last_statement_position_ =
debug_info->code()->SourceStatementPosition(frame->pc());
- thread_local_.last_fp_ = frame->fp();
+ thread_local_.last_fp_ = frame->UnpaddedFP();
}
// Step in or Step in min
// Continue if we are still on the same frame and in the same statement.
int current_statement_position =
break_location_iterator->code()->SourceStatementPosition(frame->pc());
- return thread_local_.last_fp_ == frame->fp() &&
+ return thread_local_.last_fp_ == frame->UnpaddedFP() &&
thread_local_.last_statement_position_ == current_statement_position;
}
void Debug::ActivateStepIn(StackFrame* frame) {
ASSERT(!StepOutActive());
- thread_local_.step_into_fp_ = frame->fp();
+ thread_local_.step_into_fp_ = frame->UnpaddedFP();
}
void Debug::ActivateStepOut(StackFrame* frame) {
ASSERT(!StepInActive());
- thread_local_.step_out_fp_ = frame->fp();
+ thread_local_.step_out_fp_ = frame->UnpaddedFP();
}
bailout_type_(type),
from_(from),
fp_to_sp_delta_(fp_to_sp_delta),
+ has_alignment_padding_(0),
input_(NULL),
output_count_(0),
jsframe_count_(0),
PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function));
function->PrintName();
- PrintF(" => node=%u, pc=0x%08" V8PRIxPTR ", state=%s, took %0.3f ms]\n",
+ PrintF(" => node=%u, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
+ " took %0.3f ms]\n",
node_id,
output_[index]->GetPc(),
FullCodeGenerator::State2String(
static_cast<FullCodeGenerator::State>(
output_[index]->GetState()->value())),
+ has_alignment_padding_ ? "with padding" : "no padding",
ms);
}
}
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] ",
+ PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
output_offset,
input_value,
input_offset);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- %" V8PRIdPTR " ; [esp + %d] (%s)\n",
+ PrintF("[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
output_offset,
value,
input_offset,
input_->GetOffsetFromSlotIndex(input_slot_index);
double value = input_->GetDoubleFrameSlot(input_offset);
if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n",
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
value,
}
static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
+ static int has_alignment_padding_offset() {
+ return OFFSET_OF(Deoptimizer, has_alignment_padding_);
+ }
+
static int GetDeoptimizedCodeCount(Isolate* isolate);
static const int kNotDeoptimizationEntry = -1;
BailoutType bailout_type_;
Address from_;
int fp_to_sp_delta_;
+ int has_alignment_padding_;
// Input frame description.
FrameDescription* input_;
}
+Address StackFrame::UnpaddedFP() const {
+#if defined(V8_TARGET_ARCH_IA32)
+ if (!is_optimized()) return fp();
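+ // Optimized frames record their dynamic alignment state in the first
+ // local slot; it is written by LCodeGen::GeneratePrologue.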
+ int32_t alignment_state = Memory::int32_at(
+ fp() + JavaScriptFrameConstants::kDynamicAlignmentStateOffset);
+
+ return (alignment_state == kAlignmentPaddingPushed) ?
+ (fp() + kPointerSize) : fp();
+#else
+ return fp();
+#endif
+}
+
+
Code* EntryFrame::unchecked_code() const {
return HEAP->raw_unchecked_js_entry_code();
}
Address fp() const { return state_.fp; }
Address caller_sp() const { return GetCallerStackPointer(); }
+ // If this frame is optimized and was dynamically aligned, return its old
+ // unaligned frame pointer. When the frame is deoptimized its FP will shift
+ // up one word and become unaligned.
+ Address UnpaddedFP() const;
+
Address pc() const { return *pc_address(); }
void set_pc(Address pc) { *pc_address() = pc; }
blocks_(8, zone),
values_(16, zone),
phi_list_(NULL),
- zone_(zone) {
+ zone_(zone),
+ is_recursive_(false) {
start_environment_ =
new(zone) HEnvironment(NULL, info->scope(), info->closure(), zone);
start_environment_->set_ast_id(AstNode::kFunctionEntryId);
return;
}
if (TryInlineCall(expr)) return;
+
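+ // A direct self-call marks the graph as recursive; LCodeGen uses this
+ // when deciding whether to dynamically align the frame.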
+ if (expr->target().is_identical_to(info()->closure())) {
+ graph()->MarkRecursive();
+ }
+
call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
argument_count));
} else {
osr_values_.set(values);
}
+ void MarkRecursive() {
+ is_recursive_ = true;
+ }
+
+ bool is_recursive() const {
+ return is_recursive_;
+ }
+
private:
void Postorder(HBasicBlock* block,
BitVector* visited,
Zone* zone_;
+ bool is_recursive_;
+
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function_));
function_->PrintName();
- PrintF(" => node=%u, frame=%d->%d]\n",
+ PrintF(" => node=%u, frame=%d->%d, ebp:esp=0x%08x:0x%08x]\n",
ast_id,
input_frame_size,
- output_frame_size);
+ output_frame_size,
+ input_->GetRegister(ebp.code()),
+ input_->GetRegister(esp.code()));
}
// There's only one output frame in the OSR case.
name = "function";
break;
}
- PrintF(" [esp + %d] <- 0x%08x ; [esp + %d] (fixed part - %s)\n",
+ PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
output_offset,
input_value,
input_offset,
output_offset -= kPointerSize;
}
+ // All OSR stack frames are dynamically aligned to an 8-byte boundary.
+ int frame_pointer = input_->GetRegister(ebp.code());
+ if ((frame_pointer & kPointerSize) != 0) {
+ frame_pointer -= kPointerSize;
+ has_alignment_padding_ = 1;
+ }
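+ // The adjusted frame pointer becomes the OSR frame's ebp below.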
+
+ int32_t alignment_state = (has_alignment_padding_ == 1) ?
+ kAlignmentPaddingPushed :
+ kNoAlignmentPadding;
+ if (FLAG_trace_osr) {
+ PrintF(" [sp + %d] <- 0x%08x ; (alignment state)\n",
+ output_offset,
+ alignment_state);
+ }
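+ // Record the alignment state in the optimized frame's reserved first
+ // spill slot.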
+ output_[0]->SetFrameSlot(output_offset, alignment_state);
+ output_offset -= kPointerSize;
+
// Translate the rest of the frame.
while (ok && input_offset >= 0) {
ok = DoOsrTranslateCommand(&iterator, &input_offset);
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
+ output_[0]->SetRegister(ebp.code(), frame_pointer);
output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
unsigned pc_offset = data->OsrPcOffset()->value();
ASSERT(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
+ // Compute the incoming parameter translation.
+ int parameter_count = function->shared()->formal_parameter_count() + 1;
+ unsigned output_offset = output_frame_size;
+ unsigned input_offset = input_frame_size;
+
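+ // The alignment state is the optimized (input) frame's first local slot,
+ // just below the incoming parameters and the fixed part of the frame.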
+ unsigned alignment_state_offset =
+ input_offset - parameter_count * kPointerSize -
+ StandardFrameConstants::kFixedFrameSize -
+ kPointerSize;
+ ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
+ JavaScriptFrameConstants::kLocal0Offset);
+
// The top address for the bottommost output frame can be computed from
// the input frame pointer and the output frame's height. For all
// subsequent output frames, it can be computed from the previous one's
// top address and the current frame's size.
uint32_t top_address;
if (is_bottommost) {
+ int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
+ has_alignment_padding_ =
+ (alignment_state == kAlignmentPaddingPushed) ? 1 : 0;
// 2 = context and function in the frame.
- top_address =
- input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
+ // If the optimized frame had alignment padding, adjust the frame pointer
+ // to point to the new position of the old frame pointer after padding
+ // is removed.
+ top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
+ height_in_bytes + has_alignment_padding_ * kPointerSize;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
output_frame->SetTop(top_address);
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
+ ASSERT(!is_bottommost ||
+ (input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize) ==
+ fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
+ ASSERT(!is_bottommost || !has_alignment_padding_ ||
+ (fp_value & kPointerSize) != 0);
// For the bottommost output frame the context can be gotten from the input
// frame. For all subsequent output frames it can be gotten from the function
}
__ pop(eax);
+ if (type() != OSR) {
+ // If frame was dynamically aligned, pop padding.
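+ // The input frame has already been unwound into the Deoptimizer's input
+ // FrameDescription, so the padding word (if the optimized prologue pushed
+ // one) is the next word on the stack.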
+ Label no_padding;
+ __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+ Immediate(0));
+ __ j(equal, &no_padding);
+ __ pop(ecx);
+ if (FLAG_debug_code) {
+ __ cmp(ecx, Immediate(kAlignmentZapValue));
+ __ Assert(equal, "alignment marker expected");
+ }
+ __ bind(&no_padding);
+ } else {
+ // If frame needs dynamic alignment push padding.
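+ // Push the padding word that a dynamically aligned prologue would have
+ // pushed, so the new optimized frame sees the expected layout above it.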
+ Label no_padding;
+ __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+ Immediate(0));
+ __ j(equal, &no_padding);
+ __ push(Immediate(kAlignmentZapValue));
+ __ bind(&no_padding);
+ }
+
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop;
// Outer loop state: eax = current FrameDescription**, edx = one past the
// Number of registers for which space is reserved in safepoints.
const int kNumSafepointRegisters = 8;
+const int kNoAlignmentPadding = 0;
+const int kAlignmentPaddingPushed = 2;
+const int kAlignmentZapValue = 0x12345678; // Not heap object tagged.
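+// kAlignmentPaddingPushed is stored in a frame's alignment state slot when a
+// padding word was inserted; kAlignmentZapValue fills the padding word itself
+// so debug code can check for it.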
+
// ----------------------------------------------------
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize;
+
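+ // Rough layout of a dynamically aligned optimized frame, higher addresses
+ // first:
+ //   [padding word = kAlignmentZapValue]  (only if alignment was needed)
+ //   receiver, parameters
+ //   return address
+ //   caller's ebp                         <-- ebp
+ //   context, function
+ //   alignment state (kDynamicAlignmentStateOffset == kLocal0Offset)
+ //   spill slots...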
+ static const int kDynamicAlignmentStateOffset = kLocal0Offset;
};
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
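+ // Dynamically align frames that have more than two double spill slots and
+ // are not recursive; OSR entry frames are always aligned.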
+ dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 &&
+ !chunk()->graph()->is_recursive()) ||
+ info()->osr_ast_id() != AstNode::kNoNumber;
+
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
__ bind(&ok);
}
+
+ if (dynamic_frame_alignment_) {
+ // Move state of dynamic frame alignment into edx.
+ __ mov(edx, Immediate(kNoAlignmentPadding));
+
+ Label do_not_pad, align_loop;
+ STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+ // Align esp + 4 to a multiple of 2 * kPointerSize.
+ __ test(esp, Immediate(kPointerSize));
+ __ j(not_zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ __ mov(edx, Immediate(kAlignmentPaddingPushed));
+ // Copy arguments, receiver, and return address.
+ __ mov(ecx, Immediate(scope()->num_parameters() + 2));
+
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
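+ // The vacated slot above the receiver now holds the zap marker; DoReturn
+ // pops it together with the arguments.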
+ __ bind(&do_not_pad);
+ }
+
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
__ push(edi); // Callee's JS function.
+ if (dynamic_frame_alignment_ && FLAG_debug_code) {
+ __ test(esp, Immediate(kPointerSize));
+ __ Assert(zero, "frame is expected to be aligned");
+ }
+
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
- if (slots > 0) {
+ ASSERT_GE(slots, 1);
+ if (slots == 1) {
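+ // The single spill slot is the reserved alignment state slot; fill it
+ // directly.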
+ if (dynamic_frame_alignment_) {
+ __ push(edx);
+ } else {
+ __ push(Immediate(kNoAlignmentPadding));
+ }
+ } else {
if (FLAG_debug_code) {
__ mov(Operand(eax), Immediate(slots));
Label loop;
__ j(not_zero, &loop);
} else {
__ sub(Operand(esp), Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
+ #ifdef _MSC_VER
// On windows, you may not access the stack more than one page below
// the most recently mapped page. To make the allocated area randomly
// accessible, we write to each page in turn (the value is irrelevant).
offset -= kPageSize) {
__ mov(Operand(esp, offset), eax);
}
-#endif
+ #endif
+ }
+
+ // Store dynamic frame alignment state in the first local.
+ if (dynamic_frame_alignment_) {
+ __ mov(Operand(ebp,
+ JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
+ edx);
+ } else {
+ __ mov(Operand(ebp,
+ JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
+ Immediate(kNoAlignmentPadding));
}
}
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
+ if (dynamic_frame_alignment_) {
+ // Fetch the state of the dynamic frame alignment.
+ __ mov(edx, Operand(ebp,
+ JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
+ }
__ mov(esp, ebp);
__ pop(ebp);
+ if (dynamic_frame_alignment_) {
+ Label no_padding;
+ __ cmp(edx, Immediate(kNoAlignmentPadding));
+ __ j(equal, &no_padding);
+ if (FLAG_debug_code) {
+ __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
+ Immediate(kAlignmentZapValue));
+ __ Assert(equal, "expected alignment marker");
+ }
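+ // Pop the padding word along with the receiver and arguments.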
+ __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
+ __ bind(&no_padding);
+ }
__ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}
status_(UNUSED),
translations_(zone),
deferred_(8, zone),
+ dynamic_frame_alignment_(false),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
safepoints_(zone),
Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
+ bool dynamic_frame_alignment_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
int LChunk::GetNextSpillIndex(bool is_double) {
  // Skip a slot when allocating a double-width slot.
- if (is_double) spill_slot_count_++;
+ if (is_double) {
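+ // Force an odd slot index so the double slot is 8-byte aligned when the
+ // frame is dynamically aligned, and count double slots for the alignment
+ // heuristic in LCodeGen::GeneratePrologue.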
+ spill_slot_count_++;
+ spill_slot_count_ |= 1;
+ num_double_slots_++;
+ }
return spill_slot_count_++;
}
chunk_ = new(zone()) LChunk(info(), graph());
HPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
+
+ // Reserve the first spill slot for the state of dynamic alignment.
+ int alignment_state_index = chunk_->GetNextSpillIndex(false);
+ ASSERT_EQ(alignment_state_index, 0);
+ USE(alignment_state_index);
+
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* next = NULL;
public:
LChunk(CompilationInfo* info, HGraph* graph)
: spill_slot_count_(0),
+ num_double_slots_(0),
info_(info),
graph_(graph),
instructions_(32, graph->zone()),
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
+ int num_double_slots() const { return num_double_slots_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
private:
int spill_slot_count_;
+ int num_double_slots_;
CompilationInfo* info_;
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;