cgen_->DeleteFrame();
__ jmp(&entry_label_);
} else {
+ // Preconfigured entry frame is not used on ARM.
+ ASSERT(entry_frame_ == NULL);
// Forward jump. The current frame is added to the end of the list
// of frames reaching the target block and a jump to the merge code
// is emitted.
__ bind(&original_fall_through);
} else {
+ // Preconfigured entry frame is not used on ARM.
+ ASSERT(entry_frame_ == NULL);
// Forward branch. A copy of the current frame is added to the end
// of the list of frames reaching the target block and a branch to
// the merge code is emitted.
cgen_->frame()->SpillAll();
VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
target_frame->Adjust(1);
+ // We do not expect a call with a preconfigured entry frame.
+ ASSERT(entry_frame_ == NULL);
AddReachingFrame(target_frame);
__ bl(&merge_labels_.last());
bool had_fall_through = false;
if (cgen_->has_valid_frame()) {
had_fall_through = true;
AddReachingFrame(cgen_->frame());
RegisterFile empty;
cgen_->SetFrame(NULL, &empty);
}
// Compute the frame to use for entry to the block.
- ComputeEntryFrame(mergable_elements);
+ if (entry_frame_ == NULL) {
+ ComputeEntryFrame(mergable_elements);
+ }
// Some moves required to merge to an expected frame require purely
// frame state changes, and do not require any code generation.
#define V8_CODEGEN_INL_H_
#include "codegen.h"
+#include "register-allocator-inl.h"
namespace v8 { namespace internal {
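+// Take a snapshot of the code generator's current frame, extended
+// with the given result, and use it as the entry frame for the
+// deferred code.  The result is pushed so that it is part of the
+// snapshot and popped again so the caller's frame is left unchanged.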
+void DeferredCode::SetEntryFrame(Result* arg) {
+ ASSERT(generator()->has_valid_frame());
+ generator()->frame()->Push(arg);
+ enter()->set_entry_frame(new VirtualFrame(generator()->frame()));
+ *arg = generator()->frame()->Pop();
+}
+
+
// -----------------------------------------------------------------------------
// Support for "structured" code comments.
//
MacroAssembler* masm() const { return masm_; }
CodeGenerator* generator() const { return generator_; }
+ // Set the virtual frame for entry to the deferred code as a
+ // snapshot of the code generator's current frame (plus the given
+ // result). This is optional, but should be done before branching
+ // or jumping to the deferred code.
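+ // A typical call site (see the inlined smi operations in
+ // codegen-ia32.cc in this change):
+ //   deferred->SetEntryFrame(operand);
+ //   deferred->enter()->Branch(overflow, operand, not_taken);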
+ inline void SetEntryFrame(Result* arg);
+
JumpTarget* enter() { return &enter_; }
+
void BindExit() { exit_.Bind(0); }
void BindExit(Result* result) { exit_.Bind(result, 1); }
void BindExit(Result* result0, Result* result1) {
// Adjust for function-level loop nesting.
loop_nesting_ += fun->loop_nesting();
+ JumpTarget::set_compiling_deferred_code(false);
+
{
CodeGenState state(this);
if (HasStackOverflow()) {
ClearDeferred();
} else {
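+ // Deferred code is processed with the flag set so that jump
+ // targets can check (in debug builds) that the frames reaching
+ // deferred entry points agree.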
+ JumpTarget::set_compiling_deferred_code(true);
ProcessDeferred();
+ JumpTarget::set_compiling_deferred_code(false);
}
// There is no need to delete the register allocator, it is a
switch (op) {
case Token::ADD: {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ __ add(Operand(operand->reg()), Immediate(value));
+
DeferredCode* deferred = NULL;
if (reversed) {
deferred = new DeferredInlineSmiAddReversed(this, smi_value,
} else {
deferred = new DeferredInlineSmiAdd(this, smi_value, overwrite_mode);
}
- operand->ToRegister();
- frame_->Spill(operand->reg());
- __ add(Operand(operand->reg()), Immediate(value));
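+ // Take the entry frame snapshot only after ToRegister and Spill,
+ // since the frame must not change between SetEntryFrame and the
+ // branches into the deferred code.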
+ deferred->SetEntryFrame(operand);
deferred->enter()->Branch(overflow, operand, not_taken);
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, operand, not_taken);
if (reversed) {
answer = allocator()->Allocate();
ASSERT(answer.is_valid());
- deferred = new DeferredInlineSmiSubReversed(this,
- smi_value,
+ deferred = new DeferredInlineSmiSubReversed(this, smi_value,
overwrite_mode);
__ Set(answer.reg(), Immediate(value));
// We are in the reversed case so they can't both be Smi constants.
} else {
operand->ToRegister();
frame_->Spill(operand->reg());
- deferred = new DeferredInlineSmiSub(this,
- smi_value,
- overwrite_mode);
+ deferred = new DeferredInlineSmiSub(this, smi_value, overwrite_mode);
__ sub(Operand(operand->reg()), Immediate(value));
answer = *operand;
}
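+ // Take the entry frame snapshot after both the reversed and
+ // non-reversed cases have brought the frame to the state it has at
+ // the branches below.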
+ deferred->SetEntryFrame(operand);
deferred->enter()->Branch(overflow, operand, not_taken);
__ test(answer.reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, operand, not_taken);
} else {
// Inline the inobject property case.
Comment cmnt(masm, "[ Inlined named property load");
- DeferredReferenceGetNamedValue* deferred =
- new DeferredReferenceGetNamedValue(cgen_, GetName());
Result receiver = cgen_->frame()->Pop();
receiver.ToRegister();
- // Check that the receiver is a heap object.
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->enter()->Branch(zero, &receiver, not_taken);
// Preallocate the value register to ensure that there is no
// spill emitted between the patch site label and the offset in
- // the load instruction.
+ // the load instruction and that all frames reaching the
+ // deferred code are identical.
Result value = cgen_->allocator()->Allocate();
ASSERT(value.is_valid());
+
+ // Check that the receiver is a heap object.
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+
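+ // Neither allocating the deferred code object nor setting its
+ // entry frame emits instructions, so the flags from the test above
+ // are still valid at the branch below.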
+ DeferredReferenceGetNamedValue* deferred =
+ new DeferredReferenceGetNamedValue(cgen_, GetName());
+ deferred->SetEntryFrame(&receiver);
+ deferred->enter()->Branch(zero, &receiver, not_taken);
+
__ bind(deferred->patch_site());
// This is the map check instruction that will be patched (so we can't
// use the double underscore macro that may insert instructions).
cgen_->frame()->MergeTo(entry_frame_);
cgen_->DeleteFrame();
__ jmp(&entry_label_);
+ } else if (entry_frame_ != NULL) {
+ // Forward jump with a preconfigured entry frame. Assert the
+ // current frame matches the expected one and jump to the block.
+ ASSERT(cgen_->frame()->Equals(entry_frame_));
+ cgen_->DeleteFrame();
+ __ jmp(&entry_label_);
} else {
- // Forward jump. The current frame is added to the end of the list
- // of frames reaching the target block and a jump to the merge code
- // is emitted.
+ // Forward jump. Remember the current frame and emit a jump to
+ // its merge code.
AddReachingFrame(cgen_->frame());
RegisterFile empty;
cgen_->SetFrame(NULL, &empty);
cgen_->SetFrame(fall_through_frame, &non_frame_registers);
__ bind(&original_fall_through);
+ } else if (entry_frame_ != NULL) {
+ // Forward branch with a preconfigured entry frame. Assert the
+ // current frame matches the expected one and branch to the block.
+ ASSERT(cgen_->frame()->Equals(entry_frame_));
+ // Use masm_-> instead of __ as forward branches are expected to
+ // be a fixed size (no inserted coverage-checking instructions
+ // please). This is used in Reference::GetValue.
+ masm_->j(cc, &entry_label_, hint);
+ is_linked_ = true;
} else {
- // Forward branch. A copy of the current frame is added to the end of the
- // list of frames reaching the target block and a branch to the merge code
- // is emitted. Use masm_-> instead of __ as forward branches are expected
- // to be a fixed size (no inserted coverage-checking instructions please).
- // This is used in Reference::GetValue.
+ // Forward branch. A copy of the current frame is remembered and
+ // a branch to the merge code is emitted.
AddReachingFrame(new VirtualFrame(cgen_->frame()));
masm_->j(cc, &merge_labels_.last(), hint);
is_linked_ = true;
cgen_->frame()->SpillAll();
VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
target_frame->Adjust(1);
+ // We do not expect a call with a preconfigured entry frame.
+ ASSERT(entry_frame_ == NULL);
AddReachingFrame(target_frame);
__ call(&merge_labels_.last());
// block.
ASSERT(!cgen_->has_valid_frame() || cgen_->HasValidEntryRegisters());
+ // Fast case: the jump target was manually configured with an entry
+ // frame to use.
+ if (entry_frame_ != NULL) {
+ // There are no reaching frames and no current frame to merge.
+ ASSERT(reaching_frames_.is_empty());
+ ASSERT(!cgen_->has_valid_frame());
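+ // (AddReachingFrame asserts that the entry frame is not set, so a
+ // preconfigured target never accumulates reaching frames.)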
+
+ RegisterFile reserved = RegisterAllocator::Reserved();
+ if (direction_ == BIDIRECTIONAL) {
+ // Copy the entry frame so the original can be used for a
+ // possible backward jump.
+ cgen_->SetFrame(new VirtualFrame(entry_frame_), &reserved);
+ } else {
+ // Take ownership of the entry frame.
+ cgen_->SetFrame(entry_frame_, &reserved);
+ entry_frame_ = NULL;
+ }
+ __ bind(&entry_label_);
+ is_linked_ = false;
+ is_bound_ = true;
+ return;
+ }
+
if (direction_ == FORWARD_ONLY) {
// A simple case: no forward jumps and no possible backward jumps.
if (!is_linked()) {
bool had_fall_through = false;
if (cgen_->has_valid_frame()) {
had_fall_through = true;
AddReachingFrame(cgen_->frame());
RegisterFile empty;
cgen_->SetFrame(NULL, &empty);
}
// Compute the frame to use for entry to the block.
- ComputeEntryFrame(mergable_elements);
+ if (entry_frame_ == NULL) {
+ ComputeEntryFrame(mergable_elements);
+ }
// Some moves required to merge to an expected frame require purely
// frame state changes, and do not require any code generation.
// -------------------------------------------------------------------------
// JumpTarget implementation.
+bool JumpTarget::compiling_deferred_code_ = false;
+
+
JumpTarget::JumpTarget(CodeGenerator* cgen, Directionality direction)
: cgen_(cgen),
direction_(direction),
// the directionality of the block. Compute: an entry frame for the
// block.
+ Counters::compute_entry_frame.Increment();
+#ifdef DEBUG
+ if (compiling_deferred_code_) {
+ ASSERT(reaching_frames_.length() > 1);
+ VirtualFrame* frame = reaching_frames_[0];
+ bool all_identical = true;
+ for (int i = 1; i < reaching_frames_.length(); i++) {
+ if (!frame->Equals(reaching_frames_[i])) {
+ all_identical = false;
+ break;
+ }
+ }
+ // All frames reaching deferred code should be identical.
+ ASSERT(all_identical);
+ }
+#endif
+
// Choose an initial frame.
VirtualFrame* initial_frame = reaching_frames_[0];
void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
ASSERT(reaching_frames_.length() == merge_labels_.length());
+ ASSERT(entry_frame_ == NULL);
Label fresh;
merge_labels_.Add(fresh);
reaching_frames_.Add(frame);
static const int kAllElements = -1; // Not a valid number of elements.
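+ // Set or clear the flag that indicates that deferred code is
+ // currently being compiled (used to check the frames reaching
+ // deferred entry points in debug builds).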
+ static void set_compiling_deferred_code(bool flag) {
+ compiling_deferred_code_ = flag;
+ }
+
protected:
// The code generator gives access to its current frame.
CodeGenerator* cgen_;
void DoBind(int mergable_elements);
private:
- // Add a virtual frame reaching this labeled block via a forward
- // jump, and a fresh label for its merge code.
+ static bool compiling_deferred_code_;
+
+ // Add a virtual frame reaching this labeled block via a forward jump,
+ // and a corresponding merge code label.
void AddReachingFrame(VirtualFrame* frame);
- // Compute a frame to use for entry to this block. Mergable
- // elements is as described for the Bind function.
+ // Compute a frame to use for entry to this block. Mergable elements
+ // is as described for the Bind function.
void ComputeEntryFrame(int mergable_elements);
DISALLOW_COPY_AND_ASSIGN(JumpTarget);
SC(enum_cache_misses, V8.EnumCacheMisses) \
SC(reloc_info_count, V8.RelocInfoCount) \
SC(reloc_info_size, V8.RelocInfoSize) \
- SC(zone_segment_bytes, V8.ZoneSegmentBytes)
+ SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
+ SC(compute_entry_frame, V8.ComputeEntryFrame)
// This file contains all the v8 counters that are in use.