'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'code-stubs.cc',
'codegen.cc', 'compilation-cache.cc', 'compiler.cc', 'contexts.cc',
'conversions.cc', 'counters.cc', 'dateparser.cc', 'debug.cc',
- 'disassembler.cc', 'execution.cc', 'factory.cc', 'flags.cc', 'frames.cc',
- 'global-handles.cc', 'handles.cc', 'hashmap.cc', 'heap.cc', 'ic.cc',
- 'interpreter-irregexp.cc', 'jsregexp.cc', 'log.cc', 'mark-compact.cc',
- 'messages.cc', 'objects.cc', 'parser.cc', 'property.cc',
- 'regexp-macro-assembler.cc', 'regexp-macro-assembler-irregexp.cc',
- 'regexp-stack.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
+ 'disassembler.cc', 'execution.cc', 'factory.cc', 'flags.cc',
+ 'frames.cc', 'global-handles.cc', 'handles.cc', 'hashmap.cc', 'heap.cc',
+ 'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc', 'jump-target.cc',
+ 'log.cc', 'mark-compact.cc', 'messages.cc', 'objects.cc', 'parser.cc',
+ 'property.cc', 'regexp-macro-assembler.cc',
+ 'regexp-macro-assembler-irregexp.cc', 'regexp-stack.cc',
+ 'register-allocator.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
- 'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc', 'v8.cc',
- 'v8threads.cc', 'variables.cc', 'zone.cc'
+ 'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc',
+ 'v8.cc', 'v8threads.cc', 'variables.cc', 'virtual-frame.cc', 'zone.cc'
+ ],
+ 'arch:arm': [
+ 'assembler-arm.cc', 'builtins-arm.cc', 'codegen-arm.cc', 'cpu-arm.cc',
+ 'disasm-arm.cc', 'debug-arm.cc', 'frames-arm.cc', 'ic-arm.cc',
+ 'jump-target-arm.cc', 'macro-assembler-arm.cc',
+ 'regexp-macro-assembler-arm.cc', 'register-allocator-arm.cc',
+ 'stub-cache-arm.cc', 'virtual-frame-arm.cc'
+ ],
+ 'arch:ia32': [
+ 'assembler-ia32.cc', 'builtins-ia32.cc', 'codegen-ia32.cc',
+ 'cpu-ia32.cc', 'disasm-ia32.cc', 'debug-ia32.cc', 'frames-ia32.cc',
+ 'ic-ia32.cc', 'jump-target-ia32.cc', 'macro-assembler-ia32.cc',
+ 'regexp-macro-assembler-ia32.cc', 'register-allocator-ia32.cc',
+ 'stub-cache-ia32.cc', 'virtual-frame-ia32.cc'
],
- 'arch:arm': ['assembler-arm.cc', 'builtins-arm.cc', 'codegen-arm.cc',
- 'cpu-arm.cc', 'debug-arm.cc', 'disasm-arm.cc', 'frames-arm.cc',
- 'ic-arm.cc', 'macro-assembler-arm.cc', 'regexp-macro-assembler-arm.cc',
- 'stub-cache-arm.cc'],
- 'arch:ia32': ['assembler-ia32.cc', 'builtins-ia32.cc', 'codegen-ia32.cc',
- 'cpu-ia32.cc', 'debug-ia32.cc', 'disasm-ia32.cc', 'frames-ia32.cc',
- 'ic-ia32.cc', 'macro-assembler-ia32.cc', 'regexp-macro-assembler-ia32.cc',
- 'stub-cache-ia32.cc'],
'simulator:arm': ['simulator-arm.cc'],
'os:freebsd': ['platform-freebsd.cc'],
'os:linux': ['platform-linux.cc'],
};
+const int kNumRegisters = 16;
+
extern Register no_reg;
extern Register r0;
extern Register r1;
}
+// Branch hints are not used on the ARM. They are defined so that they can
+// appear in shared function signatures, but will be ignored in ARM
+// implementations.
+enum Hint { no_hint };
+
+
// The pc store offset may be 8 or 12 depending on the processor implementation.
int PcStoreOffset();
}
+void Assembler::xchg(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (src.is(eax) || dst.is(eax)) { // Single-byte encoding
+ EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
+ } else {
+ EMIT(0x87);
+ EMIT(0xC0 | src.code() << 3 | dst.code());
+ }
+}
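+
+// Encoding examples (worked out from the code above, for illustration):
+// xchg(eax, ecx) and xchg(ecx, eax) both use the single-byte form and emit
+// 0x91 (0x90 | ecx.code()), while xchg(ebx, ecx) emits the two-byte form
+// 0x87 0xCB (ModR/M byte 0xC0 | ecx.code() << 3 | ebx.code()).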
+
+
void Assembler::adc(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
}
+void Assembler::setcc(Condition cc, Register reg) {
+ ASSERT(reg.is_byte_register());
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0x90 | cc);
+ EMIT(0xC0 | reg.code());
+}
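+
+// For example, setcc(equal, eax) emits 0x0F 0x94 0xC0 (sete al) and
+// setcc(not_equal, ecx) emits 0x0F 0x95 0xC1 (setne cl); both registers
+// pass the is_byte_register() check.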
+
+
void Assembler::cvttss2si(Register dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
EnsureSpace ensure_space(this);
struct Register {
bool is_valid() const { return 0 <= code_ && code_ < 8; }
bool is(Register reg) const { return code_ == reg.code_; }
+ // eax, ebx, ecx and edx are byte registers, the rest are not.
+ bool is_byte_register() const { return code_ <= 3; }
int code() const {
ASSERT(is_valid());
return code_;
int code_;
};
+const int kNumRegisters = 8;
+
extern Register eax;
extern Register ecx;
extern Register edx;
};
+// The result of negating a hint is as if the corresponding condition
+// were negated by NegateCondition. That is, no_hint is mapped to
+// itself and not_taken and taken are mapped to each other.
+inline Hint NegateHint(Hint hint) {
+ return (hint == no_hint)
+ ? no_hint
+ : ((hint == not_taken) ? taken : not_taken);
+}
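+
+// For example, NegateHint(taken) yields not_taken and NegateHint(not_taken)
+// yields taken, while NegateHint(no_hint) stays no_hint, so applying
+// NegateHint twice always returns the original hint.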
+
// -----------------------------------------------------------------------------
// Machine instruction Immediates
void cmov(Condition cc, Register dst, Handle<Object> handle);
void cmov(Condition cc, Register dst, const Operand& src);
+ // Exchange two registers
+ void xchg(Register dst, Register src);
+
// Arithmetics
void adc(Register dst, int32_t imm32);
void adc(Register dst, const Operand& src);
void frndint();
void sahf();
+ void setcc(Condition cc, Register reg);
void cpuid();
// unknown pc location. Assembler::bind() is used to bind a label to the
// current pc. A label can be bound only once.
-class Label : public ZoneObject { // LabelShadows are dynamically allocated.
+class Label BASE_EMBEDDED {
public:
INLINE(Label()) { Unuse(); }
INLINE(~Label()) { ASSERT(!is_linked()); }
friend class Assembler;
friend class RegexpAssembler;
friend class Displacement;
- friend class LabelShadow;
+ friend class ShadowTarget;
friend class RegExpMacroAssemblerIrregexp;
};
-// A LabelShadow represents a label that is temporarily shadowed by another
-// label (represented by the original label during shadowing). They are used
-// to catch jumps to labels in certain contexts, e.g. try blocks. After
-// shadowing ends, the formerly shadowed label is again represented by the
-// original label and the LabelShadow can be used as a label in its own
-// right, representing the formerly shadowing label.
-class LabelShadow : public Label {
- public:
- explicit LabelShadow(Label* original) {
- ASSERT(original != NULL);
- original_label_ = original;
- original_pos_ = original->pos_;
- original->Unuse();
-#ifdef DEBUG
- is_shadowing_ = true;
-#endif
- }
-
- ~LabelShadow() {
- ASSERT(!is_shadowing_);
- }
-
- void StopShadowing() {
- ASSERT(is_shadowing_ && is_unused());
- pos_ = original_label_->pos_;
- original_label_->pos_ = original_pos_;
-#ifdef DEBUG
- is_shadowing_ = false;
-#endif
- }
-
- Label* original_label() const { return original_label_; }
-
- private:
- // During shadowing, the currently shadowing label. After shadowing, the
- // label that was shadowed.
- Label* original_label_;
-
- // During shadowing, the saved state of the original label.
- int original_pos_;
-
-#ifdef DEBUG
- bool is_shadowing_;
-#endif
-};
-
-
// -----------------------------------------------------------------------------
// Relocation information
}
-void LabelCollector::AddLabel(Label* label) {
+void TargetCollector::AddTarget(JumpTarget* target) {
// Add the label to the collector, but discard duplicates.
- int length = labels_->length();
+ int length = targets_->length();
for (int i = 0; i < length; i++) {
- if (labels_->at(i) == label) return;
+ if (targets_->at(i) == target) return;
}
- labels_->Add(label);
+ targets_->Add(target);
}
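+
+// Usage sketch (illustrative only; 'cgen' and 'list' are hypothetical locals):
+//   ZoneList<JumpTarget*>* list = new ZoneList<JumpTarget*>(2);
+//   TargetCollector collector(list);
+//   JumpTarget target(cgen);
+//   collector.AddTarget(&target);
+//   collector.AddTarget(&target);  // Duplicate, discarded.
+//   ASSERT(collector.targets()->length() == 1);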
#include "variables.h"
#include "macro-assembler.h"
#include "jsregexp.h"
+#include "jump-target.h"
namespace v8 { namespace internal {
V(ThisFunction)
+// Forward declarations
+class TargetCollector;
+
#define DEF_FORWARD_DECLARATION(type) class type;
NODE_LIST(DEF_FORWARD_DECLARATION)
#undef DEF_FORWARD_DECLARATION
virtual VariableProxy* AsVariableProxy() { return NULL; }
virtual Property* AsProperty() { return NULL; }
virtual Call* AsCall() { return NULL; }
- virtual LabelCollector* AsLabelCollector() { return NULL; }
+ virtual TargetCollector* AsTargetCollector() { return NULL; }
virtual BreakableStatement* AsBreakableStatement() { return NULL; }
virtual IterationStatement* AsIterationStatement() { return NULL; }
virtual UnaryOperation* AsUnaryOperation() { return NULL; }
virtual BreakableStatement* AsBreakableStatement() { return this; }
// Code generation
- Label* break_target() { return &break_target_; }
+ JumpTarget* break_target() { return &break_target_; }
// Used during code generation for restoring the stack when a
// break/continue crosses a statement that keeps stuff on the stack.
private:
ZoneStringList* labels_;
Type type_;
- Label break_target_;
+ JumpTarget break_target_;
int break_stack_height_;
};
Statement* body() const { return body_; }
// Code generation
- Label* continue_target() { return &continue_target_; }
+ JumpTarget* continue_target() { return &continue_target_; }
protected:
explicit IterationStatement(ZoneStringList* labels)
private:
Statement* body_;
- Label continue_target_;
+ JumpTarget continue_target_;
};
};
-// NOTE: LabelCollectors are represented as nodes to fit in the target
+// NOTE: TargetCollectors are represented as nodes to fit in the target
// stack in the compiler; this should probably be reworked.
-class LabelCollector: public Node {
+class TargetCollector: public Node {
public:
- explicit LabelCollector(ZoneList<Label*>* labels) : labels_(labels) { }
+ explicit TargetCollector(ZoneList<JumpTarget*>* targets)
+ : targets_(targets) {
+ }
- // Adds a label to the collector. The collector stores a pointer not
- // a copy of the label to make binding work, so make sure not to
- // pass in references to something on the stack.
- void AddLabel(Label* label);
+ // Adds a jump target to the collector. The collector stores a pointer not
+ // a copy of the target to make binding work, so make sure not to pass in
+ // references to something on the stack.
+ void AddTarget(JumpTarget* target);
- // Virtual behaviour. LabelCollectors are never part of the AST.
+ // Virtual behaviour. TargetCollectors are never part of the AST.
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
- virtual LabelCollector* AsLabelCollector() { return this; }
+ virtual TargetCollector* AsTargetCollector() { return this; }
- ZoneList<Label*>* labels() { return labels_; }
+ ZoneList<JumpTarget*>* targets() { return targets_; }
private:
- ZoneList<Label*>* labels_;
+ ZoneList<JumpTarget*>* targets_;
};
class TryStatement: public Statement {
public:
explicit TryStatement(Block* try_block)
- : try_block_(try_block), escaping_labels_(NULL) { }
+ : try_block_(try_block), escaping_targets_(NULL) { }
- void set_escaping_labels(ZoneList<Label*>* labels) {
- escaping_labels_ = labels;
+ void set_escaping_targets(ZoneList<JumpTarget*>* targets) {
+ escaping_targets_ = targets;
}
Block* try_block() const { return try_block_; }
- ZoneList<Label*>* escaping_labels() const { return escaping_labels_; }
+ ZoneList<JumpTarget*>* escaping_targets() const { return escaping_targets_; }
private:
Block* try_block_;
- ZoneList<Label*>* escaping_labels_;
+ ZoneList<JumpTarget*>* escaping_targets_;
};
#define __ masm_->
-// -------------------------------------------------------------------------
-// VirtualFrame implementation.
-
-VirtualFrame::VirtualFrame(CodeGenerator* cgen) {
- ASSERT(cgen->scope() != NULL);
-
- masm_ = cgen->masm();
- frame_local_count_ = cgen->scope()->num_stack_slots();
- parameter_count_ = cgen->scope()->num_parameters();
-}
-
-
-void VirtualFrame::Enter() {
- Comment cmnt(masm_, "[ Enter JS frame");
-#ifdef DEBUG
- { Label done, fail;
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &fail);
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r2, Operand(JS_FUNCTION_TYPE));
- __ b(eq, &done);
- __ bind(&fail);
- __ stop("CodeGenerator::EnterJSFrame - r1 not a function");
- __ bind(&done);
- }
-#endif // DEBUG
-
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
-}
-
-
-void VirtualFrame::Exit() {
- Comment cmnt(masm_, "[ Exit JS frame");
- // Drop the execution stack down to the frame pointer and restore the caller
- // frame pointer and return address.
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
-}
-
-
-void VirtualFrame::AllocateLocals() {
- if (frame_local_count_ > 0) {
- Comment cmnt(masm_, "[ Allocate space for locals");
- // Initialize stack slots with 'undefined' value.
- __ mov(ip, Operand(Factory::undefined_value()));
- for (int i = 0; i < frame_local_count_; i++) {
- __ push(ip);
- }
- }
-}
-
-
-void VirtualFrame::Drop(int count) {
- ASSERT(count >= 0);
- if (count > 0) {
- __ add(sp, sp, Operand(count * kPointerSize));
- }
-}
-
-
-void VirtualFrame::Pop() { Drop(1); }
-
-
-void VirtualFrame::Pop(Register reg) {
- __ pop(reg);
-}
-
-
-void VirtualFrame::Push(Register reg) {
- __ push(reg);
-}
-
-
// -------------------------------------------------------------------------
// CodeGenState implementation.
CodeGenState::CodeGenState(CodeGenerator* owner,
TypeofState typeof_state,
- Label* true_target,
- Label* false_target)
+ JumpTarget* true_target,
+ JumpTarget* false_target)
: owner_(owner),
typeof_state_(typeof_state),
true_target_(true_target),
masm_(new MacroAssembler(NULL, buffer_size)),
scope_(NULL),
frame_(NULL),
+ allocator_(NULL),
cc_reg_(al),
state_(NULL),
- break_stack_height_(0) {
+ break_stack_height_(0),
+ function_return_is_shadowed_(false),
+ in_spilled_code_(false) {
}
// Calling conventions:
-// r0: the number of arguments
-// fp: frame pointer
+// fp: caller's frame pointer
// sp: stack pointer
-// pp: caller's parameter pointer
+// r1: called JS function
// cp: callee's context
void CodeGenerator::GenCode(FunctionLiteral* fun) {
// Initialize state.
ASSERT(scope_ == NULL);
scope_ = fun->scope();
+ ASSERT(allocator_ == NULL);
+ RegisterAllocator register_allocator(this);
+ allocator_ = &register_allocator;
ASSERT(frame_ == NULL);
- VirtualFrame virtual_frame(this);
- frame_ = &virtual_frame;
+ frame_ = new VirtualFrame(this);
cc_reg_ = al;
+ function_return_.Initialize(this, JumpTarget::BIDIRECTIONAL);
+ function_return_is_shadowed_ = false;
+ set_in_spilled_code(false);
{
CodeGenState state(this);
- // Entry
- // stack: function, receiver, arguments, return address
- // r0: number of arguments
+ // Entry:
+ // Stack: receiver, arguments
+ // lr: return address
+ // fp: caller's frame pointer
// sp: stack pointer
- // fp: frame pointer
- // pp: caller's parameter pointer
+ // r1: called JS function
// cp: callee's context
-
+ allocator_->Initialize();
frame_->Enter();
// tos: code slot
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ frame_->SpillAll();
__ stop("stop-at");
}
#endif
// Allocate space for locals and initialize them.
- frame_->AllocateLocals();
+ frame_->AllocateStackSlots(scope_->num_stack_slots());
+ VirtualFrame::SpilledScope spilled_scope(this);
if (scope_->num_heap_slots() > 0) {
// Allocate local context.
// Get outer context and create a new context based on it.
__ ldr(r0, frame_->Function());
- frame_->Push(r0);
- __ CallRuntime(Runtime::kNewContext, 1); // r0 holds the result
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kNewContext, 1); // r0 holds the result
if (kDebug) {
- Label verified_true;
+ JumpTarget verified_true(this);
__ cmp(r0, Operand(cp));
- __ b(eq, &verified_true);
+ verified_true.Branch(eq);
__ stop("NewContext: r0 is expected to be the same as cp");
- __ bind(&verified_true);
+ verified_true.Bind();
}
// Update context local.
__ str(cp, frame_->Context());
Slot* slot = par->slot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
ASSERT(!scope_->is_global_scope()); // no parameters in global scope
- __ ldr(r1, frame_->Parameter(i));
+ __ ldr(r1, frame_->ParameterAt(i));
// Loads r2 with context; used below in RecordWrite.
__ str(r1, SlotOperand(slot, r2));
// Load the offset into r3.
const int kReceiverDisplacement = 2 + scope_->num_parameters();
__ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
__ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+ frame_->Adjust(3);
__ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
- __ CallStub(&stub);
- frame_->Push(r0);
+ frame_->CallStub(&stub, 3);
+ frame_->EmitPush(r0);
arguments_ref.SetValue(NOT_CONST_INIT);
}
shadow_ref.SetValue(NOT_CONST_INIT);
}
- frame_->Pop(); // Value is no longer needed.
+ frame_->Drop(); // Value is no longer needed.
}
// Generate code to 'execute' declarations and initialize functions
}
if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
+ frame_->CallRuntime(Runtime::kTraceEnter, 0);
// Ignore the return value.
}
CheckStack();
bool should_trace =
is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
if (should_trace) {
- __ CallRuntime(Runtime::kDebugTrace, 0);
+ frame_->CallRuntime(Runtime::kDebugTrace, 0);
// Ignore the return value.
}
#endif
- VisitStatements(body);
+ VisitStatementsAndSpill(body);
}
}
- // exit
- // r0: result
- // sp: stack pointer
- // fp: frame pointer
- // pp: parameter pointer
- // cp: callee's context
- __ mov(r0, Operand(Factory::undefined_value()));
+ // Generate the return sequence if necessary.
+ if (frame_ != NULL || function_return_.is_linked()) {
+ // exit
+ // r0: result
+ // sp: stack pointer
+ // fp: frame pointer
+ // pp: parameter pointer
+ // cp: callee's context
+ __ mov(r0, Operand(Factory::undefined_value()));
- __ bind(&function_return_);
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns the parameter as it is.
- frame_->Push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
+ function_return_.Bind();
+ if (FLAG_trace) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns the parameter as it is.
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kTraceExit, 1);
+ }
- // Tear down the frame which will restore the caller's frame pointer and the
- // link register.
- frame_->Exit();
+ // Tear down the frame which will restore the caller's frame pointer and
+ // the link register.
+ frame_->Exit();
- __ add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
- __ mov(pc, lr);
+ __ add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
+ __ mov(pc, lr);
+ }
// Code generation state must be reset.
- scope_ = NULL;
- frame_ = NULL;
ASSERT(!has_cc());
ASSERT(state_ == NULL);
+ ASSERT(!function_return_is_shadowed_);
+ function_return_.Unuse();
+ DeleteFrame();
+
+ // Process any deferred code using the register allocator.
+ ProcessDeferred();
+
+ allocator_ = NULL;
+ scope_ = NULL;
}
int index = slot->index();
switch (slot->type()) {
case Slot::PARAMETER:
- return frame_->Parameter(index);
+ return frame_->ParameterAt(index);
case Slot::LOCAL:
- return frame_->Local(index);
+ return frame_->LocalAt(index);
case Slot::CONTEXT: {
// Follow the context chain if necessary.
}
-MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
- Register tmp,
- Register tmp2,
- Label* slow) {
+MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
+ Slot* slot,
+ Register tmp,
+ Register tmp2,
+ JumpTarget* slow) {
ASSERT(slot->type() == Slot::CONTEXT);
- int index = slot->index();
Register context = cp;
+
for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_eval()) {
// Check that extension is NULL.
__ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(tmp2, tmp2);
- __ b(ne, slow);
+ slow->Branch(ne);
}
__ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
__ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
// Check that last extension is NULL.
__ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(tmp2, tmp2);
- __ b(ne, slow);
+ slow->Branch(ne);
__ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, index);
+ return ContextOperand(tmp, slot->index());
}
// test for 'true'.
void CodeGenerator::LoadCondition(Expression* x,
TypeofState typeof_state,
- Label* true_target,
- Label* false_target,
+ JumpTarget* true_target,
+ JumpTarget* false_target,
bool force_cc) {
+ ASSERT(!in_spilled_code());
ASSERT(!has_cc());
+ int original_height = frame_->height();
{ CodeGenState new_state(this, typeof_state, true_target, false_target);
Visit(x);
+
+ // If we hit a stack overflow, we may not have actually visited
+ // the expression. In that case, we ensure that we have a
+ // valid-looking frame state because we will continue to generate
+ // code as we unwind the C++ stack.
+ //
+ // It's possible to have both a stack overflow and a valid frame
+ // state (eg, a subexpression overflowed, visiting it returned
+ // with a dummied frame state, and visiting this expression
+ // returned with a normal-looking state).
+ if (HasStackOverflow() &&
+ has_valid_frame() &&
+ !has_cc() &&
+ frame_->height() == original_height) {
+ true_target->Jump();
+ }
}
- if (force_cc && !has_cc()) {
+ if (force_cc && frame_ != NULL && !has_cc()) {
// Convert the TOS value to a boolean in the condition code register.
- // Visiting an expression may possibly choose neither (a) to leave a
- // value in the condition code register nor (b) to leave a value in TOS
- // (eg, by compiling to only jumps to the targets). In that case the
- // code generated by ToBoolean is wrong because it assumes the value of
- // the expression in TOS. So long as there is always a value in TOS or
- // the condition code register when control falls through to here (there
- // is), the code generated by ToBoolean is dead and therefore safe.
ToBoolean(true_target, false_target);
}
- ASSERT(has_cc() || !force_cc);
+ ASSERT(!force_cc || !has_valid_frame() || has_cc());
+ ASSERT(!has_valid_frame() ||
+ (has_cc() && frame_->height() == original_height) ||
+ (!has_cc() && frame_->height() == original_height + 1));
}
void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
- Label true_target;
- Label false_target;
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ ASSERT(!in_spilled_code());
+ JumpTarget true_target(this);
+ JumpTarget false_target(this);
LoadCondition(x, typeof_state, &true_target, &false_target, false);
if (has_cc()) {
- // convert cc_reg_ into a bool
- Label loaded, materialize_true;
- __ b(cc_reg_, &materialize_true);
+ // Convert cc_reg_ into a boolean value.
+ JumpTarget loaded(this);
+ JumpTarget materialize_true(this);
+ materialize_true.Branch(cc_reg_);
__ mov(r0, Operand(Factory::false_value()));
- frame_->Push(r0);
- __ b(&loaded);
- __ bind(&materialize_true);
+ frame_->EmitPush(r0);
+ loaded.Jump();
+ materialize_true.Bind();
__ mov(r0, Operand(Factory::true_value()));
- frame_->Push(r0);
- __ bind(&loaded);
+ frame_->EmitPush(r0);
+ loaded.Bind();
cc_reg_ = al;
}
if (true_target.is_linked() || false_target.is_linked()) {
- // we have at least one condition value
- // that has been "translated" into a branch,
- // thus it needs to be loaded explicitly again
- Label loaded;
- __ b(&loaded); // don't lose current TOS
+ // We have at least one condition value that has been "translated"
+ // into a branch, thus it needs to be loaded explicitly.
+ JumpTarget loaded(this);
+ if (frame_ != NULL) {
+ loaded.Jump(); // Don't lose the current TOS.
+ }
bool both = true_target.is_linked() && false_target.is_linked();
- // reincarnate "true", if necessary
+ // Load "true" if necessary.
if (true_target.is_linked()) {
- __ bind(&true_target);
+ true_target.Bind();
__ mov(r0, Operand(Factory::true_value()));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
}
- // if both "true" and "false" need to be reincarnated,
- // jump across code for "false"
- if (both)
- __ b(&loaded);
- // reincarnate "false", if necessary
+ // If both "true" and "false" need to be loaded jump across the code for
+ // "false".
+ if (both) {
+ loaded.Jump();
+ }
+ // Load "false" if necessary.
if (false_target.is_linked()) {
- __ bind(&false_target);
+ false_target.Bind();
__ mov(r0, Operand(Factory::false_value()));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
}
- // everything is loaded at this point
- __ bind(&loaded);
+ // A value is loaded on all paths reaching this point.
+ loaded.Bind();
}
+ ASSERT(has_valid_frame());
ASSERT(!has_cc());
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::LoadGlobal() {
+ VirtualFrame::SpilledScope spilled_scope(this);
__ ldr(r0, GlobalObject());
- frame_->Push(r0);
+ frame_->EmitPush(r0);
}
void CodeGenerator::LoadGlobalReceiver(Register scratch) {
+ VirtualFrame::SpilledScope spilled_scope(this);
__ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
__ ldr(scratch,
FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
- frame_->Push(scratch);
+ frame_->EmitPush(scratch);
}
// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
// variables w/o reference errors elsewhere.
void CodeGenerator::LoadTypeofExpression(Expression* x) {
+ VirtualFrame::SpilledScope spilled_scope(this);
Variable* variable = x->AsVariableProxy()->AsVariable();
if (variable != NULL && !variable->is_this() && variable->is_global()) {
// NOTE: This is somewhat nasty. We force the compiler to load
// TODO(1241834): Fetch the position from the variable instead of using
// no position.
Property property(&global, &key, RelocInfo::kNoPosition);
- Load(&property);
+ LoadAndSpill(&property);
} else {
- Load(x, INSIDE_TYPEOF);
+ LoadAndSpill(x, INSIDE_TYPEOF);
}
}
void CodeGenerator::LoadReference(Reference* ref) {
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ LoadReference");
Expression* e = ref->expression();
Property* property = e->AsProperty();
if (property != NULL) {
// The expression is either a property or a variable proxy that rewrites
// to a property.
- Load(property->obj());
+ LoadAndSpill(property->obj());
// We use a named reference if the key is a literal symbol, unless it is
// a string that can be legally parsed as an integer. This is because
// otherwise we will not get into the slow case code that handles [] on
!String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
ref->set_type(Reference::NAMED);
} else {
- Load(property->key());
+ LoadAndSpill(property->key());
ref->set_type(Reference::KEYED);
}
} else if (var != NULL) {
}
} else {
// Anything else is a runtime error.
- Load(e);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ LoadAndSpill(e);
+ frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
}
}
void CodeGenerator::UnloadReference(Reference* ref) {
+ VirtualFrame::SpilledScope spilled_scope(this);
// Pop a reference from the stack while preserving TOS.
Comment cmnt(masm_, "[ UnloadReference");
int size = ref->size();
if (size > 0) {
- frame_->Pop(r0);
+ frame_->EmitPop(r0);
frame_->Drop(size);
- frame_->Push(r0);
+ frame_->EmitPush(r0);
}
}
// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
// register to a boolean in the condition code register. The code
// may jump to 'false_target' in case the register converts to 'false'.
-void CodeGenerator::ToBoolean(Label* true_target,
- Label* false_target) {
+void CodeGenerator::ToBoolean(JumpTarget* true_target,
+ JumpTarget* false_target) {
+ VirtualFrame::SpilledScope spilled_scope(this);
// Note: The generated code snippet does not change stack variables.
// Only the condition code should be set.
- frame_->Pop(r0);
+ frame_->EmitPop(r0);
// Fast case checks
// Check if the value is 'false'.
__ cmp(r0, Operand(Factory::false_value()));
- __ b(eq, false_target);
+ false_target->Branch(eq);
// Check if the value is 'true'.
__ cmp(r0, Operand(Factory::true_value()));
- __ b(eq, true_target);
+ true_target->Branch(eq);
// Check if the value is 'undefined'.
__ cmp(r0, Operand(Factory::undefined_value()));
- __ b(eq, false_target);
+ false_target->Branch(eq);
// Check if the value is a smi.
__ cmp(r0, Operand(Smi::FromInt(0)));
- __ b(eq, false_target);
+ false_target->Branch(eq);
__ tst(r0, Operand(kSmiTagMask));
- __ b(eq, true_target);
+ true_target->Branch(eq);
// Slow case: call the runtime.
- frame_->Push(r0);
- __ CallRuntime(Runtime::kToBool, 1);
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kToBool, 1);
// Convert the result (r0) to a condition code.
__ cmp(r0, Operand(Factory::false_value()));
void CodeGenerator::GenericBinaryOperation(Token::Value op) {
+ VirtualFrame::SpilledScope spilled_scope(this);
// sp[0] : y
// sp[1] : x
// result : r0
case Token::SHL:
case Token::SHR:
case Token::SAR: {
- frame_->Pop(r0); // r0 : y
- frame_->Pop(r1); // r1 : x
+ frame_->EmitPop(r0); // r0 : y
+ frame_->EmitPop(r1); // r1 : x
GenericBinaryOpStub stub(op);
- __ CallStub(&stub);
+ frame_->CallStub(&stub, 0);
break;
}
case Token::DIV: {
- __ mov(r0, Operand(1));
- __ InvokeBuiltin(Builtins::DIV, CALL_JS);
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(1));
+ frame_->InvokeBuiltin(Builtins::DIV, CALL_JS, &arg_count, 2);
break;
}
case Token::MOD: {
- __ mov(r0, Operand(1));
- __ InvokeBuiltin(Builtins::MOD, CALL_JS);
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(1));
+ frame_->InvokeBuiltin(Builtins::MOD, CALL_JS, &arg_count, 2);
break;
}
case Token::COMMA:
- frame_->Pop(r0);
+ frame_->EmitPop(r0);
// simply discard left value
- frame_->Pop();
+ frame_->Drop();
break;
default:
}
-class DeferredInlinedSmiOperation: public DeferredCode {
+class DeferredInlineSmiOperation: public DeferredCode {
public:
- DeferredInlinedSmiOperation(CodeGenerator* generator, Token::Value op,
- int value, bool reversed) :
- DeferredCode(generator), op_(op), value_(value), reversed_(reversed) {
+ DeferredInlineSmiOperation(CodeGenerator* generator,
+ Token::Value op,
+ int value,
+ bool reversed)
+ : DeferredCode(generator),
+ op_(op),
+ value_(value),
+ reversed_(reversed) {
set_comment("[ DeferredInlinedSmiOperation");
}
- virtual void Generate() {
- switch (op_) {
- case Token::ADD: {
- if (reversed_) {
- // revert optimistic add
- __ sub(r0, r0, Operand(Smi::FromInt(value_)));
- __ mov(r1, Operand(Smi::FromInt(value_))); // x
- } else {
- // revert optimistic add
- __ sub(r1, r0, Operand(Smi::FromInt(value_)));
- __ mov(r0, Operand(Smi::FromInt(value_)));
- }
- break;
- }
+ virtual void Generate();
- case Token::SUB: {
- if (reversed_) {
- // revert optimistic sub
- __ rsb(r0, r0, Operand(Smi::FromInt(value_)));
- __ mov(r1, Operand(Smi::FromInt(value_)));
- } else {
- __ add(r1, r0, Operand(Smi::FromInt(value_)));
- __ mov(r0, Operand(Smi::FromInt(value_)));
- }
- break;
+ private:
+ Token::Value op_;
+ int value_;
+ bool reversed_;
+};
+
+
+void DeferredInlineSmiOperation::Generate() {
+ enter()->Bind();
+ VirtualFrame::SpilledScope spilled_scope(generator());
+
+ switch (op_) {
+ case Token::ADD: {
+ if (reversed_) {
+ // revert optimistic add
+ __ sub(r0, r0, Operand(Smi::FromInt(value_)));
+ __ mov(r1, Operand(Smi::FromInt(value_)));
+ } else {
+ // revert optimistic add
+ __ sub(r1, r0, Operand(Smi::FromInt(value_)));
+ __ mov(r0, Operand(Smi::FromInt(value_)));
}
+ break;
+ }
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND: {
- if (reversed_) {
- __ mov(r1, Operand(Smi::FromInt(value_)));
- } else {
- __ mov(r1, Operand(r0));
- __ mov(r0, Operand(Smi::FromInt(value_)));
- }
- break;
+ case Token::SUB: {
+ if (reversed_) {
+ // revert optimistic sub
+ __ rsb(r0, r0, Operand(Smi::FromInt(value_)));
+ __ mov(r1, Operand(Smi::FromInt(value_)));
+ } else {
+ __ add(r1, r0, Operand(Smi::FromInt(value_)));
+ __ mov(r0, Operand(Smi::FromInt(value_)));
}
+ break;
+ }
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
- if (!reversed_) {
- __ mov(r1, Operand(r0));
- __ mov(r0, Operand(Smi::FromInt(value_)));
- } else {
- UNREACHABLE(); // should have been handled in SmiOperation
- }
- break;
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND: {
+ if (reversed_) {
+ __ mov(r1, Operand(Smi::FromInt(value_)));
+ } else {
+ __ mov(r1, Operand(r0));
+ __ mov(r0, Operand(Smi::FromInt(value_)));
}
+ break;
+ }
- default:
- // other cases should have been handled before this point.
- UNREACHABLE();
- break;
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ if (!reversed_) {
+ __ mov(r1, Operand(r0));
+ __ mov(r0, Operand(Smi::FromInt(value_)));
+ } else {
+ UNREACHABLE(); // should have been handled in SmiOperation
+ }
+ break;
}
- GenericBinaryOpStub igostub(op_);
- __ CallStub(&igostub);
+ default:
+ // other cases should have been handled before this point.
+ UNREACHABLE();
+ break;
}
- private:
- Token::Value op_;
- int value_;
- bool reversed_;
-};
+ GenericBinaryOpStub igostub(op_);
+ Result arg0 = generator()->allocator()->Allocate(r0);
+ ASSERT(arg0.is_valid());
+ Result arg1 = generator()->allocator()->Allocate(r1);
+ ASSERT(arg1.is_valid());
+ generator()->frame()->CallStub(&igostub, &arg0, &arg1, 0);
+ exit_.Jump();
+}
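+
+
+// SmiOperation below creates such deferred objects on its fast paths,
+// branches to deferred->enter() when an inlined smi or overflow check fails,
+// and calls deferred->BindExit() where the out-of-line code generated above
+// rejoins the main instruction stream.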
void CodeGenerator::SmiOperation(Token::Value op,
Handle<Object> value,
bool reversed) {
+ VirtualFrame::SpilledScope spilled_scope(this);
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
// of the operands is a literal smi. With this optimization, the
int int_value = Smi::cast(*value)->value();
- Label exit;
- frame_->Pop(r0);
+ JumpTarget exit(this);
+ frame_->EmitPop(r0);
switch (op) {
case Token::ADD: {
DeferredCode* deferred =
- new DeferredInlinedSmiOperation(this, op, int_value, reversed);
+ new DeferredInlineSmiOperation(this, op, int_value, reversed);
__ add(r0, r0, Operand(value), SetCC);
- __ b(vs, deferred->enter());
+ deferred->enter()->Branch(vs);
__ tst(r0, Operand(kSmiTagMask));
- __ b(ne, deferred->enter());
- __ bind(deferred->exit());
+ deferred->enter()->Branch(ne);
+ deferred->BindExit();
break;
}
case Token::SUB: {
DeferredCode* deferred =
- new DeferredInlinedSmiOperation(this, op, int_value, reversed);
+ new DeferredInlineSmiOperation(this, op, int_value, reversed);
if (!reversed) {
__ sub(r0, r0, Operand(value), SetCC);
} else {
__ rsb(r0, r0, Operand(value), SetCC);
}
- __ b(vs, deferred->enter());
+ deferred->enter()->Branch(vs);
__ tst(r0, Operand(kSmiTagMask));
- __ b(ne, deferred->enter());
- __ bind(deferred->exit());
+ deferred->enter()->Branch(ne);
+ deferred->BindExit();
break;
}
case Token::BIT_XOR:
case Token::BIT_AND: {
DeferredCode* deferred =
- new DeferredInlinedSmiOperation(this, op, int_value, reversed);
+ new DeferredInlineSmiOperation(this, op, int_value, reversed);
__ tst(r0, Operand(kSmiTagMask));
- __ b(ne, deferred->enter());
+ deferred->enter()->Branch(ne);
switch (op) {
case Token::BIT_OR: __ orr(r0, r0, Operand(value)); break;
case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break;
case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break;
default: UNREACHABLE();
}
- __ bind(deferred->exit());
+ deferred->BindExit();
break;
}
case Token::SAR: {
if (reversed) {
__ mov(ip, Operand(value));
- frame_->Push(ip);
- frame_->Push(r0);
+ frame_->EmitPush(ip);
+ frame_->EmitPush(r0);
GenericBinaryOperation(op);
} else {
int shift_value = int_value & 0x1f; // least significant 5 bits
DeferredCode* deferred =
- new DeferredInlinedSmiOperation(this, op, shift_value, false);
+ new DeferredInlineSmiOperation(this, op, shift_value, false);
__ tst(r0, Operand(kSmiTagMask));
- __ b(ne, deferred->enter());
+ deferred->enter()->Branch(ne);
__ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
switch (op) {
case Token::SHL: {
__ mov(r2, Operand(r2, LSL, shift_value));
// check that the *unsigned* result fits in a smi
__ add(r3, r2, Operand(0x40000000), SetCC);
- __ b(mi, deferred->enter());
+ deferred->enter()->Branch(mi);
break;
}
case Token::SHR: {
// smi tagging these two cases can only happen with shifts
// by 0 or 1 when handed a valid smi
__ and_(r3, r2, Operand(0xc0000000), SetCC);
- __ b(ne, deferred->enter());
+ deferred->enter()->Branch(ne);
break;
}
case Token::SAR: {
default: UNREACHABLE();
}
__ mov(r0, Operand(r2, LSL, kSmiTagSize));
- __ bind(deferred->exit());
+ deferred->BindExit();
}
break;
}
default:
if (!reversed) {
- frame_->Push(r0);
+ frame_->EmitPush(r0);
__ mov(r0, Operand(value));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
} else {
__ mov(ip, Operand(value));
- frame_->Push(ip);
- frame_->Push(r0);
+ frame_->EmitPush(ip);
+ frame_->EmitPush(r0);
}
GenericBinaryOperation(op);
break;
}
- __ bind(&exit);
+ exit.Bind();
}
void CodeGenerator::Comparison(Condition cc, bool strict) {
+ VirtualFrame::SpilledScope spilled_scope(this);
// sp[0] : y
// sp[1] : x
// result : cc register
// Strict only makes sense for equality comparisons.
ASSERT(!strict || cc == eq);
- Label exit, smi;
+ JumpTarget exit(this);
+ JumpTarget smi(this);
// Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
if (cc == gt || cc == le) {
cc = ReverseCondition(cc);
- frame_->Pop(r1);
- frame_->Pop(r0);
+ frame_->EmitPop(r1);
+ frame_->EmitPop(r0);
} else {
- frame_->Pop(r0);
- frame_->Pop(r1);
+ frame_->EmitPop(r0);
+ frame_->EmitPop(r1);
}
__ orr(r2, r0, Operand(r1));
__ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &smi);
+ smi.Branch(eq);
// Perform non-smi comparison by runtime call.
- frame_->Push(r1);
+ frame_->EmitPush(r1);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
- int argc;
+ int arg_count = 1;
if (cc == eq) {
native = strict ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- argc = 1;
} else {
native = Builtins::COMPARE;
int ncr; // NaN compare result
ASSERT(cc == gt || cc == ge); // remaining cases
ncr = LESS;
}
- frame_->Push(r0);
+ frame_->EmitPush(r0);
+ arg_count++;
__ mov(r0, Operand(Smi::FromInt(ncr)));
- argc = 2;
}
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- frame_->Push(r0);
- __ mov(r0, Operand(argc));
- __ InvokeBuiltin(native, CALL_JS);
- __ cmp(r0, Operand(0));
- __ b(&exit);
+ frame_->EmitPush(r0);
+ Result arg_count_register = allocator_->Allocate(r0);
+ ASSERT(arg_count_register.is_valid());
+ __ mov(arg_count_register.reg(), Operand(arg_count));
+ Result result = frame_->InvokeBuiltin(native,
+ CALL_JS,
+ &arg_count_register,
+ arg_count + 1);
+ __ cmp(result.reg(), Operand(0));
+ result.Unuse();
+ exit.Jump();
// test smi equality by pointer comparison.
- __ bind(&smi);
+ smi.Bind();
__ cmp(r1, Operand(r0));
- __ bind(&exit);
+ exit.Bind();
cc_reg_ = cc;
}
// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
int position) {
+ VirtualFrame::SpilledScope spilled_scope(this);
// Push the arguments ("left-to-right") on the stack.
- for (int i = 0; i < args->length(); i++) {
- Load(args->at(i));
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ LoadAndSpill(args->at(i));
}
// Record the position for debugging purposes.
CodeForSourcePosition(position);
// Use the shared code stub to call the function.
- CallFunctionStub call_function(args->length());
- __ CallStub(&call_function);
+ CallFunctionStub call_function(arg_count);
+ frame_->CallStub(&call_function, arg_count + 1);
// Restore context and pop function from the stack.
__ ldr(cp, frame_->Context());
- frame_->Pop(); // discard the TOS
+ frame_->Drop(); // discard the TOS
}
-void CodeGenerator::Branch(bool if_true, Label* L) {
+void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
+ VirtualFrame::SpilledScope spilled_scope(this);
ASSERT(has_cc());
Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
- __ b(cc, L);
+ target->Branch(cc);
cc_reg_ = al;
}
void CodeGenerator::CheckStack() {
+ VirtualFrame::SpilledScope spilled_scope(this);
if (FLAG_check_stack) {
Comment cmnt(masm_, "[ check stack");
StackCheckStub stub;
- __ CallStub(&stub);
+ frame_->CallStub(&stub, 0);
}
}
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
+ for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
+ VisitAndSpill(statements->at(i));
+ }
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
void CodeGenerator::VisitBlock(Block* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ Block");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
node->set_break_stack_height(break_stack_height_);
- VisitStatements(node->statements());
- __ bind(node->break_target());
+ node->break_target()->Initialize(this);
+ VisitStatementsAndSpill(node->statements());
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ VirtualFrame::SpilledScope spilled_scope(this);
__ mov(r0, Operand(pairs));
- frame_->Push(r0);
- frame_->Push(cp);
+ frame_->EmitPush(r0);
+ frame_->EmitPush(cp);
__ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
- frame_->Push(r0);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
// The result is discarded.
}
void CodeGenerator::VisitDeclaration(Declaration* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ Declaration");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
Slot* slot = var->slot();
// during variable resolution and must have mode DYNAMIC.
ASSERT(var->is_dynamic());
// For now, just do a runtime call.
- frame_->Push(cp);
+ frame_->EmitPush(cp);
__ mov(r0, Operand(var->name()));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
// Declaration nodes are always declared in only two modes.
ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
__ mov(r0, Operand(Smi::FromInt(attr)));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (node->mode() == Variable::CONST) {
__ mov(r0, Operand(Factory::the_hole_value()));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
} else if (node->fun() != NULL) {
- Load(node->fun());
+ LoadAndSpill(node->fun());
} else {
__ mov(r0, Operand(0)); // no initial value!
- frame_->Push(r0);
+ frame_->EmitPush(r0);
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
// Ignore the return value (declarations are statements).
+ ASSERT(frame_->height() == original_height);
return;
}
{
// Set initial value.
Reference target(this, node->proxy());
- Load(val);
+ LoadAndSpill(val);
target.SetValue(NOT_CONST_INIT);
// The reference is removed from the stack (preserving TOS) when
// it goes out of scope.
}
// Get rid of the assigned value (declarations are statements).
- frame_->Pop();
+ frame_->Drop();
}
+ ASSERT(frame_->height() == original_height);
}
void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ ExpressionStatement");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
Expression* expression = node->expression();
expression->MarkAsStatement();
- Load(expression);
- frame_->Pop();
+ LoadAndSpill(expression);
+ frame_->Drop();
+ ASSERT(frame_->height() == original_height);
}
void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "// EmptyStatement");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
// nothing to do
+ ASSERT(frame_->height() == original_height);
}
void CodeGenerator::VisitIfStatement(IfStatement* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ IfStatement");
- // Generate different code depending on which
- // parts of the if statement are present or not.
+ // Generate different code depending on which parts of the if statement
+ // are present or not.
bool has_then_stm = node->HasThenStatement();
bool has_else_stm = node->HasElseStatement();
- CodeForStatement(node);
+ CodeForStatementPosition(node);
- Label exit;
+ JumpTarget exit(this);
if (has_then_stm && has_else_stm) {
Comment cmnt(masm_, "[ IfThenElse");
- Label then;
- Label else_;
+ JumpTarget then(this);
+ JumpTarget else_(this);
// if (cond)
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &then, &else_, true);
- Branch(false, &else_);
+ LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+ &then, &else_, true);
+ if (frame_ != NULL) {
+ Branch(false, &else_);
+ }
// then
- __ bind(&then);
- Visit(node->then_statement());
- __ b(&exit);
+ if (frame_ != NULL || then.is_linked()) {
+ then.Bind();
+ VisitAndSpill(node->then_statement());
+ }
+ if (frame_ != NULL) {
+ exit.Jump();
+ }
// else
- __ bind(&else_);
- Visit(node->else_statement());
+ if (else_.is_linked()) {
+ else_.Bind();
+ VisitAndSpill(node->else_statement());
+ }
} else if (has_then_stm) {
Comment cmnt(masm_, "[ IfThen");
ASSERT(!has_else_stm);
- Label then;
+ JumpTarget then(this);
// if (cond)
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &then, &exit, true);
- Branch(false, &exit);
+ LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+ &then, &exit, true);
+ if (frame_ != NULL) {
+ Branch(false, &exit);
+ }
// then
- __ bind(&then);
- Visit(node->then_statement());
+ if (frame_ != NULL || then.is_linked()) {
+ then.Bind();
+ VisitAndSpill(node->then_statement());
+ }
} else if (has_else_stm) {
Comment cmnt(masm_, "[ IfElse");
ASSERT(!has_then_stm);
- Label else_;
+ JumpTarget else_(this);
// if (!cond)
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &exit, &else_, true);
- Branch(true, &exit);
+ LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+ &exit, &else_, true);
+ if (frame_ != NULL) {
+ Branch(true, &exit);
+ }
// else
- __ bind(&else_);
- Visit(node->else_statement());
+ if (frame_ != NULL || else_.is_linked()) {
+ else_.Bind();
+ VisitAndSpill(node->else_statement());
+ }
} else {
Comment cmnt(masm_, "[ If");
ASSERT(!has_then_stm && !has_else_stm);
// if (cond)
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &exit, &exit, false);
- if (has_cc()) {
- cc_reg_ = al;
- } else {
- frame_->Pop();
+ LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+ &exit, &exit, false);
+ if (frame_ != NULL) {
+ if (has_cc()) {
+ cc_reg_ = al;
+ } else {
+ frame_->Drop();
+ }
}
}
// end
- __ bind(&exit);
+ if (exit.is_linked()) {
+ exit.Bind();
+ }
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
void CodeGenerator::CleanStack(int num_bytes) {
+ VirtualFrame::SpilledScope spilled_scope(this);
ASSERT(num_bytes % kPointerSize == 0);
frame_->Drop(num_bytes / kPointerSize);
}
void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ ContinueStatement");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
CleanStack(break_stack_height_ - node->target()->break_stack_height());
- __ b(node->target()->continue_target());
+ node->target()->continue_target()->Jump();
}
void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ BreakStatement");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
CleanStack(break_stack_height_ - node->target()->break_stack_height());
- __ b(node->target()->break_target());
+ node->target()->break_target()->Jump();
}
void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ ReturnStatement");
- CodeForStatement(node);
- Load(node->expression());
- // Move the function result into r0.
- frame_->Pop(r0);
- __ b(&function_return_);
+ if (function_return_is_shadowed_) {
+ CodeForStatementPosition(node);
+ LoadAndSpill(node->expression());
+ frame_->EmitPop(r0);
+ function_return_.Jump();
+ } else {
+ // Load the returned value.
+ CodeForStatementPosition(node);
+ LoadAndSpill(node->expression());
+
+ // Pop the result from the frame and prepare the frame for
+ // returning, thus making it easier to merge.
+ frame_->EmitPop(r0);
+ frame_->PrepareForReturn();
+
+ function_return_.Jump();
+ }
}
void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ WithEnterStatement");
- CodeForStatement(node);
- Load(node->expression());
+ CodeForStatementPosition(node);
+ LoadAndSpill(node->expression());
if (node->is_catch_block()) {
- __ CallRuntime(Runtime::kPushCatchContext, 1);
+ frame_->CallRuntime(Runtime::kPushCatchContext, 1);
} else {
- __ CallRuntime(Runtime::kPushContext, 1);
+ frame_->CallRuntime(Runtime::kPushContext, 1);
}
if (kDebug) {
- Label verified_true;
+ JumpTarget verified_true(this);
__ cmp(r0, Operand(cp));
- __ b(eq, &verified_true);
+ verified_true.Branch(eq);
__ stop("PushContext: r0 is expected to be the same as cp");
- __ bind(&verified_true);
+ verified_true.Bind();
}
// Update context local.
__ str(cp, frame_->Context());
+ ASSERT(frame_->height() == original_height);
}
void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ WithExitStatement");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
// Pop context.
__ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
// Update context local.
__ str(cp, frame_->Context());
+ ASSERT(frame_->height() == original_height);
}
int CodeGenerator::FastCaseSwitchMaxOverheadFactor() {
- return kFastSwitchMaxOverheadFactor;
+ return kFastSwitchMaxOverheadFactor;
}
int CodeGenerator::FastCaseSwitchMinCaseCount() {
- return kFastSwitchMinCaseCount;
+ return kFastSwitchMinCaseCount;
}
SwitchStatement* node,
int min_index,
int range,
- Label* fail_label,
+ Label* default_label,
Vector<Label*> case_targets,
Vector<Label> case_labels) {
+ VirtualFrame::SpilledScope spilled_scope(this);
+ JumpTarget setup_default(this);
+ JumpTarget is_smi(this);
- ASSERT(kSmiTag == 0 && kSmiTagSize <= 2);
+ // A non-null default label pointer indicates a default case among
+ // the case labels. Otherwise we use the break target as a
+ // "default" for failure to hit the jump table.
+ JumpTarget* default_target =
+ (default_label == NULL) ? node->break_target() : &setup_default;
- frame_->Pop(r0);
+ ASSERT(kSmiTag == 0 && kSmiTagSize <= 2);
+ frame_->EmitPop(r0);
// Test for a Smi value in a HeapNumber.
- Label is_smi;
__ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &is_smi);
+ is_smi.Branch(eq);
__ ldr(r1, MemOperand(r0, HeapObject::kMapOffset - kHeapObjectTag));
__ ldrb(r1, MemOperand(r1, Map::kInstanceTypeOffset - kHeapObjectTag));
__ cmp(r1, Operand(HEAP_NUMBER_TYPE));
- __ b(ne, fail_label);
- frame_->Push(r0);
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- __ bind(&is_smi);
+ default_target->Branch(ne);
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kNumberToSmi, 1);
+ is_smi.Bind();
if (min_index != 0) {
// Small positive numbers can be immediate operands.
}
}
__ tst(r0, Operand(0x80000000 | kSmiTagMask));
- __ b(ne, fail_label);
+ default_target->Branch(ne);
__ cmp(r0, Operand(Smi::FromInt(range)));
- __ b(ge, fail_label);
+ default_target->Branch(ge);
+ VirtualFrame* start_frame = new VirtualFrame(frame_);
__ SmiJumpTable(r0, case_targets);
- GenerateFastCaseSwitchCases(node, case_labels);
+ GenerateFastCaseSwitchCases(node, case_labels, start_frame);
+
+ // If there was a default case among the case labels, we need to
+ // emit code to jump to it from the default target used for failure
+ // to hit the jump table.
+ if (default_label != NULL) {
+ if (has_valid_frame()) {
+ node->break_target()->Jump();
+ }
+ setup_default.Bind();
+ frame_->MergeTo(start_frame);
+ __ b(default_label);
+ DeleteFrame();
+ }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+
+ delete start_frame;
}
void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ SwitchStatement");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
node->set_break_stack_height(break_stack_height_);
+ node->break_target()->Initialize(this);
- Load(node->tag());
-
+ LoadAndSpill(node->tag());
if (TryGenerateFastCaseSwitchStatement(node)) {
- return;
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
+ return;
}
- Label next, fall_through, default_case;
+ JumpTarget next_test(this);
+ JumpTarget fall_through(this);
+ JumpTarget default_entry(this);
+ JumpTarget default_exit(this, JumpTarget::BIDIRECTIONAL);
ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length();
+ CaseClause* default_clause = NULL;
for (int i = 0; i < length; i++) {
CaseClause* clause = cases->at(i);
-
- Comment cmnt(masm_, "[ case clause");
-
if (clause->is_default()) {
- // Continue matching cases. The program will execute the default case's
- // statements if it does not match any of the cases.
- __ b(&next);
-
- // Bind the default case label, so we can branch to it when we
- // have compared against all other cases.
- ASSERT(default_case.is_unused()); // at most one default clause
- __ bind(&default_case);
+ // Remember the default clause and compile it at the end.
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case clause");
+ // Compile the test.
+ next_test.Bind();
+ next_test.Unuse();
+ // Duplicate TOS.
+ __ ldr(r0, frame_->Top());
+ frame_->EmitPush(r0);
+ LoadAndSpill(clause->label());
+ Comparison(eq, true);
+ Branch(false, &next_test);
+
+ // Before entering the body from the test, remove the switch value from
+ // the stack.
+ frame_->Drop();
+
+ // Label the body so that fall through is enabled.
+ if (i > 0 && cases->at(i - 1)->is_default()) {
+ default_exit.Bind();
} else {
- __ bind(&next);
- next.Unuse();
- __ ldr(r0, frame_->Top());
- frame_->Push(r0); // duplicate TOS
- Load(clause->label());
- Comparison(eq, true);
- Branch(false, &next);
+ fall_through.Bind();
+ fall_through.Unuse();
}
+ VisitStatementsAndSpill(clause->statements());
- // Entering the case statement for the first time. Remove the switch value
- // from the stack.
- frame_->Pop();
-
- // Generate code for the body.
- // This is also the target for the fall through from the previous case's
- // statements which has to skip over the matching code and the popping of
- // the switch value.
- __ bind(&fall_through);
- fall_through.Unuse();
- VisitStatements(clause->statements());
- __ b(&fall_through);
+ // If control flow can fall through from the body, jump to the next body
+ // or the end of the statement.
+ if (frame_ != NULL) {
+ if (i < length - 1 && cases->at(i + 1)->is_default()) {
+ default_entry.Jump();
+ } else {
+ fall_through.Jump();
+ }
+ }
}
- __ bind(&next);
- // Reached the end of the case statements without matching any of the cases.
- if (default_case.is_bound()) {
- // A default case exists -> execute its statements.
- __ b(&default_case);
- } else {
- // Remove the switch value from the stack.
- frame_->Pop();
+ // The final "test" removes the switch value.
+ next_test.Bind();
+ frame_->Drop();
+
+ // If there is a default clause, compile it.
+ if (default_clause != NULL) {
+ Comment cmnt(masm_, "[ Default clause");
+ default_entry.Bind();
+ VisitStatementsAndSpill(default_clause->statements());
+ // If control flow can fall out of the default and there is a case after
+ // it, jump to that case's body.
+ if (frame_ != NULL && default_exit.is_bound()) {
+ default_exit.Jump();
+ }
+ }
+
+ if (fall_through.is_linked()) {
+ fall_through.Bind();
}
- __ bind(&fall_through);
- __ bind(node->break_target());
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ LoopStatement");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
node->set_break_stack_height(break_stack_height_);
+ node->break_target()->Initialize(this);
- // simple condition analysis
+ // Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a
+ // known result for the test expression, with no side effects.
enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
if (node->cond() == NULL) {
ASSERT(node->type() == LoopStatement::FOR_LOOP);
}
}
- Label loop, entry;
-
- // init
- if (node->init() != NULL) {
- ASSERT(node->type() == LoopStatement::FOR_LOOP);
- Visit(node->init());
- }
- if (node->type() != LoopStatement::DO_LOOP && info != ALWAYS_TRUE) {
- __ b(&entry);
- }
-
- // body
- __ bind(&loop);
- Visit(node->body());
-
- // next
- __ bind(node->continue_target());
- if (node->next() != NULL) {
- // Record source position of the statement as this code which is after the
- // code for the body actually belongs to the loop statement and not the
- // body.
- CodeForStatement(node);
- ASSERT(node->type() == LoopStatement::FOR_LOOP);
- Visit(node->next());
- }
+ switch (node->type()) {
+ case LoopStatement::DO_LOOP: {
+ JumpTarget body(this, JumpTarget::BIDIRECTIONAL);
+
+ // Label the top of the loop for the backward CFG edge. If the test
+ // is always true we can use the continue target, and if the test is
+ // always false there is no need.
+ if (info == ALWAYS_TRUE) {
+ node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else if (info == ALWAYS_FALSE) {
+ node->continue_target()->Initialize(this);
+ } else {
+ ASSERT(info == DONT_KNOW);
+ node->continue_target()->Initialize(this);
+ body.Bind();
+ }
- // cond
- __ bind(&entry);
- switch (info) {
- case ALWAYS_TRUE:
CheckStack(); // TODO(1222600): ignore if body contains calls.
- __ b(&loop);
+ VisitAndSpill(node->body());
+
+ // Compile the test.
+ if (info == ALWAYS_TRUE) {
+ if (has_valid_frame()) {
+ // If control can fall off the end of the body, jump back to the
+ // top.
+ node->continue_target()->Jump();
+ }
+ } else if (info == ALWAYS_FALSE) {
+ // If we have a continue in the body, we only have to bind its jump
+ // target.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+ } else {
+ ASSERT(info == DONT_KNOW);
+ // We have to compile the test expression if it can be reached by
+ // control flow falling out of the body or via continue.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+ if (has_valid_frame()) {
+ LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
+ &body, node->break_target(), true);
+ if (has_valid_frame()) {
+ // An invalid frame here indicates that control did not
+ // fall out of the test expression.
+ Branch(true, &body);
+ }
+ }
+ }
break;
- case ALWAYS_FALSE:
+ }
+
+ case LoopStatement::WHILE_LOOP: {
+ // If the test is never true and has no side effects there is no need
+ // to compile the test or body.
+ if (info == ALWAYS_FALSE) break;
+
+ // Label the top of the loop with the continue target for the backward
+ // CFG edge.
+ node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+
+ if (info == DONT_KNOW) {
+ JumpTarget body(this);
+ LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
+ &body, node->break_target(), true);
+ if (has_valid_frame()) {
+ // A NULL frame indicates that control did not fall out of the
+ // test expression.
+ Branch(false, node->break_target());
+ }
+ if (has_valid_frame() || body.is_linked()) {
+ body.Bind();
+ }
+ }
+
+ if (has_valid_frame()) {
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ VisitAndSpill(node->body());
+
+ // If control flow can fall out of the body, jump back to the top.
+ if (has_valid_frame()) {
+ node->continue_target()->Jump();
+ }
+ }
break;
- case DONT_KNOW:
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- LoadCondition(node->cond(),
- NOT_INSIDE_TYPEOF,
- &loop,
- node->break_target(),
- true);
- Branch(true, &loop);
+ }
+
+ case LoopStatement::FOR_LOOP: {
+ JumpTarget loop(this, JumpTarget::BIDIRECTIONAL);
+
+ if (node->init() != NULL) {
+ VisitAndSpill(node->init());
+ }
+
+ // There is no need to compile the test or body.
+ if (info == ALWAYS_FALSE) break;
+
+ // If there is no update statement, label the top of the loop with the
+ // continue target, otherwise with the loop target.
+ if (node->next() == NULL) {
+ node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else {
+ node->continue_target()->Initialize(this);
+ loop.Bind();
+ }
+
+ // If the test is always true, there is no need to compile it.
+ if (info == DONT_KNOW) {
+ JumpTarget body(this);
+ LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
+ &body, node->break_target(), true);
+ if (has_valid_frame()) {
+ Branch(false, node->break_target());
+ }
+ if (has_valid_frame() || body.is_linked()) {
+ body.Bind();
+ }
+ }
+
+ if (has_valid_frame()) {
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ VisitAndSpill(node->body());
+
+ if (node->next() == NULL) {
+ // If there is no update statement and control flow can fall out
+ // of the loop, jump directly to the continue label.
+ if (has_valid_frame()) {
+ node->continue_target()->Jump();
+ }
+ } else {
+ // If there is an update statement and control flow can reach it
+ // via falling out of the body of the loop or continuing, we
+ // compile the update statement.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+ if (has_valid_frame()) {
+ // Record the source position of the statement as this code, which
+ // comes after the code for the body, actually belongs to the loop
+ // statement and not to the body.
+ CodeForStatementPosition(node);
+ VisitAndSpill(node->next());
+ loop.Jump();
+ }
+ }
+ }
break;
+ }
}
- // exit
- __ bind(node->break_target());
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
void CodeGenerator::VisitForInStatement(ForInStatement* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ ASSERT(!in_spilled_code());
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ ForInStatement");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
// We keep stuff on the stack while the body is executing.
// Record it, so that a break/continue crossing this statement
const int kForInStackSize = 5 * kPointerSize;
break_stack_height_ += kForInStackSize;
node->set_break_stack_height(break_stack_height_);
+ node->break_target()->Initialize(this);
+ node->continue_target()->Initialize(this);
- Label loop, next, entry, cleanup, exit, primitive, jsobject;
- Label filter_key, end_del_check, fixed_array, non_string;
+ JumpTarget primitive(this);
+ JumpTarget jsobject(this);
+ JumpTarget fixed_array(this);
+ JumpTarget entry(this, JumpTarget::BIDIRECTIONAL);
+ JumpTarget end_del_check(this);
+ JumpTarget exit(this);
// Get the object to enumerate over (converted to JSObject).
- Load(node->enumerable());
- frame_->Pop(r0);
+ LoadAndSpill(node->enumerable());
// Both SpiderMonkey and kjs ignore null and undefined in contrast
// to the specification. 12.6.4 mandates a call to ToObject.
+ frame_->EmitPop(r0);
__ cmp(r0, Operand(Factory::undefined_value()));
- __ b(eq, &exit);
+ exit.Branch(eq);
__ cmp(r0, Operand(Factory::null_value()));
- __ b(eq, &exit);
+ exit.Branch(eq);
// Stack layout in body:
// [iteration counter (Smi)]
// Check if enumerable is already a JSObject
__ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &primitive);
+ primitive.Branch(eq);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
__ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
- __ b(hs, &jsobject);
-
- __ bind(&primitive);
- frame_->Push(r0);
- __ mov(r0, Operand(0));
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+ jsobject.Branch(hs);
+ primitive.Bind();
+ frame_->EmitPush(r0);
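+ // The TO_OBJECT builtin takes its argument count in r0, so r0 is
+ // claimed explicitly from the register allocator before the call.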
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(0));
+ frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, &arg_count, 1);
- __ bind(&jsobject);
-
+ jsobject.Bind();
// Get the set of properties (as a FixedArray or Map).
- frame_->Push(r0); // duplicate the object being enumerated
- frame_->Push(r0);
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ frame_->EmitPush(r0); // duplicate the object being enumerated
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
// If we got a Map, we can do a fast modification check.
// Otherwise, we got a FixedArray, and we have to do a slow check.
__ mov(r2, Operand(r0));
__ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
__ cmp(r1, Operand(Factory::meta_map()));
- __ b(ne, &fixed_array);
+ fixed_array.Branch(ne);
// Get enum cache
__ mov(r1, Operand(r0));
__ ldr(r2,
FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
- frame_->Push(r0); // map
- frame_->Push(r2); // enum cache bridge cache
+ frame_->EmitPush(r0); // map
+ frame_->EmitPush(r2); // enum cache bridge cache
__ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
__ mov(r0, Operand(Smi::FromInt(0)));
- frame_->Push(r0);
- __ b(&entry);
-
-
- __ bind(&fixed_array);
+ frame_->EmitPush(r0);
+ entry.Jump();
+ fixed_array.Bind();
__ mov(r1, Operand(Smi::FromInt(0)));
- frame_->Push(r1); // insert 0 in place of Map
- frame_->Push(r0);
+ frame_->EmitPush(r1); // insert 0 in place of Map
+ frame_->EmitPush(r0);
// Push the length of the array and the initial index onto the stack.
__ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
__ mov(r0, Operand(Smi::FromInt(0))); // init index
- frame_->Push(r0);
-
- __ b(&entry);
-
- // Body.
- __ bind(&loop);
- Visit(node->body());
-
- // Next.
- __ bind(node->continue_target());
- __ bind(&next);
- frame_->Pop(r0);
- __ add(r0, r0, Operand(Smi::FromInt(1)));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
// Condition.
- __ bind(&entry);
-
+ entry.Bind();
// sp[0] : index
// sp[1] : array/enum cache length
// sp[2] : array or enum cache
// sp[3] : 0 or map
// sp[4] : enumerable
- __ ldr(r0, frame_->Element(0)); // load the current count
- __ ldr(r1, frame_->Element(1)); // load the length
+ __ ldr(r0, frame_->ElementAt(0)); // load the current count
+ __ ldr(r1, frame_->ElementAt(1)); // load the length
__ cmp(r0, Operand(r1)); // compare to the array length
- __ b(hs, &cleanup);
+ node->break_target()->Branch(hs);
- __ ldr(r0, frame_->Element(0));
+ __ ldr(r0, frame_->ElementAt(0));
// Get the i'th entry of the array.
- __ ldr(r2, frame_->Element(2));
+ __ ldr(r2, frame_->ElementAt(2));
__ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
// Get Map or 0.
- __ ldr(r2, frame_->Element(3));
+ __ ldr(r2, frame_->ElementAt(3));
// Check if this (still) matches the map of the enumerable.
// If not, we have to filter the key.
- __ ldr(r1, frame_->Element(4));
+ __ ldr(r1, frame_->ElementAt(4));
__ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
__ cmp(r1, Operand(r2));
- __ b(eq, &end_del_check);
+ end_del_check.Branch(eq);
// Convert the entry to a string (or null if it isn't a property anymore).
- __ ldr(r0, frame_->Element(4)); // push enumerable
- frame_->Push(r0);
- frame_->Push(r3); // push entry
- __ mov(r0, Operand(1));
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS);
- __ mov(r3, Operand(r0));
+ __ ldr(r0, frame_->ElementAt(4)); // push enumerable
+ frame_->EmitPush(r0);
+ frame_->EmitPush(r3); // push entry
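+ // As for TO_OBJECT above, the builtin's argument count goes in r0 and
+ // the register is claimed through the allocator before the call.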
+ Result arg_count_register = allocator_->Allocate(r0);
+ ASSERT(arg_count_register.is_valid());
+ __ mov(arg_count_register.reg(), Operand(1));
+ Result result = frame_->InvokeBuiltin(Builtins::FILTER_KEY,
+ CALL_JS,
+ &arg_count_register,
+ 2);
+ __ mov(r3, Operand(result.reg()));
+ result.Unuse();
// If the property has been removed while iterating, we just skip it.
__ cmp(r3, Operand(Factory::null_value()));
- __ b(eq, &next);
-
-
- __ bind(&end_del_check);
+ node->continue_target()->Branch(eq);
- // Store the entry in the 'each' expression and take another spin in the loop.
- // r3: i'th entry of the enum cache (or string there of)
- frame_->Push(r3); // push entry
+ end_del_check.Bind();
+ // Store the entry in the 'each' expression and take another spin in the
+ // loop. r3: i'th entry of the enum cache (or string thereof)
+ frame_->EmitPush(r3); // push entry
{ Reference each(this, node->each());
if (!each.is_illegal()) {
if (each.size() > 0) {
- __ ldr(r0, frame_->Element(each.size()));
- frame_->Push(r0);
+ __ ldr(r0, frame_->ElementAt(each.size()));
+ frame_->EmitPush(r0);
}
// If the reference was to a slot we rely on the convenient property
// that it doesn't matter whether a value (eg, r3 pushed above) is
// ie, now the topmost value of the non-zero sized reference), since
// we will discard the top of stack after unloading the reference
// anyway.
- frame_->Pop(r0);
+ frame_->EmitPop(r0);
}
}
}
// Discard the i'th entry pushed above or else the remainder of the
// reference, whichever is currently on top of the stack.
- frame_->Pop();
+ frame_->Drop();
+
+ // Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
- __ jmp(&loop);
+ VisitAndSpill(node->body());
+
+ // Next.
+ node->continue_target()->Bind();
+ frame_->EmitPop(r0);
+ __ add(r0, r0, Operand(Smi::FromInt(1)));
+ frame_->EmitPush(r0);
+ entry.Jump();
// Cleanup.
- __ bind(&cleanup);
- __ bind(node->break_target());
+ node->break_target()->Bind();
frame_->Drop(5);
// Exit.
- __ bind(&exit);
-
+ exit.Bind();
break_stack_height_ -= kForInStackSize;
+ ASSERT(frame_->height() == original_height);
}
void CodeGenerator::VisitTryCatch(TryCatch* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ TryCatch");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
- Label try_block, exit;
+ JumpTarget try_block(this);
+ JumpTarget exit(this);
- __ bl(&try_block);
+ try_block.Call();
// --- Catch block ---
- frame_->Push(r0);
+ frame_->EmitPush(r0);
// Store the caught exception in the catch variable.
{ Reference ref(this, node->catch_var());
}
// Remove the exception from the stack.
- frame_->Pop();
+ frame_->Drop();
- VisitStatements(node->catch_block()->statements());
- __ b(&exit);
+ VisitStatementsAndSpill(node->catch_block()->statements());
+ if (frame_ != NULL) {
+ exit.Jump();
+ }
// --- Try block ---
- __ bind(&try_block);
+ try_block.Bind();
- __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
+ frame_->PushTryHandler(TRY_CATCH_HANDLER);
+ int handler_height = frame_->height();
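+ // Record the frame height with the handler pushed so the frame can be
+ // resynchronized after sp is reloaded from the handler chain below.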
// Shadow the labels for all escapes from the try block, including
// returns. During shadowing, the original label is hidden as the
//
// We should probably try to unify the escaping labels and the return
// label.
- int nof_escapes = node->escaping_labels()->length();
- List<LabelShadow*> shadows(1 + nof_escapes);
- shadows.Add(new LabelShadow(&function_return_));
+ int nof_escapes = node->escaping_targets()->length();
+ List<ShadowTarget*> shadows(1 + nof_escapes);
+
+ // Add the shadow target for the function return.
+ static const int kReturnShadowIndex = 0;
+ shadows.Add(new ShadowTarget(&function_return_));
+ bool function_return_was_shadowed = function_return_is_shadowed_;
+ function_return_is_shadowed_ = true;
+ ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
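+ // While the return target is shadowed, return statements inside the try
+ // block jump to the shadow target, so the try handler can be unlinked
+ // before the actual return is taken.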
+
+ // Add the remaining shadow targets.
for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new LabelShadow(node->escaping_labels()->at(i)));
+ shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
}
// Generate code for the statements in the try block.
- VisitStatements(node->try_block()->statements());
- // Discard the code slot from the handler.
- frame_->Pop();
+ VisitStatementsAndSpill(node->try_block()->statements());
// Stop the introduced shadowing and count the number of required unlinks.
// After shadowing stops, the original labels are unshadowed and the
shadows[i]->StopShadowing();
if (shadows[i]->is_linked()) nof_unlinks++;
}
-
- // Unlink from try chain.
- // The code slot has already been discarded, so the next index is
- // adjusted by 1.
- const int kNextIndex =
- (StackHandlerConstants::kNextOffset / kPointerSize) - 1;
- __ ldr(r1, frame_->Element(kNextIndex)); // read next_sp
- __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
- __ str(r1, MemOperand(r3));
- // The code slot has already been dropped from the handler.
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
- if (nof_unlinks > 0) __ b(&exit);
+ function_return_is_shadowed_ = function_return_was_shadowed;
+
+ const int kNextIndex = StackHandlerConstants::kNextOffset / kPointerSize;
+ // If we can fall off the end of the try block, unlink from try chain.
+ if (has_valid_frame()) {
+ // The next handler address is at kNextIndex in the stack.
+ __ ldr(r1, frame_->ElementAt(kNextIndex));
+ __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ __ str(r1, MemOperand(r3));
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize);
+ if (nof_unlinks > 0) {
+ exit.Jump();
+ }
+ }
// Generate unlink code for the (formerly) shadowing labels that have been
// jumped to.
for (int i = 0; i <= nof_escapes; i++) {
if (shadows[i]->is_linked()) {
// Unlink from try chain;
- __ bind(shadows[i]);
+ shadows[i]->Bind();
+ // Because we can be jumping here (to spilled code) from unspilled
+ // code, we need to reestablish a spilled frame at this block.
+ frame_->SpillAll();
// Reload sp from the top handler, because some statements that we
// break from (eg, for...in) may have left stuff on the stack.
__ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
__ ldr(sp, MemOperand(r3));
+ // The stack pointer was restored to just below the code slot
+ // (the topmost slot) in the handler.
+ frame_->Forget(frame_->height() - handler_height + 1);
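+ // Forget only adjusts the virtual frame's bookkeeping; sp has already
+ // been moved by the load above.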
- __ ldr(r1, frame_->Element(kNextIndex));
+ // kNextIndex is off by one because the code slot has already
+ // been dropped.
+ __ ldr(r1, frame_->ElementAt(kNextIndex - 1));
__ str(r1, MemOperand(r3));
// The code slot has already been dropped from the handler.
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
- __ b(shadows[i]->original_label());
+ if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
+ frame_->PrepareForReturn();
+ }
+ shadows[i]->other_target()->Jump();
}
}
- __ bind(&exit);
+ exit.Bind();
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
void CodeGenerator::VisitTryFinally(TryFinally* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ TryFinally");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
// State: Used to keep track of reason for entering the finally
// block. Should probably be extended to hold information for
// break/continue from within the try block.
enum { FALLING, THROWING, JUMPING };
- Label exit, unlink, try_block, finally_block;
+ JumpTarget unlink(this);
+ JumpTarget try_block(this);
+ JumpTarget finally_block(this);
- __ bl(&try_block);
+ try_block.Call();
- frame_->Push(r0); // save exception object on the stack
+ frame_->EmitPush(r0); // save exception object on the stack
// In case of thrown exceptions, this is where we continue.
__ mov(r2, Operand(Smi::FromInt(THROWING)));
- __ b(&finally_block);
-
+ finally_block.Jump();
// --- Try block ---
- __ bind(&try_block);
+ try_block.Bind();
- __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
+ frame_->PushTryHandler(TRY_FINALLY_HANDLER);
+ int handler_height = frame_->height();
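+ // Recorded for the same frame resynchronization as in VisitTryCatch.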
// Shadow the labels for all escapes from the try block, including
// returns. Shadowing hides the original label as the LabelShadow and
//
// We should probably try to unify the escaping labels and the return
// label.
- int nof_escapes = node->escaping_labels()->length();
- List<LabelShadow*> shadows(1 + nof_escapes);
- shadows.Add(new LabelShadow(&function_return_));
+ int nof_escapes = node->escaping_targets()->length();
+ List<ShadowTarget*> shadows(1 + nof_escapes);
+
+ // Add the shadow target for the function return.
+ static const int kReturnShadowIndex = 0;
+ shadows.Add(new ShadowTarget(&function_return_));
+ bool function_return_was_shadowed = function_return_is_shadowed_;
+ function_return_is_shadowed_ = true;
+ ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
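+ // As in VisitTryCatch, returns inside the try block are redirected to
+ // the shadow target so that the finally block runs before returning.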
+
+ // Add the remaining shadow targets.
for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new LabelShadow(node->escaping_labels()->at(i)));
+ shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
}
// Generate code for the statements in the try block.
- VisitStatements(node->try_block()->statements());
+ VisitStatementsAndSpill(node->try_block()->statements());
// Stop the introduced shadowing and count the number of required unlinks.
// After shadowing stops, the original labels are unshadowed and the
shadows[i]->StopShadowing();
if (shadows[i]->is_linked()) nof_unlinks++;
}
-
- // Set the state on the stack to FALLING.
- __ mov(r0, Operand(Factory::undefined_value())); // fake TOS
- frame_->Push(r0);
- __ mov(r2, Operand(Smi::FromInt(FALLING)));
- if (nof_unlinks > 0) __ b(&unlink);
+ function_return_is_shadowed_ = function_return_was_shadowed;
+
+ // If we can fall off the end of the try block, set the state on the stack
+ // to FALLING.
+ if (has_valid_frame()) {
+ __ mov(r0, Operand(Factory::undefined_value())); // fake TOS
+ frame_->EmitPush(r0);
+ __ mov(r2, Operand(Smi::FromInt(FALLING)));
+ if (nof_unlinks > 0) {
+ unlink.Jump();
+ }
+ }
// Generate code to set the state for the (formerly) shadowing labels that
// have been jumped to.
for (int i = 0; i <= nof_escapes; i++) {
if (shadows[i]->is_linked()) {
- __ bind(shadows[i]);
- if (shadows[i]->original_label() == &function_return_) {
+ // Because we can be jumping here (to spilled code) from
+ // unspilled code, we need to reestablish a spilled frame at
+ // this block.
+ shadows[i]->Bind();
+ frame_->SpillAll();
+ if (i == kReturnShadowIndex) {
// If this label shadowed the function return, materialize the
// return value on the stack.
- frame_->Push(r0);
+ frame_->EmitPush(r0);
} else {
- // Fake TOS for labels that shadowed breaks and continues.
+ // Fake TOS for targets that shadowed breaks and continues.
__ mov(r0, Operand(Factory::undefined_value()));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
}
__ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
- __ b(&unlink);
+ unlink.Jump();
}
}
// Unlink from try chain;
- __ bind(&unlink);
+ unlink.Bind();
- frame_->Pop(r0); // Preserve TOS result in r0 across stack manipulation.
+ // Preserve TOS result in r0 across stack manipulation.
+ frame_->EmitPop(r0);
// Reload sp from the top handler, because some statements that we
// break from (eg, for...in) may have left stuff on the stack.
__ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
__ ldr(sp, MemOperand(r3));
+ // The stack pointer was restored to just below the code slot
+ // (the topmost slot) in the handler.
+ frame_->Forget(frame_->height() - handler_height + 1);
const int kNextIndex = (StackHandlerConstants::kNextOffset
+ StackHandlerConstants::kAddressDisplacement)
/ kPointerSize;
- __ ldr(r1, frame_->Element(kNextIndex));
+ __ ldr(r1, frame_->ElementAt(kNextIndex));
__ str(r1, MemOperand(r3));
ASSERT(StackHandlerConstants::kCodeOffset == 0); // first field is code
- // The stack pointer was restored to just below the code slot (the
- // topmost slot) of the handler, so all but the code slot need to be
- // dropped.
+ // Drop the rest of the handler (not including the already dropped
+ // code slot).
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
- // Restore result to TOS.
- frame_->Push(r0);
+ // Restore the result to TOS.
+ frame_->EmitPush(r0);
// --- Finally block ---
- __ bind(&finally_block);
+ finally_block.Bind();
// Push the state on the stack.
- frame_->Push(r2);
+ frame_->EmitPush(r2);
// We keep two elements on the stack - the (possibly faked) result
// and the state - while evaluating the finally block. Record it, so
break_stack_height_ += kFinallyStackSize;
// Generate code for the statements in the finally block.
- VisitStatements(node->finally_block()->statements());
+ VisitStatementsAndSpill(node->finally_block()->statements());
- // Restore state and return value or faked TOS.
- frame_->Pop(r2);
- frame_->Pop(r0);
break_stack_height_ -= kFinallyStackSize;
-
- // Generate code to jump to the right destination for all used (formerly)
- // shadowing labels.
- for (int i = 0; i <= nof_escapes; i++) {
- if (shadows[i]->is_bound()) {
- __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
- __ b(eq, shadows[i]->original_label());
+ if (has_valid_frame()) {
+ JumpTarget exit(this);
+ // Restore state and return value or faked TOS.
+ frame_->EmitPop(r2);
+ frame_->EmitPop(r0);
+
+ // Generate code to jump to the right destination for all used (formerly)
+ // shadowing targets.
+ for (int i = 0; i <= nof_escapes; i++) {
+ if (shadows[i]->is_bound()) {
+ JumpTarget* original = shadows[i]->other_target();
+ __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
+ if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
+ JumpTarget skip(this);
+ skip.Branch(ne);
+ frame_->PrepareForReturn();
+ original->Jump();
+ skip.Bind();
+ } else {
+ original->Branch(eq);
+ }
+ }
}
- }
- // Check if we need to rethrow the exception.
- __ cmp(r2, Operand(Smi::FromInt(THROWING)));
- __ b(ne, &exit);
+ // Check if we need to rethrow the exception.
+ __ cmp(r2, Operand(Smi::FromInt(THROWING)));
+ exit.Branch(ne);
- // Rethrow exception.
- frame_->Push(r0);
- __ CallRuntime(Runtime::kReThrow, 1);
+ // Rethrow exception.
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kReThrow, 1);
- // Done.
- __ bind(&exit);
+ // Done.
+ exit.Bind();
+ }
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ DebuggerStatament");
- CodeForStatement(node);
- __ CallRuntime(Runtime::kDebugBreak, 0);
+ CodeForStatementPosition(node);
+ frame_->CallRuntime(Runtime::kDebugBreak, 0);
// Ignore the return value.
+ ASSERT(frame_->height() == original_height);
}
void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+ VirtualFrame::SpilledScope spilled_scope(this);
ASSERT(boilerplate->IsBoilerplate());
// Push the boilerplate on the stack.
__ mov(r0, Operand(boilerplate));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
// Create a new closure.
- frame_->Push(cp);
- __ CallRuntime(Runtime::kNewClosure, 2);
- frame_->Push(r0);
+ frame_->EmitPush(cp);
+ frame_->CallRuntime(Runtime::kNewClosure, 2);
+ frame_->EmitPush(r0);
}
void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
Handle<JSFunction> boilerplate = BuildBoilerplate(node);
// Check for stack-overflow exception.
- if (HasStackOverflow()) return;
+ if (HasStackOverflow()) {
+ ASSERT(frame_->height() == original_height);
+ return;
+ }
InstantiateBoilerplate(boilerplate);
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::VisitFunctionBoilerplateLiteral(
FunctionBoilerplateLiteral* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
InstantiateBoilerplate(node->boilerplate());
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::VisitConditional(Conditional* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ Conditional");
- Label then, else_, exit;
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &then, &else_, true);
+ JumpTarget then(this);
+ JumpTarget else_(this);
+ JumpTarget exit(this);
+ LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+ &then, &else_, true);
Branch(false, &else_);
- __ bind(&then);
- Load(node->then_expression(), typeof_state());
- __ b(&exit);
- __ bind(&else_);
- Load(node->else_expression(), typeof_state());
- __ bind(&exit);
+ then.Bind();
+ LoadAndSpill(node->then_expression(), typeof_state());
+ exit.Jump();
+ else_.Bind();
+ LoadAndSpill(node->else_expression(), typeof_state());
+ exit.Bind();
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+ VirtualFrame::SpilledScope spilled_scope(this);
if (slot->type() == Slot::LOOKUP) {
ASSERT(slot->var()->is_dynamic());
- Label slow, done;
+ JumpTarget slow(this);
+ JumpTarget done(this);
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// containing the eval.
if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
LoadFromGlobalSlotCheckExtensions(slot, typeof_state, r1, r2, &slow);
- __ b(&done);
+ // If there was no control flow to slow, we can exit early.
+ if (!slow.is_linked()) {
+ frame_->EmitPush(r0);
+ return;
+ }
+
+ done.Jump();
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
r1,
r2,
&slow));
- __ b(&done);
+ // There is always control flow to slow from
+ // ContextSlotOperandCheckExtensions.
+ done.Jump();
}
}
- __ bind(&slow);
- frame_->Push(cp);
+ slow.Bind();
+ frame_->EmitPush(cp);
__ mov(r0, Operand(slot->var()->name()));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
if (typeof_state == INSIDE_TYPEOF) {
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
} else {
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
}
- __ bind(&done);
- frame_->Push(r0);
+ done.Bind();
+ frame_->EmitPush(r0);
} else {
// Note: We would like to keep the assert below, but it fires because of
// Special handling for locals allocated in registers.
__ ldr(r0, SlotOperand(slot, r2));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
if (slot->var()->mode() == Variable::CONST) {
// Const slots may contain 'the hole' value (the constant hasn't been
// initialized yet) which needs to be converted into the 'undefined'
// value.
Comment cmnt(masm_, "[ Unhole const");
- frame_->Pop(r0);
+ frame_->EmitPop(r0);
__ cmp(r0, Operand(Factory::the_hole_value()));
__ mov(r0, Operand(Factory::undefined_value()), LeaveCC, eq);
- frame_->Push(r0);
+ frame_->EmitPush(r0);
}
}
}
TypeofState typeof_state,
Register tmp,
Register tmp2,
- Label* slow) {
+ JumpTarget* slow) {
// Check that no extension objects have been created by calls to
// eval from the current scope to the global scope.
Register context = cp;
// Check that extension is NULL.
__ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(tmp2, tmp2);
- __ b(ne, slow);
+ slow->Branch(ne);
}
// Load next context in chain.
__ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
// Check that extension is NULL.
__ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
__ tst(tmp2, tmp2);
- __ b(ne, slow);
+ slow->Branch(ne);
// Load next context in chain.
__ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
__ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
// Load the global object.
LoadGlobal();
// Setup the name register.
- __ mov(r2, Operand(slot->var()->name()));
+ Result name = allocator_->Allocate(r2);
+ ASSERT(name.is_valid()); // We are in spilled code.
+ __ mov(name.reg(), Operand(slot->var()->name()));
// Call IC stub.
if (typeof_state == INSIDE_TYPEOF) {
- __ Call(ic, RelocInfo::CODE_TARGET);
+ frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, &name, 0);
} else {
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, &name, 0);
}
- // Pop the global object. The result is in r0.
- frame_->Pop();
+ // Drop the global object. The result is in r0.
+ frame_->Drop();
}
void CodeGenerator::VisitSlot(Slot* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ Slot");
LoadFromSlot(node, typeof_state());
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ VariableProxy");
Variable* var = node->var();
} else {
ASSERT(var->is_global());
Reference ref(this, node);
- ref.GetValue(typeof_state());
+ ref.GetValueAndSpill(typeof_state());
}
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::VisitLiteral(Literal* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ Literal");
__ mov(r0, Operand(node->handle()));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ RexExp Literal");
// Retrieve the literal array and check the allocated entry.
FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
__ ldr(r2, FieldMemOperand(r1, literal_offset));
- Label done;
+ JumpTarget done(this);
__ cmp(r2, Operand(Factory::undefined_value()));
- __ b(ne, &done);
+ done.Branch(ne);
// If the entry is undefined we call the runtime system to computed
// the literal.
- frame_->Push(r1); // literal array (0)
+ frame_->EmitPush(r1); // literal array (0)
__ mov(r0, Operand(Smi::FromInt(node->literal_index())));
- frame_->Push(r0); // literal index (1)
+ frame_->EmitPush(r0); // literal index (1)
__ mov(r0, Operand(node->pattern())); // RegExp pattern (2)
- frame_->Push(r0);
+ frame_->EmitPush(r0);
__ mov(r0, Operand(node->flags())); // RegExp flags (3)
- frame_->Push(r0);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
__ mov(r2, Operand(r0));
- __ bind(&done);
+ done.Bind();
// Push the literal.
- frame_->Push(r2);
+ frame_->EmitPush(r2);
+ ASSERT(frame_->height() == original_height + 1);
}
// by calling Runtime_CreateObjectLiteral.
// Each created boilerplate is stored in the JSFunction and they are
// therefore context dependent.
-class ObjectLiteralDeferred: public DeferredCode {
+class DeferredObjectLiteral: public DeferredCode {
public:
- ObjectLiteralDeferred(CodeGenerator* generator, ObjectLiteral* node)
+ DeferredObjectLiteral(CodeGenerator* generator, ObjectLiteral* node)
: DeferredCode(generator), node_(node) {
- set_comment("[ ObjectLiteralDeferred");
+ set_comment("[ DeferredObjectLiteral");
}
+
virtual void Generate();
+
private:
ObjectLiteral* node_;
};
-void ObjectLiteralDeferred::Generate() {
+void DeferredObjectLiteral::Generate() {
+ // Argument is passed in r1.
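+ // Deferred code runs out of line: it binds its entry target, keeps the
+ // frame spilled, and jumps back to the inline exit target when done.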
+ enter()->Bind();
+ VirtualFrame::SpilledScope spilled_scope(generator());
+
// If the entry is undefined we call the runtime system to computed
// the literal.
+ VirtualFrame* frame = generator()->frame();
// Literal array (0).
- __ push(r1);
+ frame->EmitPush(r1);
// Literal index (1).
__ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
- __ push(r0);
+ frame->EmitPush(r0);
// Constant properties (2).
__ mov(r0, Operand(node_->constant_properties()));
- __ push(r0);
- __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
- __ mov(r2, Operand(r0));
+ frame->EmitPush(r0);
+ Result boilerplate =
+ frame->CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ __ mov(r2, Operand(boilerplate.reg()));
+ // Result is returned in r2.
+ exit_.Jump();
}
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ ObjectLiteral");
- ObjectLiteralDeferred* deferred = new ObjectLiteralDeferred(this, node);
+ DeferredObjectLiteral* deferred = new DeferredObjectLiteral(this, node);
// Retrieve the literal array and check the allocated entry.
// Check whether we need to materialize the object literal boilerplate.
// If so, jump to the deferred code.
__ cmp(r2, Operand(Factory::undefined_value()));
- __ b(eq, deferred->enter());
- __ bind(deferred->exit());
+ deferred->enter()->Branch(eq);
+ deferred->BindExit();
// Push the object literal boilerplate.
- frame_->Push(r2);
+ frame_->EmitPush(r2);
// Clone the boilerplate object.
- __ CallRuntime(Runtime::kCloneObjectLiteralBoilerplate, 1);
- frame_->Push(r0); // save the result
+ frame_->CallRuntime(Runtime::kCloneObjectLiteralBoilerplate, 1);
+ frame_->EmitPush(r0); // save the result
// r0: cloned object literal
for (int i = 0; i < node->properties()->length(); i++) {
case ObjectLiteral::Property::CONSTANT: break;
case ObjectLiteral::Property::COMPUTED: // fall through
case ObjectLiteral::Property::PROTOTYPE: {
- frame_->Push(r0); // dup the result
- Load(key);
- Load(value);
- __ CallRuntime(Runtime::kSetProperty, 3);
+ frame_->EmitPush(r0); // dup the result
+ LoadAndSpill(key);
+ LoadAndSpill(value);
+ frame_->CallRuntime(Runtime::kSetProperty, 3);
// restore r0
__ ldr(r0, frame_->Top());
break;
}
case ObjectLiteral::Property::SETTER: {
- frame_->Push(r0);
- Load(key);
+ frame_->EmitPush(r0);
+ LoadAndSpill(key);
__ mov(r0, Operand(Smi::FromInt(1)));
- frame_->Push(r0);
- Load(value);
- __ CallRuntime(Runtime::kDefineAccessor, 4);
+ frame_->EmitPush(r0);
+ LoadAndSpill(value);
+ frame_->CallRuntime(Runtime::kDefineAccessor, 4);
__ ldr(r0, frame_->Top());
break;
}
case ObjectLiteral::Property::GETTER: {
- frame_->Push(r0);
- Load(key);
+ frame_->EmitPush(r0);
+ LoadAndSpill(key);
__ mov(r0, Operand(Smi::FromInt(0)));
- frame_->Push(r0);
- Load(value);
- __ CallRuntime(Runtime::kDefineAccessor, 4);
+ frame_->EmitPush(r0);
+ LoadAndSpill(value);
+ frame_->CallRuntime(Runtime::kDefineAccessor, 4);
__ ldr(r0, frame_->Top());
break;
}
}
}
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ ArrayLiteral");
// Call runtime to create the array literal.
__ mov(r0, Operand(node->literals()));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
// Load the function of this frame.
__ ldr(r0, frame_->Function());
__ ldr(r0, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
- frame_->Push(r0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 2);
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kCreateArrayLiteral, 2);
// Push the resulting array literal on the stack.
- frame_->Push(r0);
+ frame_->EmitPush(r0);
// Generate code to set the elements in the array that are not
// literals.
// set in the boilerplate object.
if (value->AsLiteral() == NULL) {
// The property must be set by generated code.
- Load(value);
- frame_->Pop(r0);
+ LoadAndSpill(value);
+ frame_->EmitPop(r0);
// Fetch the object literal
__ ldr(r1, frame_->Top());
__ RecordWrite(r1, r3, r2);
}
}
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ ASSERT(!in_spilled_code());
+ VirtualFrame::SpilledScope spilled_scope(this);
// Call runtime routine to allocate the catch extension object and
// assign the exception value to the catch variable.
- Comment cmnt(masm_, "[CatchExtensionObject ");
- Load(node->key());
- Load(node->value());
- __ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- frame_->Push(r0);
+ Comment cmnt(masm_, "[ CatchExtensionObject");
+ LoadAndSpill(node->key());
+ LoadAndSpill(node->value());
+ Result result =
+ frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
+ frame_->EmitPush(result.reg());
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::VisitAssignment(Assignment* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ Assignment");
- CodeForStatement(node);
-
- Reference target(this, node->target());
- if (target.is_illegal()) return;
-
- if (node->op() == Token::ASSIGN ||
- node->op() == Token::INIT_VAR ||
- node->op() == Token::INIT_CONST) {
- Load(node->value());
+ CodeForStatementPosition(node);
+
+ { Reference target(this, node->target());
+ if (target.is_illegal()) {
+ // Fool the virtual frame into thinking that we left the assignment's
+ // value on the frame.
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ frame_->EmitPush(r0);
+ ASSERT(frame_->height() == original_height + 1);
+ return;
+ }
- } else {
- target.GetValue(NOT_INSIDE_TYPEOF);
- Literal* literal = node->value()->AsLiteral();
- if (literal != NULL && literal->handle()->IsSmi()) {
- SmiOperation(node->binary_op(), literal->handle(), false);
- frame_->Push(r0);
+ if (node->op() == Token::ASSIGN ||
+ node->op() == Token::INIT_VAR ||
+ node->op() == Token::INIT_CONST) {
+ LoadAndSpill(node->value());
} else {
- Load(node->value());
- GenericBinaryOperation(node->binary_op());
- frame_->Push(r0);
+ target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
+ Literal* literal = node->value()->AsLiteral();
+ if (literal != NULL && literal->handle()->IsSmi()) {
+ SmiOperation(node->binary_op(), literal->handle(), false);
+ frame_->EmitPush(r0);
+
+ } else {
+ LoadAndSpill(node->value());
+ GenericBinaryOperation(node->binary_op());
+ frame_->EmitPush(r0);
+ }
}
- }
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- if (var != NULL &&
- (var->mode() == Variable::CONST) &&
- node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
- // Assignment ignored - leave the value on the stack.
+ Variable* var = node->target()->AsVariableProxy()->AsVariable();
+ if (var != NULL &&
+ (var->mode() == Variable::CONST) &&
+ node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
+ // Assignment ignored - leave the value on the stack.
- } else {
- CodeForSourcePosition(node->position());
- if (node->op() == Token::INIT_CONST) {
- // Dynamic constant initializations must use the function context
- // and initialize the actual constant declared. Dynamic variable
- // initializations are simply assignments and use SetValue.
- target.SetValue(CONST_INIT);
} else {
- target.SetValue(NOT_CONST_INIT);
+ CodeForSourcePosition(node->position());
+ if (node->op() == Token::INIT_CONST) {
+ // Dynamic constant initializations must use the function context
+ // and initialize the actual constant declared. Dynamic variable
+ // initializations are simply assignments and use SetValue.
+ target.SetValue(CONST_INIT);
+ } else {
+ target.SetValue(NOT_CONST_INIT);
+ }
}
}
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::VisitThrow(Throw* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ Throw");
- Load(node->exception());
+ LoadAndSpill(node->exception());
CodeForSourcePosition(node->position());
- __ CallRuntime(Runtime::kThrow, 1);
- frame_->Push(r0);
+ frame_->CallRuntime(Runtime::kThrow, 1);
+ frame_->EmitPush(r0);
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::VisitProperty(Property* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ Property");
- Reference property(this, node);
- property.GetValue(typeof_state());
+ { Reference property(this, node);
+ property.GetValueAndSpill(typeof_state());
+ }
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::VisitCall(Call* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ Call");
ZoneList<Expression*>* args = node->arguments();
- CodeForStatement(node);
+ CodeForStatementPosition(node);
// Standard function call.
// Check if the function is a variable or a property.
// Push the name of the function and the receiver onto the stack.
__ mov(r0, Operand(var->name()));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
// Pass the global object as the receiver and let the IC stub
// patch the stack to use the global proxy as 'this' in the
LoadGlobal();
// Load the arguments.
- for (int i = 0; i < args->length(); i++) Load(args->at(i));
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ LoadAndSpill(args->at(i));
+ }
// Setup the receiver register and call the IC initialization code.
- Handle<Code> stub = ComputeCallInitialize(args->length());
+ Handle<Code> stub = ComputeCallInitialize(arg_count);
CodeForSourcePosition(node->position());
- __ Call(stub, RelocInfo::CODE_TARGET_CONTEXT);
+ frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
+ arg_count + 1);
__ ldr(cp, frame_->Context());
// Remove the function from the stack.
- frame_->Pop();
- frame_->Push(r0);
+ frame_->Drop();
+ frame_->EmitPush(r0);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// ----------------------------------
// Load the function
- frame_->Push(cp);
+ frame_->EmitPush(cp);
__ mov(r0, Operand(var->name()));
- frame_->Push(r0);
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
// r0: slot value; r1: receiver
// Load the receiver.
- frame_->Push(r0); // function
- frame_->Push(r1); // receiver
+ frame_->EmitPush(r0); // function
+ frame_->EmitPush(r1); // receiver
// Call the function.
CallWithArguments(args, node->position());
- frame_->Push(r0);
+ frame_->EmitPush(r0);
} else if (property != NULL) {
// Check if the key is a literal string.
// Push the name of the function and the receiver onto the stack.
__ mov(r0, Operand(literal->handle()));
- frame_->Push(r0);
- Load(property->obj());
+ frame_->EmitPush(r0);
+ LoadAndSpill(property->obj());
// Load the arguments.
- for (int i = 0; i < args->length(); i++) Load(args->at(i));
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ LoadAndSpill(args->at(i));
+ }
// Set the receiver register and call the IC initialization code.
- Handle<Code> stub = ComputeCallInitialize(args->length());
+ Handle<Code> stub = ComputeCallInitialize(arg_count);
CodeForSourcePosition(node->position());
- __ Call(stub, RelocInfo::CODE_TARGET);
+ frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
__ ldr(cp, frame_->Context());
// Remove the function from the stack.
- frame_->Pop();
+ frame_->Drop();
- frame_->Push(r0); // push after get rid of function from the stack
+ frame_->EmitPush(r0); // push after removing the function from the stack
} else {
// -------------------------------------------
// Load the function to call from the property through a reference.
Reference ref(this, property);
- ref.GetValue(NOT_INSIDE_TYPEOF); // receiver
+ ref.GetValueAndSpill(NOT_INSIDE_TYPEOF); // receiver
// Pass receiver to called function.
- __ ldr(r0, frame_->Element(ref.size()));
- frame_->Push(r0);
+ __ ldr(r0, frame_->ElementAt(ref.size()));
+ frame_->EmitPush(r0);
// Call the function.
CallWithArguments(args, node->position());
- frame_->Push(r0);
+ frame_->EmitPush(r0);
}
} else {
// ----------------------------------
// Load the function.
- Load(function);
+ LoadAndSpill(function);
// Pass the global proxy as the receiver.
LoadGlobalReceiver(r0);
// Call the function.
CallWithArguments(args, node->position());
- frame_->Push(r0);
+ frame_->EmitPush(r0);
}
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::VisitCallEval(CallEval* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ CallEval");
// In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
ZoneList<Expression*>* args = node->arguments();
Expression* function = node->expression();
- CodeForStatement(node);
+ CodeForStatementPosition(node);
// Prepare stack for call to resolved function.
- Load(function);
+ LoadAndSpill(function);
__ mov(r2, Operand(Factory::undefined_value()));
- __ push(r2); // Slot for receiver
- for (int i = 0; i < args->length(); i++) {
- Load(args->at(i));
+ frame_->EmitPush(r2); // Slot for receiver
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ LoadAndSpill(args->at(i));
}
// Prepare stack for call to ResolvePossiblyDirectEval.
- __ ldr(r1, MemOperand(sp, args->length() * kPointerSize + kPointerSize));
- __ push(r1);
- if (args->length() > 0) {
- __ ldr(r1, MemOperand(sp, args->length() * kPointerSize));
- __ push(r1);
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
+ frame_->EmitPush(r1);
+ if (arg_count > 0) {
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+ frame_->EmitPush(r1);
} else {
- __ push(r2);
+ frame_->EmitPush(r2);
}
// Resolve the call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
// Touch up stack with the right values for the function and the receiver.
__ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
- __ str(r1, MemOperand(sp, (args->length() + 1) * kPointerSize));
+ __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
- __ str(r1, MemOperand(sp, args->length() * kPointerSize));
+ __ str(r1, MemOperand(sp, arg_count * kPointerSize));
// Call the function.
CodeForSourcePosition(node->position());
- CallFunctionStub call_function(args->length());
- __ CallStub(&call_function);
+ CallFunctionStub call_function(arg_count);
+ frame_->CallStub(&call_function, arg_count + 1);
__ ldr(cp, frame_->Context());
// Remove the function from the stack.
- frame_->Pop();
- frame_->Push(r0);
+ frame_->Drop();
+ frame_->EmitPush(r0);
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::VisitCallNew(CallNew* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ CallNew");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
// Compute function to call and use the global object as the
// receiver. There is no need to use the global proxy here because
// it will always be replaced with a newly allocated object.
- Load(node->expression());
+ LoadAndSpill(node->expression());
LoadGlobal();
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = node->arguments();
- for (int i = 0; i < args->length(); i++) Load(args->at(i));
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ LoadAndSpill(args->at(i));
+ }
// r0: the number of arguments.
- __ mov(r0, Operand(args->length()));
+ Result num_args = allocator_->Allocate(r0);
+ ASSERT(num_args.is_valid());
+ __ mov(num_args.reg(), Operand(arg_count));
// Load the function into r1 as per calling convention.
- __ ldr(r1, frame_->Element(args->length() + 1));
+ Result function = allocator_->Allocate(r1);
+ ASSERT(function.is_valid());
+ __ ldr(function.reg(), frame_->ElementAt(arg_count + 1));
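+ // The registers are claimed through the allocator rather than used
+ // directly so that the virtual frame's register tracking stays
+ // consistent across the call setup.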
// Call the construct call builtin that handles allocation and
// constructor invocation.
CodeForSourcePosition(node->position());
- __ Call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
- RelocInfo::CONSTRUCT_CALL);
+ Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
+ Result result = frame_->CallCodeObject(ic,
+ RelocInfo::CONSTRUCT_CALL,
+ &num_args,
+ &function,
+ arg_count + 1);
// Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
__ str(r0, frame_->Top());
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope(this);
ASSERT(args->length() == 1);
- Label leave;
- Load(args->at(0));
- frame_->Pop(r0); // r0 contains object.
+ JumpTarget leave(this);
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r0); // r0 contains object.
// if (object->IsSmi()) return the object.
__ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &leave);
+ leave.Branch(eq);
// It is a heap object - get map.
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
// if (!object->IsJSValue()) return the object.
__ cmp(r1, Operand(JS_VALUE_TYPE));
- __ b(ne, &leave);
+ leave.Branch(ne);
// Load the value.
__ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
- __ bind(&leave);
- frame_->Push(r0);
+ leave.Bind();
+ frame_->EmitPush(r0);
}
void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope(this);
ASSERT(args->length() == 2);
- Label leave;
- Load(args->at(0)); // Load the object.
- Load(args->at(1)); // Load the value.
- frame_->Pop(r0); // r0 contains value
- frame_->Pop(r1); // r1 contains object
+ JumpTarget leave(this);
+ LoadAndSpill(args->at(0)); // Load the object.
+ LoadAndSpill(args->at(1)); // Load the value.
+ frame_->EmitPop(r0); // r0 contains value
+ frame_->EmitPop(r1); // r1 contains object
// if (object->IsSmi()) return object.
__ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &leave);
+ leave.Branch(eq);
// It is a heap object - get map.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
// if (!object->IsJSValue()) return object.
__ cmp(r2, Operand(JS_VALUE_TYPE));
- __ b(ne, &leave);
+ leave.Branch(ne);
// Store the value.
__ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
// Update the write barrier.
__ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
__ RecordWrite(r1, r2, r3);
// Leave.
- __ bind(&leave);
- frame_->Push(r0);
+ leave.Bind();
+ frame_->EmitPush(r0);
}
void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope(this);
ASSERT(args->length() == 1);
- Load(args->at(0));
- frame_->Pop(r0);
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r0);
__ tst(r0, Operand(kSmiTagMask));
cc_reg_ = eq;
}
void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope(this);
// See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
ASSERT_EQ(args->length(), 3);
#ifdef ENABLE_LOGGING_AND_PROFILING
if (ShouldGenerateLog(args->at(0))) {
- Load(args->at(1));
- Load(args->at(2));
+ LoadAndSpill(args->at(1));
+ LoadAndSpill(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
}
#endif
__ mov(r0, Operand(Factory::undefined_value()));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
}
void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope(this);
ASSERT(args->length() == 1);
- Load(args->at(0));
- frame_->Pop(r0);
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r0);
__ tst(r0, Operand(kSmiTagMask | 0x80000000));
cc_reg_ = eq;
}
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
// It is not yet implemented on ARM, so it always goes to the slow case.
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope(this);
ASSERT(args->length() == 2);
__ mov(r0, Operand(Factory::undefined_value()));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
}
void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope(this);
ASSERT(args->length() == 1);
- Load(args->at(0));
- Label answer;
+ LoadAndSpill(args->at(0));
+ JumpTarget answer(this);
// We need the CC bits to come out as not_equal in the case where the
// object is a smi. This can't be done with the usual test opcode so
// we use XOR to get the right CC bits.
- frame_->Pop(r0);
+ frame_->EmitPop(r0);
__ and_(r1, r0, Operand(kSmiTagMask));
__ eor(r1, r1, Operand(kSmiTagMask), SetCC);
- __ b(ne, &answer);
+ answer.Branch(ne);
// It is a heap object - get the map.
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
// Check if the object is a JS array or not.
__ cmp(r1, Operand(JS_ARRAY_TYPE));
- __ bind(&answer);
+ answer.Bind();
cc_reg_ = eq;
}
void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope(this);
ASSERT(args->length() == 0);
// Seed the result with the formal parameters count, which will be used
// Call the shared stub to get to the arguments.length.
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
- __ CallStub(&stub);
- frame_->Push(r0);
+ frame_->CallStub(&stub, 0);
+ frame_->EmitPush(r0);
}
void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope(this);
ASSERT(args->length() == 1);
// Satisfy contract with ArgumentsAccessStub:
// Load the key into r1 and the formal parameters count into r0.
- Load(args->at(0));
- frame_->Pop(r1);
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r1);
__ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
// Call the shared stub to get to arguments[key].
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- frame_->Push(r0);
+ frame_->CallStub(&stub, 0);
+ frame_->EmitPush(r0);
}
void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope(this);
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
- Load(args->at(0));
- Load(args->at(1));
- frame_->Pop(r0);
- frame_->Pop(r1);
+ LoadAndSpill(args->at(0));
+ LoadAndSpill(args->at(1));
+ frame_->EmitPop(r0);
+ frame_->EmitPop(r1);
__ cmp(r0, Operand(r1));
cc_reg_ = eq;
}
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
- if (CheckForInlineRuntimeCall(node)) return;
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
+ if (CheckForInlineRuntimeCall(node)) {
+ ASSERT((has_cc() && frame_->height() == original_height) ||
+ (!has_cc() && frame_->height() == original_height + 1));
+ return;
+ }
ZoneList<Expression*>* args = node->arguments();
Comment cmnt(masm_, "[ CallRuntime");
if (function != NULL) {
// Push the arguments ("left-to-right").
- for (int i = 0; i < args->length(); i++) Load(args->at(i));
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ LoadAndSpill(args->at(i));
+ }
// Call the C runtime function.
- __ CallRuntime(function, args->length());
- frame_->Push(r0);
+ frame_->CallRuntime(function, arg_count);
+ frame_->EmitPush(r0);
} else {
// Prepare stack for calling JS runtime function.
__ mov(r0, Operand(node->name()));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
// Push the builtins object found in the current global object.
__ ldr(r1, GlobalObject());
__ ldr(r0, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
- for (int i = 0; i < args->length(); i++) Load(args->at(i));
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ LoadAndSpill(args->at(i));
+ }
// Call the JS runtime function.
Handle<Code> stub = ComputeCallInitialize(args->length());
- __ Call(stub, RelocInfo::CODE_TARGET);
+ frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
__ ldr(cp, frame_->Context());
- frame_->Pop();
- frame_->Push(r0);
+ frame_->Drop();
+ frame_->EmitPush(r0);
}
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ UnaryOperation");
Token::Value op = node->op();
if (op == Token::NOT) {
- LoadCondition(node->expression(),
- NOT_INSIDE_TYPEOF,
- false_target(),
- true_target(),
- true);
+ LoadConditionAndSpill(node->expression(),
+ NOT_INSIDE_TYPEOF,
+ false_target(),
+ true_target(),
+ true);
cc_reg_ = NegateCondition(cc_reg_);
} else if (op == Token::DELETE) {
Property* property = node->expression()->AsProperty();
Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
if (property != NULL) {
- Load(property->obj());
- Load(property->key());
- __ mov(r0, Operand(1)); // not counting receiver
- __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
+ LoadAndSpill(property->obj());
+ LoadAndSpill(property->key());
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(1)); // not counting receiver
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
} else if (variable != NULL) {
Slot* slot = variable->slot();
if (variable->is_global()) {
LoadGlobal();
__ mov(r0, Operand(variable->name()));
- frame_->Push(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
+ frame_->EmitPush(r0);
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(1)); // not counting receiver
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// lookup the context holding the named variable
- frame_->Push(cp);
+ frame_->EmitPush(cp);
__ mov(r0, Operand(variable->name()));
- frame_->Push(r0);
- __ CallRuntime(Runtime::kLookupContext, 2);
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kLookupContext, 2);
// r0: context
- frame_->Push(r0);
+ frame_->EmitPush(r0);
__ mov(r0, Operand(variable->name()));
- frame_->Push(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
+ frame_->EmitPush(r0);
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(1)); // not counting receiver
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
} else {
// Default: Result of deleting non-global, not dynamically
} else {
// Default: Result of deleting expressions is true.
- Load(node->expression()); // may have side-effects
- frame_->Pop();
+ LoadAndSpill(node->expression()); // may have side-effects
+ frame_->Drop();
__ mov(r0, Operand(Factory::true_value()));
}
- frame_->Push(r0);
+ frame_->EmitPush(r0);
} else if (op == Token::TYPEOF) {
// Special case for loading the typeof expression; see comment on
// LoadTypeofExpression().
LoadTypeofExpression(node->expression());
- __ CallRuntime(Runtime::kTypeof, 1);
- frame_->Push(r0); // r0 has result
+ frame_->CallRuntime(Runtime::kTypeof, 1);
+ frame_->EmitPush(r0); // r0 has result
} else {
- Load(node->expression());
- frame_->Pop(r0);
+ LoadAndSpill(node->expression());
+ frame_->EmitPop(r0);
switch (op) {
case Token::NOT:
case Token::DELETE:
case Token::SUB: {
UnarySubStub stub;
- __ CallStub(&stub);
+ frame_->CallStub(&stub, 0);
break;
}
case Token::BIT_NOT: {
// smi check
- Label smi_label;
- Label continue_label;
+ JumpTarget smi_label(this);
+ JumpTarget continue_label(this);
__ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &smi_label);
+ smi_label.Branch(eq);
- frame_->Push(r0);
- __ mov(r0, Operand(0)); // not counting receiver
- __ InvokeBuiltin(Builtins::BIT_NOT, CALL_JS);
+ frame_->EmitPush(r0);
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(0)); // not counting receiver
+ frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, &arg_count, 1);
- __ b(&continue_label);
- __ bind(&smi_label);
+ continue_label.Jump();
+ smi_label.Bind();
__ mvn(r0, Operand(r0));
__ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag
- __ bind(&continue_label);
+ continue_label.Bind();
break;
}
case Token::ADD: {
// Smi check.
- Label continue_label;
+ JumpTarget continue_label(this);
__ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &continue_label);
- frame_->Push(r0);
- __ mov(r0, Operand(0)); // not counting receiver
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
- __ bind(&continue_label);
+ continue_label.Branch(eq);
+ frame_->EmitPush(r0);
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(0)); // not counting receiver
+ frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+ continue_label.Bind();
break;
}
default:
UNREACHABLE();
}
- frame_->Push(r0); // r0 has result
+ frame_->EmitPush(r0); // r0 has result
}
+ ASSERT((has_cc() && frame_->height() == original_height) ||
+ (!has_cc() && frame_->height() == original_height + 1));
}
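
The DELETE, BIT_NOT, and TO_NUMBER cases above all switch from writing the builtin's argument count straight into r0 to routing it through the register allocator, so the virtual frame knows r0 is live across the call. A condensed sketch of that pattern, using the names from the hunk (illustrative only):

// Old form (the removed lines):
//   __ mov(r0, Operand(1));  // not counting receiver
//   __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
// New form: the count is an allocator-managed Result pinned to r0, and the
// call is made through the frame, which adjusts its height for the call.
Result arg_count = allocator_->Allocate(r0);
ASSERT(arg_count.is_valid());
__ mov(arg_count.reg(), Operand(1));  // not counting receiver
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
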
void CodeGenerator::VisitCountOperation(CountOperation* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ CountOperation");
bool is_postfix = node->is_postfix();
// Postfix: Make room for the result.
if (is_postfix) {
__ mov(r0, Operand(0));
- frame_->Push(r0);
+ frame_->EmitPush(r0);
}
{ Reference target(this, node->expression());
- if (target.is_illegal()) return;
- target.GetValue(NOT_INSIDE_TYPEOF);
- frame_->Pop(r0);
+ if (target.is_illegal()) {
+ // Spoof the virtual frame to have the expected height (one higher
+ // than on entry).
+ if (!is_postfix) {
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ frame_->EmitPush(r0);
+ }
+ ASSERT(frame_->height() == original_height + 1);
+ return;
+ }
+ target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
+ frame_->EmitPop(r0);
- Label slow, exit;
+ JumpTarget slow(this);
+ JumpTarget exit(this);
// Load the value (1) into register r1.
__ mov(r1, Operand(Smi::FromInt(1)));
// Check for smi operand.
__ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &slow);
+ slow.Branch(ne);
// Postfix: Store the old value as the result.
if (is_postfix) {
- __ str(r0, frame_->Element(target.size()));
+ __ str(r0, frame_->ElementAt(target.size()));
}
// Perform optimistic increment/decrement.
}
// If the increment/decrement didn't overflow, we're done.
- __ b(vc, &exit);
+ exit.Branch(vc);
// Revert optimistic increment/decrement.
if (is_increment) {
}
// Slow case: Convert to number.
- __ bind(&slow);
+ slow.Bind();
// Postfix: Convert the operand to a number and store it as the result.
if (is_postfix) {
InvokeBuiltinStub stub(InvokeBuiltinStub::ToNumber, 2);
- __ CallStub(&stub);
+ frame_->CallStub(&stub, 0);
// Store to result (on the stack).
- __ str(r0, frame_->Element(target.size()));
+ __ str(r0, frame_->ElementAt(target.size()));
}
// Compute the new value by calling the right JavaScript native.
if (is_increment) {
InvokeBuiltinStub stub(InvokeBuiltinStub::Inc, 1);
- __ CallStub(&stub);
+ frame_->CallStub(&stub, 0);
} else {
InvokeBuiltinStub stub(InvokeBuiltinStub::Dec, 1);
- __ CallStub(&stub);
+ frame_->CallStub(&stub, 0);
}
// Store the new value in the target if not const.
- __ bind(&exit);
- frame_->Push(r0);
+ exit.Bind();
+ frame_->EmitPush(r0);
if (!is_const) target.SetValue(NOT_CONST_INIT);
}
// Postfix: Discard the new value and use the old.
- if (is_postfix) frame_->Pop(r0);
+ if (is_postfix) frame_->EmitPop(r0);
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ BinaryOperation");
Token::Value op = node->op();
// of compiling the binary operation is materialized or not.
if (op == Token::AND) {
- Label is_true;
- LoadCondition(node->left(),
- NOT_INSIDE_TYPEOF,
- &is_true,
- false_target(),
- false);
+ JumpTarget is_true(this);
+ LoadConditionAndSpill(node->left(),
+ NOT_INSIDE_TYPEOF,
+ &is_true,
+ false_target(),
+ false);
if (has_cc()) {
Branch(false, false_target());
// Evaluate right side expression.
- __ bind(&is_true);
- LoadCondition(node->right(),
- NOT_INSIDE_TYPEOF,
- true_target(),
- false_target(),
- false);
+ is_true.Bind();
+ LoadConditionAndSpill(node->right(),
+ NOT_INSIDE_TYPEOF,
+ true_target(),
+ false_target(),
+ false);
} else {
- Label pop_and_continue, exit;
+ JumpTarget pop_and_continue(this);
+ JumpTarget exit(this);
__ ldr(r0, frame_->Top()); // dup the stack top
- frame_->Push(r0);
+ frame_->EmitPush(r0);
// Avoid popping the result if it converts to 'false' using the
// standard ToBoolean() conversion as described in ECMA-262,
// section 9.2, page 30.
Branch(false, &exit);
// Pop the result of evaluating the first part.
- __ bind(&pop_and_continue);
- frame_->Pop(r0);
+ pop_and_continue.Bind();
+ frame_->EmitPop(r0);
// Evaluate right side expression.
- __ bind(&is_true);
- Load(node->right());
+ is_true.Bind();
+ LoadAndSpill(node->right());
// Exit (always with a materialized value).
- __ bind(&exit);
+ exit.Bind();
}
} else if (op == Token::OR) {
- Label is_false;
- LoadCondition(node->left(),
- NOT_INSIDE_TYPEOF,
- true_target(),
- &is_false,
- false);
+ JumpTarget is_false(this);
+ LoadConditionAndSpill(node->left(),
+ NOT_INSIDE_TYPEOF,
+ true_target(),
+ &is_false,
+ false);
if (has_cc()) {
Branch(true, true_target());
// Evaluate right side expression.
- __ bind(&is_false);
- LoadCondition(node->right(),
- NOT_INSIDE_TYPEOF,
- true_target(),
- false_target(),
- false);
+ is_false.Bind();
+ LoadConditionAndSpill(node->right(),
+ NOT_INSIDE_TYPEOF,
+ true_target(),
+ false_target(),
+ false);
} else {
- Label pop_and_continue, exit;
+ JumpTarget pop_and_continue(this);
+ JumpTarget exit(this);
__ ldr(r0, frame_->Top());
- frame_->Push(r0);
+ frame_->EmitPush(r0);
// Avoid popping the result if it converts to 'true' using the
// standard ToBoolean() conversion as described in ECMA-262,
// section 9.2, page 30.
Branch(true, &exit);
// Pop the result of evaluating the first part.
- __ bind(&pop_and_continue);
- frame_->Pop(r0);
+ pop_and_continue.Bind();
+ frame_->EmitPop(r0);
// Evaluate right side expression.
- __ bind(&is_false);
- Load(node->right());
+ is_false.Bind();
+ LoadAndSpill(node->right());
// Exit (always with a materialized value).
- __ bind(&exit);
+ exit.Bind();
}
} else {
Literal* rliteral = node->right()->AsLiteral();
if (rliteral != NULL && rliteral->handle()->IsSmi()) {
- Load(node->left());
+ LoadAndSpill(node->left());
SmiOperation(node->op(), rliteral->handle(), false);
} else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
- Load(node->right());
+ LoadAndSpill(node->right());
SmiOperation(node->op(), lliteral->handle(), true);
} else {
- Load(node->left());
- Load(node->right());
+ LoadAndSpill(node->left());
+ LoadAndSpill(node->right());
GenericBinaryOperation(node->op());
}
- frame_->Push(r0);
+ frame_->EmitPush(r0);
}
+ ASSERT((has_cc() && frame_->height() == original_height) ||
+ (!has_cc() && frame_->height() == original_height + 1));
}
void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
__ ldr(r0, frame_->Function());
- frame_->Push(r0);
+ frame_->EmitPush(r0);
+ ASSERT(frame_->height() == original_height + 1);
}
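
VisitThisFunction is the smallest instance of the entry/exit bookkeeping this patch adds to every visitor: record the frame height on entry (DEBUG only) and assert the expected height on exit. The general shape, as it appears in the larger visitors above (a sketch of the idiom, not new code in the patch):

#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope(this);
  // ... emit code for the node ...
  // An expression visitor either leaves exactly one value on the frame, or
  // leaves a condition code set (has_cc()) with the height unchanged.
  ASSERT((has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
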
void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ CompareOperation");
// Get the expressions from the node.
right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
// The 'null' value can only be equal to 'null' or 'undefined'.
if (left_is_null || right_is_null) {
- Load(left_is_null ? right : left);
- frame_->Pop(r0);
+ LoadAndSpill(left_is_null ? right : left);
+ frame_->EmitPop(r0);
__ cmp(r0, Operand(Factory::null_value()));
// The 'null' value is only equal to 'undefined' if using non-strict
// comparisons.
if (op != Token::EQ_STRICT) {
- __ b(eq, true_target());
+ true_target()->Branch(eq);
__ cmp(r0, Operand(Factory::undefined_value()));
- __ b(eq, true_target());
+ true_target()->Branch(eq);
__ tst(r0, Operand(kSmiTagMask));
- __ b(eq, false_target());
+ false_target()->Branch(eq);
// It can be an undetectable object.
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
}
cc_reg_ = eq;
+ ASSERT(has_cc() && frame_->height() == original_height);
return;
}
}
// Load the operand, move it to register r1.
LoadTypeofExpression(operation->expression());
- frame_->Pop(r1);
+ frame_->EmitPop(r1);
if (check->Equals(Heap::number_symbol())) {
__ tst(r1, Operand(kSmiTagMask));
- __ b(eq, true_target());
+ true_target()->Branch(eq);
__ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
__ cmp(r1, Operand(Factory::heap_number_map()));
cc_reg_ = eq;
} else if (check->Equals(Heap::string_symbol())) {
__ tst(r1, Operand(kSmiTagMask));
- __ b(eq, false_target());
+ false_target()->Branch(eq);
__ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
__ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
__ cmp(r2, Operand(1 << Map::kIsUndetectable));
- __ b(eq, false_target());
+ false_target()->Branch(eq);
__ ldrb(r2, FieldMemOperand(r1, Map::kInstanceTypeOffset));
__ cmp(r2, Operand(FIRST_NONSTRING_TYPE));
} else if (check->Equals(Heap::boolean_symbol())) {
__ cmp(r1, Operand(Factory::true_value()));
- __ b(eq, true_target());
+ true_target()->Branch(eq);
__ cmp(r1, Operand(Factory::false_value()));
cc_reg_ = eq;
} else if (check->Equals(Heap::undefined_symbol())) {
__ cmp(r1, Operand(Factory::undefined_value()));
- __ b(eq, true_target());
+ true_target()->Branch(eq);
__ tst(r1, Operand(kSmiTagMask));
- __ b(eq, false_target());
+ false_target()->Branch(eq);
// It can be an undetectable object.
__ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
} else if (check->Equals(Heap::function_symbol())) {
__ tst(r1, Operand(kSmiTagMask));
- __ b(eq, false_target());
+ false_target()->Branch(eq);
__ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
__ cmp(r1, Operand(JS_FUNCTION_TYPE));
} else if (check->Equals(Heap::object_symbol())) {
__ tst(r1, Operand(kSmiTagMask));
- __ b(eq, false_target());
+ false_target()->Branch(eq);
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ cmp(r1, Operand(Factory::null_value()));
- __ b(eq, true_target());
+ true_target()->Branch(eq);
// It can be an undetectable object.
__ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
__ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
__ cmp(r1, Operand(1 << Map::kIsUndetectable));
- __ b(eq, false_target());
+ false_target()->Branch(eq);
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
- __ b(lt, false_target());
+ false_target()->Branch(lt);
__ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
cc_reg_ = le;
} else {
// Uncommon case: typeof testing against a string literal that is
// never returned from the typeof operator.
- __ b(false_target());
+ false_target()->Jump();
}
+ ASSERT(!has_valid_frame() ||
+ (has_cc() && frame_->height() == original_height));
return;
}
- Load(left);
- Load(right);
+ LoadAndSpill(left);
+ LoadAndSpill(right);
switch (op) {
case Token::EQ:
Comparison(eq, false);
Comparison(eq, true);
break;
- case Token::IN:
- __ mov(r0, Operand(1)); // not counting receiver
- __ InvokeBuiltin(Builtins::IN, CALL_JS);
- frame_->Push(r0);
+ case Token::IN: {
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(1)); // not counting receiver
+ Result result = frame_->InvokeBuiltin(Builtins::IN,
+ CALL_JS,
+ &arg_count,
+ 2);
+ frame_->EmitPush(result.reg());
break;
+ }
- case Token::INSTANCEOF:
- __ mov(r0, Operand(1)); // not counting receiver
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_JS);
- __ tst(r0, Operand(r0));
+ case Token::INSTANCEOF: {
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(1)); // not counting receiver
+ Result result = frame_->InvokeBuiltin(Builtins::INSTANCE_OF,
+ CALL_JS,
+ &arg_count,
+ 2);
+ __ tst(result.reg(), Operand(result.reg()));
cc_reg_ = eq;
break;
+ }
default:
UNREACHABLE();
}
+ ASSERT((has_cc() && frame_->height() == original_height) ||
+ (!has_cc() && frame_->height() == original_height + 1));
+}
+
+
+#ifdef DEBUG
+bool CodeGenerator::HasValidEntryRegisters() { return true; }
+#endif
+
+
+bool CodeGenerator::IsActualFunctionReturn(JumpTarget* target) {
+ return (target == &function_return_ && !function_return_is_shadowed_);
}
void Reference::GetValue(TypeofState typeof_state) {
+ ASSERT(!cgen_->in_spilled_code());
+ ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
ASSERT(!cgen_->has_cc());
MacroAssembler* masm = cgen_->masm();
- VirtualFrame* frame = cgen_->frame();
Property* property = expression_->AsProperty();
if (property != NULL) {
cgen_->CodeForSourcePosition(property->position());
// there is a chance that reference errors can be thrown below, we
// must distinguish between the two kinds of loads (typeof expression
// loads must not throw a reference error).
+ VirtualFrame* frame = cgen_->frame();
Comment cmnt(masm, "[ Load from named Property");
- // Setup the name register.
Handle<String> name(GetName());
- __ mov(r2, Operand(name));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-
Variable* var = expression_->AsVariableProxy()->AsVariable();
- if (var != NULL) {
- ASSERT(var->is_global());
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else {
- __ Call(ic, RelocInfo::CODE_TARGET);
- }
- frame->Push(r0);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Setup the name register.
+ Result name_reg = cgen_->allocator()->Allocate(r2);
+ ASSERT(name_reg.is_valid());
+ __ mov(name_reg.reg(), Operand(name));
+ ASSERT(var == NULL || var->is_global());
+ RelocInfo::Mode rmode = (var == NULL)
+ ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
+ Result answer = frame->CallCodeObject(ic, rmode, &name_reg, 0);
+ frame->EmitPush(answer.reg());
break;
}
// TODO(181): Implement inlined version of array indexing once
// loop nesting is properly tracked on ARM.
+ VirtualFrame* frame = cgen_->frame();
Comment cmnt(masm, "[ Load from keyed Property");
ASSERT(property != NULL);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-
Variable* var = expression_->AsVariableProxy()->AsVariable();
- if (var != NULL) {
- ASSERT(var->is_global());
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else {
- __ Call(ic, RelocInfo::CODE_TARGET);
- }
- frame->Push(r0);
+ ASSERT(var == NULL || var->is_global());
+ RelocInfo::Mode rmode = (var == NULL)
+ ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
+ Result answer = frame->CallCodeObject(ic, rmode, 0);
+ frame->EmitPush(answer.reg());
break;
}
ASSERT(slot->var()->is_dynamic());
// For now, just do a runtime call.
- frame->Push(cp);
+ frame->EmitPush(cp);
__ mov(r0, Operand(slot->var()->name()));
- frame->Push(r0);
+ frame->EmitPush(r0);
if (init_state == CONST_INIT) {
// Same as the case for a normal store, but ignores attribute
// and when the expression operands are defined and valid, and
// thus we need the split into 2 operations: declaration of the
// context slot followed by initialization.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ frame->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
} else {
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ frame->CallRuntime(Runtime::kStoreContextSlot, 3);
}
// Storing a variable must keep the (new) value on the expression
// stack. This is necessary for compiling assignment expressions.
- frame->Push(r0);
+ frame->EmitPush(r0);
} else {
ASSERT(!slot->var()->is_dynamic());
- Label exit;
+ JumpTarget exit(cgen_);
if (init_state == CONST_INIT) {
ASSERT(slot->var()->mode() == Variable::CONST);
// Only the first const initialization must be executed (the slot
Comment cmnt(masm, "[ Init const");
__ ldr(r2, cgen_->SlotOperand(slot, r2));
__ cmp(r2, Operand(Factory::the_hole_value()));
- __ b(ne, &exit);
+ exit.Branch(ne);
}
// We must execute the store. Storing a variable must keep the
// initialize consts to 'the hole' value and by doing so, end up
// calling this code. r2 may be loaded with context; used below in
// RecordWrite.
- frame->Pop(r0);
+ frame->EmitPop(r0);
__ str(r0, cgen_->SlotOperand(slot, r2));
- frame->Push(r0);
+ frame->EmitPush(r0);
if (slot->type() == Slot::CONTEXT) {
// Skip write barrier if the written value is a smi.
__ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &exit);
+ exit.Branch(eq);
// r2 is loaded with context when calling SlotOperand above.
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
__ mov(r3, Operand(offset));
// to bind the exit label. Doing so can defeat peephole
// optimization.
if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
- __ bind(&exit);
+ exit.Bind();
}
}
break;
case NAMED: {
Comment cmnt(masm, "[ Store to named Property");
// Call the appropriate IC code.
- frame->Pop(r0); // value
- // Setup the name register.
- Handle<String> name(GetName());
- __ mov(r2, Operand(name));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- frame->Push(r0);
+ Handle<String> name(GetName());
+
+ Result value = cgen_->allocator()->Allocate(r0);
+ ASSERT(value.is_valid());
+ frame->EmitPop(value.reg());
+
+ // Setup the name register.
+ Result property_name = cgen_->allocator()->Allocate(r2);
+ ASSERT(property_name.is_valid());
+ __ mov(property_name.reg(), Operand(name));
+ Result answer = frame->CallCodeObject(ic,
+ RelocInfo::CODE_TARGET,
+ &value,
+ &property_name,
+ 0);
+ frame->EmitPush(answer.reg());
break;
}
// Call IC code.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
// TODO(1222589): Make the IC grab the values from the stack.
- frame->Pop(r0); // value
- __ Call(ic, RelocInfo::CODE_TARGET);
- frame->Push(r0);
+ Result value = cgen_->allocator()->Allocate(r0);
+ ASSERT(value.is_valid());
+ frame->EmitPop(value.reg()); // value
+ Result result =
+ frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, &value, 0);
+ frame->EmitPush(result.reg());
break;
}
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-// -------------------------------------------------------------------------
-// Virtual frame
-
-class VirtualFrame BASE_EMBEDDED {
- public:
- explicit VirtualFrame(CodeGenerator* cgen);
-
- void Enter();
- void Exit();
-
- void AllocateLocals();
-
- MemOperand Top() const { return MemOperand(sp, 0); }
-
- MemOperand Element(int index) const {
- return MemOperand(sp, index * kPointerSize);
- }
-
- MemOperand Local(int index) const {
- ASSERT(0 <= index && index < frame_local_count_);
- return MemOperand(fp, kLocal0Offset - index * kPointerSize);
- }
-
- MemOperand Function() const { return MemOperand(fp, kFunctionOffset); }
-
- MemOperand Context() const { return MemOperand(fp, kContextOffset); }
-
- MemOperand Parameter(int index) const {
- // Index -1 corresponds to the receiver.
- ASSERT(-1 <= index && index <= parameter_count_);
- return MemOperand(fp, (1 + parameter_count_ - index) * kPointerSize);
- }
-
- inline void Drop(int count);
-
- inline void Pop();
- inline void Pop(Register reg);
-
- inline void Push(Register reg);
-
- private:
- static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
- static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
- static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
- MacroAssembler* masm_;
- int frame_local_count_;
- int parameter_count_;
-};
-
-
// -------------------------------------------------------------------------
// Reference support
// the expression stack, and it is left in place with its value above it.
void GetValue(TypeofState typeof_state);
+ // Generate code to push the value of a reference on top of the expression
+ // stack and then spill the stack frame. This function is used temporarily
+ // while the code generator is being transformed.
+ inline void GetValueAndSpill(TypeofState typeof_state);
+
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
// on the expression stack. The stored value is left in place (with the
// labels.
CodeGenState(CodeGenerator* owner,
TypeofState typeof_state,
- Label* true_target,
- Label* false_target);
+ JumpTarget* true_target,
+ JumpTarget* false_target);
// Destroy a code generator state and restore the owning code generator's
// previous state.
~CodeGenState();
TypeofState typeof_state() const { return typeof_state_; }
- Label* true_target() const { return true_target_; }
- Label* false_target() const { return false_target_; }
+ JumpTarget* true_target() const { return true_target_; }
+ JumpTarget* false_target() const { return false_target_; }
private:
CodeGenerator* owner_;
TypeofState typeof_state_;
- Label* true_target_;
- Label* false_target_;
+ JumpTarget* true_target_;
+ JumpTarget* false_target_;
CodeGenState* previous_;
};
VirtualFrame* frame() const { return frame_; }
+ bool has_valid_frame() const { return frame_ != NULL; }
+
+ // Set the virtual frame to be new_frame, with non-frame register
+ // reference counts given by non_frame_registers. The non-frame
+ // register reference counts of the old frame are returned in
+ // non_frame_registers.
+ void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
+
+ void DeleteFrame();
+
+ RegisterAllocator* allocator() const { return allocator_; }
+
CodeGenState* state() { return state_; }
void set_state(CodeGenState* state) { state_ = state; }
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
+ bool in_spilled_code() const { return in_spilled_code_; }
+ void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
+
private:
// Construction/Destruction
CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
// State
bool has_cc() const { return cc_reg_ != al; }
TypeofState typeof_state() const { return state_->typeof_state(); }
- Label* true_target() const { return state_->true_target(); }
- Label* false_target() const { return state_->false_target(); }
+ JumpTarget* true_target() const { return state_->true_target(); }
+ JumpTarget* false_target() const { return state_->false_target(); }
// Node visitors.
+ void VisitStatements(ZoneList<Statement*>* statements);
+
#define DEF_VISIT(type) \
void Visit##type(type* node);
NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
+ // Visit a statement and then spill the virtual frame if control flow can
+ // reach the end of the statement (ie, it does not exit via break,
+ // continue, return, or throw). This function is used temporarily while
+ // the code generator is being transformed.
+ void VisitAndSpill(Statement* statement) {
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ Visit(statement);
+ if (frame_ != NULL) {
+ frame_->SpillAll();
+ }
+ set_in_spilled_code(true);
+ }
+
+ // Visit a list of statements and then spill the virtual frame if control
+ // flow can reach the end of the list.
+ void VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ VisitStatements(statements);
+ if (frame_ != NULL) {
+ frame_->SpillAll();
+ }
+ set_in_spilled_code(true);
+ }
+
// Main code generation function
void GenCode(FunctionLiteral* fun);
MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
Register tmp,
Register tmp2,
- Label* slow);
+ JumpTarget* slow);
// Expressions
MemOperand GlobalObject() const {
void LoadCondition(Expression* x,
TypeofState typeof_state,
- Label* true_target,
- Label* false_target,
+ JumpTarget* true_target,
+ JumpTarget* false_target,
bool force_cc);
void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
void LoadGlobal();
void LoadGlobalReceiver(Register scratch);
+ // Generate code to push the value of an expression on top of the frame
+ // and then spill the frame fully to memory. This function is used
+ // temporarily while the code generator is being transformed.
+ void LoadAndSpill(Expression* expression,
+ TypeofState typeof_state = NOT_INSIDE_TYPEOF) {
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ Load(expression, typeof_state);
+ frame_->SpillAll();
+ set_in_spilled_code(true);
+ }
+
+ // Call LoadCondition and then spill the virtual frame unless control flow
+ // cannot reach the end of the expression (ie, by emitting only
+ // unconditional jumps to the control targets).
+ void LoadConditionAndSpill(Expression* expression,
+ TypeofState typeof_state,
+ JumpTarget* true_target,
+ JumpTarget* false_target,
+ bool force_control) {
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ LoadCondition(expression, typeof_state, true_target, false_target,
+ force_control);
+ if (frame_ != NULL) {
+ frame_->SpillAll();
+ }
+ set_in_spilled_code(true);
+ }
+
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
Register tmp,
Register tmp2,
- Label* slow);
+ JumpTarget* slow);
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// through the context chain.
void LoadTypeofExpression(Expression* x);
- void ToBoolean(Label* true_target, Label* false_target);
+ void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
void GenericBinaryOperation(Token::Value op);
void Comparison(Condition cc, bool strict = false);
void CallWithArguments(ZoneList<Expression*>* arguments, int position);
// Control flow
- void Branch(bool if_true, Label* L);
+ void Branch(bool if_true, JumpTarget* target);
void CheckStack();
void CleanStack(int num_bytes);
void GenerateFastCaseSwitchJumpTable(SwitchStatement* node,
int min_index,
int range,
- Label* fail_label,
+ Label* default_label,
Vector<Label*> case_targets,
Vector<Label> case_labels);
// Generate the code for cases for the fast case switch.
// Called by GenerateFastCaseSwitchJumpTable.
void GenerateFastCaseSwitchCases(SwitchStatement* node,
- Vector<Label> case_labels);
+ Vector<Label> case_labels,
+ VirtualFrame* start_frame);
// Fast support for constant-Smi switches.
void GenerateFastCaseSwitchStatement(SwitchStatement* node,
// Methods used to indicate which source code is generated for. Source
// positions are collected by the assembler and emitted with the relocation
// information.
- void CodeForStatement(Node* node);
+ void CodeForFunctionPosition(FunctionLiteral* fun);
+ void CodeForStatementPosition(Node* node);
void CodeForSourcePosition(int pos);
+ // Is the given jump target the actual (ie, non-shadowed) function return
+ // target?
+ bool IsActualFunctionReturn(JumpTarget* target);
+
+#ifdef DEBUG
+ // True if the registers are valid for entry to a block.
+ bool HasValidEntryRegisters();
+#endif
+
bool is_eval_; // Tells whether code is generated for eval.
+
Handle<Script> script_;
List<DeferredCode*> deferred_;
// Code generation state
Scope* scope_;
VirtualFrame* frame_;
+ RegisterAllocator* allocator_;
Condition cc_reg_;
CodeGenState* state_;
- bool is_inside_try_;
int break_stack_height_;
- // Labels
- Label function_return_;
+ // Jump targets
+ JumpTarget function_return_;
+
+ // True if the function return is shadowed (ie, jumping to the target
+ // function_return_ does not jump to the true function return, but rather
+ // to some unlinking code).
+ bool function_return_is_shadowed_;
+
+ // True when we are in code that expects the virtual frame to be fully
+  // spilled.  Some virtual frame functions are disabled in DEBUG builds when
+ // called from spilled code, because they do not leave the virtual frame
+ // in a spilled state.
+ bool in_spilled_code_;
friend class VirtualFrame;
+ friend class JumpTarget;
friend class Reference;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
+
+void Reference::GetValueAndSpill(TypeofState typeof_state) {
+ ASSERT(cgen_->in_spilled_code());
+ cgen_->set_in_spilled_code(false);
+ GetValue(typeof_state);
+ cgen_->frame()->SpillAll();
+ cgen_->set_in_spilled_code(true);
+}
+
+
} } // namespace v8::internal
#endif // V8_CODEGEN_ARM_H_
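
The ARM header above introduces four transitional wrappers with the same shape: LoadAndSpill, LoadConditionAndSpill, VisitAndSpill/VisitStatementsAndSpill, and Reference::GetValueAndSpill. They exist so that already-converted, frame-aware code can be called from code that still assumes a fully spilled frame. Their common idiom, condensed below; the middle call stands for whichever operation is being wrapped (illustrative only):

ASSERT(in_spilled_code());
set_in_spilled_code(false);      // allow frame-aware code generation
Load(expression, typeof_state);  // or Visit(statement), GetValue(state), ...
if (has_valid_frame()) {
  frame_->SpillAll();            // put every frame element back in memory
}
set_in_spilled_code(true);       // the caller continues in spilled mode
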
#define __ masm_->
-// -------------------------------------------------------------------------
-// VirtualFrame implementation.
-
-VirtualFrame::VirtualFrame(CodeGenerator* cgen) {
- ASSERT(cgen->scope() != NULL);
-
- masm_ = cgen->masm();
- frame_local_count_ = cgen->scope()->num_stack_slots();
- parameter_count_ = cgen->scope()->num_parameters();
-}
-
-
-void VirtualFrame::Enter() {
- Comment cmnt(masm_, "[ Enter JS frame");
- __ push(ebp);
- __ mov(ebp, Operand(esp));
-
- // Store the context and the function in the frame.
- __ push(esi);
- __ push(edi);
-
- // Clear the function slot when generating debug code.
- if (FLAG_debug_code) {
- __ Set(edi, Immediate(reinterpret_cast<int>(kZapValue)));
- }
-}
-
-
-void VirtualFrame::Exit() {
- Comment cmnt(masm_, "[ Exit JS frame");
- // Record the location of the JS exit code for patching when setting
- // break point.
- __ RecordJSReturn();
-
- // Avoid using the leave instruction here, because it is too
- // short. We need the return sequence to be a least the size of a
- // call instruction to support patching the exit code in the
- // debugger. See VisitReturnStatement for the full return sequence.
- __ mov(esp, Operand(ebp));
- __ pop(ebp);
-}
-
-
-void VirtualFrame::AllocateLocals() {
- if (frame_local_count_ > 0) {
- Comment cmnt(masm_, "[ Allocate space for locals");
- __ Set(eax, Immediate(Factory::undefined_value()));
- for (int i = 0; i < frame_local_count_; i++) {
- __ push(eax);
- }
- }
-}
-
-
-void VirtualFrame::Drop(int count) {
- ASSERT(count >= 0);
- if (count > 0) {
- __ add(Operand(esp), Immediate(count * kPointerSize));
- }
-}
-
-
-void VirtualFrame::Pop() { Drop(1); }
-
-
-void VirtualFrame::Pop(Register reg) {
- __ pop(reg);
-}
-
-
-void VirtualFrame::Pop(Operand operand) {
- __ pop(operand);
-}
-
-
-void VirtualFrame::Push(Register reg) {
- __ push(reg);
-}
-
-
-void VirtualFrame::Push(Operand operand) {
- __ push(operand);
-}
-
-
-void VirtualFrame::Push(Immediate immediate) {
- __ push(immediate);
-}
-
-
// -------------------------------------------------------------------------
// CodeGenState implementation.
CodeGenState::CodeGenState(CodeGenerator* owner)
: owner_(owner),
typeof_state_(NOT_INSIDE_TYPEOF),
- true_target_(NULL),
- false_target_(NULL),
+ destination_(NULL),
previous_(NULL) {
owner_->set_state(this);
}
CodeGenState::CodeGenState(CodeGenerator* owner,
TypeofState typeof_state,
- Label* true_target,
- Label* false_target)
+ ControlDestination* destination)
: owner_(owner),
typeof_state_(typeof_state),
- true_target_(true_target),
- false_target_(false_target),
+ destination_(destination),
previous_(owner->state()) {
owner_->set_state(this);
}
masm_(new MacroAssembler(NULL, buffer_size)),
scope_(NULL),
frame_(NULL),
- cc_reg_(no_condition),
+ allocator_(NULL),
state_(NULL),
- is_inside_try_(false),
break_stack_height_(0),
- loop_nesting_(0) {
+ loop_nesting_(0),
+ function_return_is_shadowed_(false),
+ in_spilled_code_(false) {
}
// Calling conventions:
-// ebp: frame pointer
+// ebp: caller's frame pointer
// esp: stack pointer
-// edi: caller's parameter pointer
+// edi: called JS function
// esi: callee's context
void CodeGenerator::GenCode(FunctionLiteral* fun) {
// Record the position for debugging purposes.
- CodeForSourcePosition(fun->start_position());
+ CodeForFunctionPosition(fun);
ZoneList<Statement*>* body = fun->body();
// Initialize state.
ASSERT(scope_ == NULL);
scope_ = fun->scope();
+ ASSERT(allocator_ == NULL);
+ RegisterAllocator register_allocator(this);
+  allocator_ = &register_allocator;
ASSERT(frame_ == NULL);
- VirtualFrame virtual_frame(this);
- frame_ = &virtual_frame;
- cc_reg_ = no_condition;
+ frame_ = new VirtualFrame(this);
+ function_return_.Initialize(this, JumpTarget::BIDIRECTIONAL);
+ function_return_is_shadowed_ = false;
+ set_in_spilled_code(false);
// Adjust for function-level loop nesting.
loop_nesting_ += fun->loop_nesting();
{
CodeGenState state(this);
- // Entry
- // stack: function, receiver, arguments, return address
+ // Entry:
+ // Stack: receiver, arguments, return address.
+ // ebp: caller's frame pointer
// esp: stack pointer
- // ebp: frame pointer
- // edi: caller's parameter pointer
+ // edi: called JS function
// esi: callee's context
-
+ allocator_->Initialize();
frame_->Enter();
// tos: code slot
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ frame_->SpillAll();
__ int3();
}
#endif
- // This section now only allocates and copies the formals into the
- // arguments object. It saves the address in ecx, which is saved
- // at any point before either garbage collection or ecx is
- // overwritten. The flag arguments_array_allocated communicates
- // with the store into the arguments variable and guards the lazy
- // pushes of ecx to TOS. The flag arguments_array_saved notes
- // when the push has happened.
- bool arguments_object_allocated = false;
- bool arguments_object_saved = false;
-
- // Allocate arguments object.
- // The arguments object pointer needs to be saved in ecx, since we need
- // to store arguments into the context.
+ // Allocate space for locals and initialize them.
+ frame_->AllocateStackSlots(scope_->num_stack_slots());
+
+ // Allocate the arguments object and copy the parameters into it.
if (scope_->arguments() != NULL) {
ASSERT(scope_->arguments_shadow() != NULL);
- Comment cmnt(masm_, "[ allocate arguments object");
+ Comment cmnt(masm_, "[ Allocate arguments object");
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ lea(eax, frame_->Receiver());
- frame_->Push(frame_->Function());
- frame_->Push(eax);
- frame_->Push(Immediate(Smi::FromInt(scope_->num_parameters())));
- __ CallStub(&stub);
- __ mov(ecx, Operand(eax));
- arguments_object_allocated = true;
+ frame_->PushFunction();
+ frame_->PushReceiverSlotAddress();
+ frame_->Push(Smi::FromInt(scope_->num_parameters()));
+ Result answer = frame_->CallStub(&stub, 3);
+ frame_->Push(&answer);
}
- // Allocate space for locals and initialize them.
- frame_->AllocateLocals();
-
if (scope_->num_heap_slots() > 0) {
Comment cmnt(masm_, "[ allocate local context");
- // Save the arguments object pointer, if any.
- if (arguments_object_allocated && !arguments_object_saved) {
- frame_->Push(ecx);
- arguments_object_saved = true;
- }
// Allocate local context.
// Get outer context and create a new context based on it.
- frame_->Push(frame_->Function());
- __ CallRuntime(Runtime::kNewContext, 1); // eax holds the result
+ frame_->PushFunction();
+ Result context = frame_->CallRuntime(Runtime::kNewContext, 1);
+ // Update context local.
+ frame_->SaveContextRegister();
if (kDebug) {
- Label verified_true;
+ JumpTarget verified_true(this);
// Verify eax and esi are the same in debug mode
- __ cmp(eax, Operand(esi));
- __ j(equal, &verified_true);
+ __ cmp(context.reg(), Operand(esi));
+ context.Unuse();
+ verified_true.Branch(equal);
+ frame_->SpillAll();
__ int3();
- __ bind(&verified_true);
+ verified_true.Bind();
}
- // Update context local.
- __ mov(frame_->Context(), esi);
}
// TODO(1241774): Improve this code:
Variable* par = scope_->parameter(i);
Slot* slot = par->slot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
- // Save the arguments object pointer, if any.
- if (arguments_object_allocated && !arguments_object_saved) {
- frame_->Push(ecx);
- arguments_object_saved = true;
- }
- ASSERT(!scope_->is_global_scope()); // no parameters in global scope
- __ mov(eax, frame_->Parameter(i));
- // Loads ecx with context; used below in RecordWrite.
- __ mov(SlotOperand(slot, ecx), eax);
+ // The use of SlotOperand below is safe in unspilled code
+ // because the slot is guaranteed to be a context slot.
+ //
+ // There are no parameters in the global scope.
+ ASSERT(!scope_->is_global_scope());
+ frame_->PushParameterAt(i);
+ Result value = frame_->Pop();
+ value.ToRegister();
+
+ // SlotOperand loads context.reg() with the context object
+ // stored to, used below in RecordWrite.
+ Result context = allocator_->Allocate();
+ ASSERT(context.is_valid());
+ __ mov(SlotOperand(slot, context.reg()), value.reg());
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ RecordWrite(ecx, offset, eax, ebx);
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ frame_->Spill(context.reg());
+ frame_->Spill(value.reg());
+ __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
}
}
}
// Store the arguments object. This must happen after context
// initialization because the arguments object may be stored in the
// context.
- if (arguments_object_allocated) {
- ASSERT(scope_->arguments() != NULL);
- ASSERT(scope_->arguments_shadow() != NULL);
+ if (scope_->arguments() != NULL) {
Comment cmnt(masm_, "[ store arguments object");
{ Reference shadow_ref(this, scope_->arguments_shadow());
ASSERT(shadow_ref.is_slot());
{ Reference arguments_ref(this, scope_->arguments());
ASSERT(arguments_ref.is_slot());
- // If the newly-allocated arguments object is already on the
- // stack, we make use of the convenient property that references
- // representing slots take up no space on the expression stack
- // (ie, it doesn't matter that the stored value is actually below
- // the reference).
- //
- // If the newly-allocated argument object is not already on
- // the stack, we rely on the property that loading a
- // zero-sized reference will not clobber the ecx register.
- if (!arguments_object_saved) {
- frame_->Push(ecx);
- }
+ // Here we rely on the convenient property that references to slot
+ // take up zero space in the frame (ie, it doesn't matter that the
+ // stored value is actually below the reference on the frame).
arguments_ref.SetValue(NOT_CONST_INIT);
}
shadow_ref.SetValue(NOT_CONST_INIT);
}
- frame_->Pop(); // Value is no longer needed.
+ frame_->Drop(); // Value is no longer needed.
}
// Generate code to 'execute' declarations and initialize functions
}
if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
+ frame_->CallRuntime(Runtime::kTraceEnter, 0);
// Ignore the return value.
}
CheckStack();
bool should_trace =
is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
if (should_trace) {
- __ CallRuntime(Runtime::kDebugTrace, 0);
+ frame_->CallRuntime(Runtime::kDebugTrace, 0);
// Ignore the return value.
}
#endif
VisitStatements(body);
- // Generate a return statement if necessary.
- if (body->is_empty() || body->last()->AsReturnStatement() == NULL) {
+ // Handle the return from the function.
+ if (has_valid_frame()) {
+ // If there is a valid frame, control flow can fall off the end of
+ // the body. In that case there is an implicit return statement.
+ // Compiling a return statement will jump to the return sequence if
+ // it is already generated or generate it if not.
+ ASSERT(!function_return_is_shadowed_);
Literal undefined(Factory::undefined_value());
ReturnStatement statement(&undefined);
statement.set_statement_pos(fun->end_position());
VisitReturnStatement(&statement);
+ } else if (function_return_.is_linked()) {
+ // If the return target has dangling jumps to it, then we have not
+ // yet generated the return sequence. This can happen when (a)
+ // control does not flow off the end of the body so we did not
+ // compile an artificial return statement just above, and (b) there
+ // are return statements in the body but (c) they are all shadowed.
+ //
+ // There is no valid frame here but it is safe (also necessary) to
+ // load the return value into eax.
+ __ mov(eax, Immediate(Factory::undefined_value()));
+ function_return_.Bind();
+ GenerateReturnSequence();
}
}
}
loop_nesting_ -= fun->loop_nesting();
// Code generation state must be reset.
- scope_ = NULL;
- frame_ = NULL;
- ASSERT(!has_cc());
ASSERT(state_ == NULL);
ASSERT(loop_nesting() == 0);
+ ASSERT(!function_return_is_shadowed_);
+ function_return_.Unuse();
+ DeleteFrame();
+
+ // Process any deferred code using the register allocator.
+ ProcessDeferred();
+
+ // There is no need to delete the register allocator, it is a
+ // stack-allocated local.
+ allocator_ = NULL;
+ scope_ = NULL;
}
int index = slot->index();
switch (slot->type()) {
case Slot::PARAMETER:
- return frame_->Parameter(index);
+ return frame_->ParameterAt(index);
case Slot::LOCAL:
- return frame_->Local(index);
+ return frame_->LocalAt(index);
case Slot::CONTEXT: {
// Follow the context chain if necessary.
Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
- Register tmp,
- Label* slow) {
+ Result tmp,
+ JumpTarget* slow) {
ASSERT(slot->type() == Slot::CONTEXT);
- int index = slot->index();
- Register context = esi;
+ ASSERT(tmp.is_register());
+ Result context(esi, this);
+
for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_eval()) {
// Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow, not_taken);
+ __ cmp(ContextOperand(context.reg(), Context::EXTENSION_INDEX),
+ Immediate(0));
+ slow->Branch(not_equal, not_taken);
}
- __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
+ __ mov(tmp.reg(), ContextOperand(context.reg(), Context::CLOSURE_INDEX));
+ __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
context = tmp;
}
}
// Check that last extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow, not_taken);
- __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, index);
+ __ cmp(ContextOperand(context.reg(), Context::EXTENSION_INDEX),
+ Immediate(0));
+ slow->Branch(not_equal, not_taken);
+ __ mov(tmp.reg(), ContextOperand(context.reg(), Context::FCONTEXT_INDEX));
+ return ContextOperand(tmp.reg(), slot->index());
}
-
-// Loads a value on TOS. If it is a boolean value, the result may have been
-// (partially) translated into branches, or it may have set the condition
-// code register. If force_cc is set, the value is forced to set the
-// condition code register and no value is pushed. If the condition code
-// register was set, has_cc() is true and cc_reg_ contains the condition to
-// test for 'true'.
+// Emit code to load the value of an expression to the top of the
+// frame. If the expression is boolean-valued it may be compiled (or
+// partially compiled) into control flow to the control destination.
+// If force_control is true, control flow is forced.
void CodeGenerator::LoadCondition(Expression* x,
TypeofState typeof_state,
- Label* true_target,
- Label* false_target,
- bool force_cc) {
- ASSERT(!has_cc());
+ ControlDestination* dest,
+ bool force_control) {
+ ASSERT(!in_spilled_code());
+ int original_height = frame_->height();
- { CodeGenState new_state(this, typeof_state, true_target, false_target);
+ { CodeGenState new_state(this, typeof_state, dest);
Visit(x);
+
+ // If we hit a stack overflow, we may not have actually visited
+ // the expression. In that case, we ensure that we have a
+ // valid-looking frame state because we will continue to generate
+ // code as we unwind the C++ stack.
+ //
+ // It's possible to have both a stack overflow and a valid frame
+ // state (eg, a subexpression overflowed, visiting it returned
+ // with a dummied frame state, and visiting this expression
+ // returned with a normal-looking state).
+ if (HasStackOverflow() &&
+ !dest->is_used() &&
+ frame_->height() == original_height) {
+ dest->Goto(true);
+ }
}
- if (force_cc && !has_cc()) {
- // Convert the TOS value to a boolean in the condition code register.
- // Visiting an expression may possibly choose neither (a) to leave a
- // value in the condition code register nor (b) to leave a value in TOS
- // (eg, by compiling to only jumps to the targets). In that case the
- // code generated by ToBoolean is wrong because it assumes the value of
- // the expression in TOS. So long as there is always a value in TOS or
- // the condition code register when control falls through to here (there
- // is), the code generated by ToBoolean is dead and therefore safe.
- ToBoolean(true_target, false_target);
+
+ if (force_control && !dest->is_used()) {
+ // Convert the TOS value into flow to the control destination.
+ ToBoolean(dest);
}
- ASSERT(has_cc() || !force_cc);
+
+ ASSERT(!(force_control && !dest->is_used()));
+ ASSERT(dest->is_used() || frame_->height() == original_height + 1);
}
void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
- Label true_target;
- Label false_target;
- LoadCondition(x, typeof_state, &true_target, &false_target, false);
-
- if (has_cc()) {
- // convert cc_reg_ into a bool
- Label loaded, materialize_true;
- __ j(cc_reg_, &materialize_true);
- frame_->Push(Immediate(Factory::false_value()));
- __ jmp(&loaded);
- __ bind(&materialize_true);
- frame_->Push(Immediate(Factory::true_value()));
- __ bind(&loaded);
- cc_reg_ = no_condition;
- }
-
- if (true_target.is_linked() || false_target.is_linked()) {
- // we have at least one condition value
- // that has been "translated" into a branch,
- // thus it needs to be loaded explicitly again
- Label loaded;
- __ jmp(&loaded); // don't lose current TOS
- bool both = true_target.is_linked() && false_target.is_linked();
- // reincarnate "true", if necessary
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ ASSERT(!in_spilled_code());
+ JumpTarget true_target(this);
+ JumpTarget false_target(this);
+ ControlDestination dest(&true_target, &false_target, true);
+ LoadCondition(x, typeof_state, &dest, false);
+
+ if (dest.false_was_fall_through()) {
+ // The false target was just bound.
+ JumpTarget loaded(this);
+ frame_->Push(Factory::false_value());
+ // There may be dangling jumps to the true target.
if (true_target.is_linked()) {
- __ bind(&true_target);
- frame_->Push(Immediate(Factory::true_value()));
+ loaded.Jump();
+ true_target.Bind();
+ frame_->Push(Factory::true_value());
+ loaded.Bind();
}
- // if both "true" and "false" need to be reincarnated,
- // jump across code for "false"
- if (both)
- __ jmp(&loaded);
- // reincarnate "false", if necessary
+
+ } else if (dest.is_used()) {
+ // There is true, and possibly false, control flow (with true as
+ // the fall through).
+ JumpTarget loaded(this);
+ frame_->Push(Factory::true_value());
if (false_target.is_linked()) {
- __ bind(&false_target);
- frame_->Push(Immediate(Factory::false_value()));
+ loaded.Jump();
+ false_target.Bind();
+ frame_->Push(Factory::false_value());
+ loaded.Bind();
+ }
+
+ } else {
+ // We have a valid value on top of the frame, but we still may
+ // have dangling jumps to the true and false targets from nested
+ // subexpressions (eg, the left subexpressions of the
+ // short-circuited boolean operators).
+ ASSERT(has_valid_frame());
+ if (true_target.is_linked() || false_target.is_linked()) {
+ JumpTarget loaded(this);
+ loaded.Jump(); // Don't lose the current TOS.
+ if (true_target.is_linked()) {
+ true_target.Bind();
+ frame_->Push(Factory::true_value());
+ if (false_target.is_linked()) {
+ loaded.Jump();
+ }
+ }
+ if (false_target.is_linked()) {
+ false_target.Bind();
+ frame_->Push(Factory::false_value());
+ }
+ loaded.Bind();
}
- // everything is loaded at this point
- __ bind(&loaded);
}
- ASSERT(!has_cc());
+
+ ASSERT(has_valid_frame());
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::LoadGlobal() {
- frame_->Push(GlobalObject());
+ if (in_spilled_code()) {
+ frame_->EmitPush(GlobalObject());
+ } else {
+ Result temp = allocator_->Allocate();
+ __ mov(temp.reg(), GlobalObject());
+ frame_->Push(&temp);
+ }
}
-void CodeGenerator::LoadGlobalReceiver(Register scratch) {
- __ mov(scratch, GlobalObject());
- frame_->Push(FieldOperand(scratch, GlobalObject::kGlobalReceiverOffset));
+void CodeGenerator::LoadGlobalReceiver() {
+ Result temp = allocator_->Allocate();
+ Register reg = temp.reg();
+ __ mov(reg, GlobalObject());
+ __ mov(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
+ frame_->Push(&temp);
}
void CodeGenerator::LoadReference(Reference* ref) {
+ // References are loaded from both spilled and unspilled code. Set the
+ // state to unspilled to allow that (and explicitly spill after
+ // construction at the construction sites).
+ bool was_in_spilled_code = in_spilled_code_;
+ in_spilled_code_ = false;
+
Comment cmnt(masm_, "[ LoadReference");
Expression* e = ref->expression();
Property* property = e->AsProperty();
} else {
// Anything else is a runtime error.
Load(e);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
}
+
+ in_spilled_code_ = was_in_spilled_code;
}
void CodeGenerator::UnloadReference(Reference* ref) {
// Pop a reference from the stack while preserving TOS.
Comment cmnt(masm_, "[ UnloadReference");
- int size = ref->size();
- if (size == 1) {
- frame_->Pop(eax);
- __ mov(frame_->Top(), eax);
- } else if (size > 1) {
- frame_->Pop(eax);
- frame_->Drop(size);
- frame_->Push(eax);
- }
+ frame_->Nip(ref->size());
}
// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
// convert it to a boolean in the condition code register or jump to
// 'false_target'/'true_target' as appropriate.
-void CodeGenerator::ToBoolean(Label* true_target, Label* false_target) {
+void CodeGenerator::ToBoolean(ControlDestination* dest) {
Comment cmnt(masm_, "[ ToBoolean");
- // The value to convert should be popped from the stack.
- frame_->Pop(eax);
-
+ // The value to convert should be popped from the frame.
+ Result value = frame_->Pop();
+ value.ToRegister();
// Fast case checks.
// 'false' => false.
- __ cmp(eax, Factory::false_value());
- __ j(equal, false_target);
+ __ cmp(value.reg(), Factory::false_value());
+ dest->false_target()->Branch(equal);
// 'true' => true.
- __ cmp(eax, Factory::true_value());
- __ j(equal, true_target);
+ __ cmp(value.reg(), Factory::true_value());
+ dest->true_target()->Branch(equal);
// 'undefined' => false.
- __ cmp(eax, Factory::undefined_value());
- __ j(equal, false_target);
+ __ cmp(value.reg(), Factory::undefined_value());
+ dest->false_target()->Branch(equal);
// Smi => false iff zero.
ASSERT(kSmiTag == 0);
- __ test(eax, Operand(eax));
- __ j(zero, false_target);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, true_target);
+ __ test(value.reg(), Operand(value.reg()));
+ dest->false_target()->Branch(zero);
+ __ test(value.reg(), Immediate(kSmiTagMask));
+ dest->true_target()->Branch(zero);
// Call the stub for all other cases.
- frame_->Push(eax); // Undo the pop(eax) from above.
+ frame_->Push(&value); // Undo the Pop() from above.
ToBooleanStub stub;
- __ CallStub(&stub);
- // Convert the result (eax) to condition code.
- __ test(eax, Operand(eax));
-
- ASSERT(not_equal == not_zero);
- cc_reg_ = not_equal;
+ Result temp = frame_->CallStub(&stub, 1);
+ // Convert the result to a condition code.
+ __ test(temp.reg(), Operand(temp.reg()));
+ temp.Unuse();
+ dest->Split(not_equal);
}
Token::Value op,
OverwriteMode mode,
GenericBinaryFlags flags)
- : DeferredCode(generator), stub_(op, mode, flags) { }
-
- void GenerateInlineCode() {
- stub_.GenerateSmiCode(masm(), enter());
+ : DeferredCode(generator), stub_(op, mode, flags), op_(op) {
+ set_comment("[ DeferredInlineBinaryOperation");
}
- virtual void Generate() {
- __ push(ebx);
- __ CallStub(&stub_);
- // We must preserve the eax value here, because it will be written
- // to the top-of-stack element when getting back to the fast case
- // code. See comment in GenericBinaryOperation where
- // deferred->exit() is bound.
- __ push(eax);
- }
+ Result GenerateInlineCode();
+
+ virtual void Generate();
private:
GenericBinaryOpStub stub_;
+ Token::Value op_;
};
+void DeferredInlineBinaryOperation::Generate() {
+ Result left(generator());
+ Result right(generator());
+ enter()->Bind(&left, &right);
+ generator()->frame()->Push(&left);
+ generator()->frame()->Push(&right);
+ Result answer = generator()->frame()->CallStub(&stub_, 2);
+ exit_.Jump(&answer);
+}
+
+
void CodeGenerator::GenericBinaryOperation(Token::Value op,
StaticType* type,
OverwriteMode overwrite_mode) {
if (op == Token::COMMA) {
// Simply discard left value.
- frame_->Pop(eax);
- frame_->Pop();
- frame_->Push(eax);
+ frame_->Nip(1);
return;
}
// Create a new deferred code for the slow-case part.
DeferredInlineBinaryOperation* deferred =
new DeferredInlineBinaryOperation(this, op, overwrite_mode, flags);
- // Fetch the operands from the stack.
- frame_->Pop(ebx); // get y
- __ mov(eax, frame_->Top()); // get x
// Generate the inline part of the code.
- deferred->GenerateInlineCode();
- // Put result back on the stack. It seems somewhat weird to let
- // the deferred code jump back before the assignment to the frame
- // top, but this is just to let the peephole optimizer get rid of
- // more code.
- __ bind(deferred->exit());
- __ mov(frame_->Top(), eax);
+ // The operands are on the frame.
+ Result answer = deferred->GenerateInlineCode();
+ deferred->BindExit(&answer);
+ frame_->Push(&answer);
} else {
// Call the stub and push the result to the stack.
GenericBinaryOpStub stub(op, overwrite_mode, flags);
- __ CallStub(&stub);
- frame_->Push(eax);
+ Result answer = frame_->CallStub(&stub, 2);
+ frame_->Push(&answer);
}
}
-class DeferredInlinedSmiOperation: public DeferredCode {
+class DeferredInlineSmiOperation: public DeferredCode {
public:
- DeferredInlinedSmiOperation(CodeGenerator* generator,
- Token::Value op, int value,
- OverwriteMode overwrite_mode) :
- DeferredCode(generator), op_(op), value_(value),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlinedSmiOperation");
- }
- virtual void Generate() {
- __ push(eax);
- __ push(Immediate(Smi::FromInt(value_)));
- GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
- __ CallStub(&igostub);
+ DeferredInlineSmiOperation(CodeGenerator* generator,
+ Token::Value op,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : DeferredCode(generator),
+ op_(op),
+ value_(value),
+ overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiOperation");
}
+ virtual void Generate();
+
private:
Token::Value op_;
- int value_;
+ Smi* value_;
OverwriteMode overwrite_mode_;
};
-class DeferredInlinedSmiOperationReversed: public DeferredCode {
+void DeferredInlineSmiOperation::Generate() {
+ Result left(generator());
+ enter()->Bind(&left);
+ generator()->frame()->Push(&left);
+ generator()->frame()->Push(value_);
+ GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
+ Result answer = generator()->frame()->CallStub(&igostub, 2);
+ exit_.Jump(&answer);
+}
+
+
+class DeferredInlineSmiOperationReversed: public DeferredCode {
public:
- DeferredInlinedSmiOperationReversed(CodeGenerator* generator,
- Token::Value op, int value,
- OverwriteMode overwrite_mode) :
- DeferredCode(generator), op_(op), value_(value),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlinedSmiOperationReversed");
- }
- virtual void Generate() {
- __ push(Immediate(Smi::FromInt(value_)));
- __ push(eax);
- GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
- __ CallStub(&igostub);
+ DeferredInlineSmiOperationReversed(CodeGenerator* generator,
+ Token::Value op,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : DeferredCode(generator),
+ op_(op),
+ value_(value),
+ overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiOperationReversed");
}
+ virtual void Generate();
+
private:
Token::Value op_;
- int value_;
+ Smi* value_;
OverwriteMode overwrite_mode_;
};
-class DeferredInlinedSmiAdd: public DeferredCode {
+void DeferredInlineSmiOperationReversed::Generate() {
+ Result right(generator());
+ enter()->Bind(&right);
+ generator()->frame()->Push(value_);
+ generator()->frame()->Push(&right);
+ GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
+ Result answer = generator()->frame()->CallStub(&igostub, 2);
+ exit_.Jump(&answer);
+}
+
+
+class DeferredInlineSmiAdd: public DeferredCode {
public:
- DeferredInlinedSmiAdd(CodeGenerator* generator, int value,
- OverwriteMode overwrite_mode) :
- DeferredCode(generator), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlinedSmiAdd");
+ DeferredInlineSmiAdd(CodeGenerator* generator,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : DeferredCode(generator),
+ value_(value),
+ overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiAdd");
}
- virtual void Generate() {
- // Undo the optimistic add operation and call the shared stub.
- Immediate immediate(Smi::FromInt(value_));
- __ sub(Operand(eax), immediate);
- __ push(eax);
- __ push(immediate);
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
- __ CallStub(&igostub);
- }
+ virtual void Generate();
private:
- int value_;
+ Smi* value_;
OverwriteMode overwrite_mode_;
};
-class DeferredInlinedSmiAddReversed: public DeferredCode {
+void DeferredInlineSmiAdd::Generate() {
+ // Undo the optimistic add operation and call the shared stub.
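+  // The fast path adds the constant optimistically, before knowing whether
+  // the operand is a Smi, so on entry the register holds left + value_ and
+  // the addition must be reversed before calling the stub.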
+ Result left(generator()); // Initially left + value_.
+ enter()->Bind(&left);
+ left.ToRegister();
+ generator()->frame()->Spill(left.reg());
+ __ sub(Operand(left.reg()), Immediate(value_));
+ generator()->frame()->Push(&left);
+ generator()->frame()->Push(value_);
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
+ Result answer = generator()->frame()->CallStub(&igostub, 2);
+ exit_.Jump(&answer);
+}
+
+
+class DeferredInlineSmiAddReversed: public DeferredCode {
public:
- DeferredInlinedSmiAddReversed(CodeGenerator* generator, int value,
- OverwriteMode overwrite_mode) :
- DeferredCode(generator), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlinedSmiAddReversed");
+ DeferredInlineSmiAddReversed(CodeGenerator* generator,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : DeferredCode(generator),
+ value_(value),
+ overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiAddReversed");
}
- virtual void Generate() {
- // Undo the optimistic add operation and call the shared stub.
- Immediate immediate(Smi::FromInt(value_));
- __ sub(Operand(eax), immediate);
- __ push(immediate);
- __ push(eax);
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
- __ CallStub(&igostub);
- }
+ virtual void Generate();
private:
- int value_;
+ Smi* value_;
OverwriteMode overwrite_mode_;
};
-class DeferredInlinedSmiSub: public DeferredCode {
+void DeferredInlineSmiAddReversed::Generate() {
+ // Undo the optimistic add operation and call the shared stub.
+ Result right(generator()); // Initially value_ + right.
+ enter()->Bind(&right);
+ right.ToRegister();
+ generator()->frame()->Spill(right.reg());
+ __ sub(Operand(right.reg()), Immediate(value_));
+ generator()->frame()->Push(value_);
+ generator()->frame()->Push(&right);
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
+ Result answer = generator()->frame()->CallStub(&igostub, 2);
+ exit_.Jump(&answer);
+}
+
+
+class DeferredInlineSmiSub: public DeferredCode {
public:
- DeferredInlinedSmiSub(CodeGenerator* generator, int value,
- OverwriteMode overwrite_mode) :
- DeferredCode(generator), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlinedSmiSub");
+ DeferredInlineSmiSub(CodeGenerator* generator,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : DeferredCode(generator),
+ value_(value),
+ overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiSub");
}
- virtual void Generate() {
- // Undo the optimistic sub operation and call the shared stub.
- Immediate immediate(Smi::FromInt(value_));
- __ add(Operand(eax), immediate);
- __ push(eax);
- __ push(immediate);
- GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
- __ CallStub(&igostub);
- }
+ virtual void Generate();
private:
- int value_;
+ Smi* value_;
OverwriteMode overwrite_mode_;
};
-class DeferredInlinedSmiSubReversed: public DeferredCode {
+void DeferredInlineSmiSub::Generate() {
+ // Undo the optimistic sub operation and call the shared stub.
+ Result left(generator()); // Initially left - value_.
+ enter()->Bind(&left);
+ left.ToRegister();
+ generator()->frame()->Spill(left.reg());
+ __ add(Operand(left.reg()), Immediate(value_));
+ generator()->frame()->Push(&left);
+ generator()->frame()->Push(value_);
+ GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
+ Result answer = generator()->frame()->CallStub(&igostub, 2);
+ exit_.Jump(&answer);
+}
+
+
+class DeferredInlineSmiSubReversed: public DeferredCode {
public:
- // tos_reg is used to save the TOS value before reversing the operands
- // eax will contain the immediate value after undoing the optimistic sub.
- DeferredInlinedSmiSubReversed(CodeGenerator* generator, Register tos_reg,
- OverwriteMode overwrite_mode) :
- DeferredCode(generator), tos_reg_(tos_reg),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlinedSmiSubReversed");
+ DeferredInlineSmiSubReversed(CodeGenerator* generator,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : DeferredCode(generator),
+ value_(value),
+ overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiSubReversed");
}
- virtual void Generate() {
- // Undo the optimistic sub operation and call the shared stub.
- __ add(eax, Operand(tos_reg_));
- __ push(eax);
- __ push(tos_reg_);
- GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
- __ CallStub(&igostub);
- }
+ virtual void Generate();
private:
- Register tos_reg_;
+ Smi* value_;
OverwriteMode overwrite_mode_;
};
+void DeferredInlineSmiSubReversed::Generate() {
+ // Call the shared stub.
+ Result right(generator());
+ enter()->Bind(&right);
+ generator()->frame()->Push(value_);
+ generator()->frame()->Push(&right);
+ GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
+ Result answer = generator()->frame()->CallStub(&igostub, 2);
+ exit_.Jump(&answer);
+}
+
+
void CodeGenerator::SmiOperation(Token::Value op,
StaticType* type,
Handle<Object> value,
// smi literal (multiply by 2, shift by 0, etc.).
// Get the literal value.
- int int_value = Smi::cast(*value)->value();
+ Smi* smi_value = Smi::cast(*value);
+ int int_value = smi_value->value();
ASSERT(is_intn(int_value, kMaxSmiInlinedBits));
switch (op) {
case Token::ADD: {
DeferredCode* deferred = NULL;
if (!reversed) {
- deferred = new DeferredInlinedSmiAdd(this, int_value, overwrite_mode);
+ deferred = new DeferredInlineSmiAdd(this, smi_value, overwrite_mode);
} else {
- deferred = new DeferredInlinedSmiAddReversed(this, int_value,
- overwrite_mode);
+ deferred = new DeferredInlineSmiAddReversed(this, smi_value,
+ overwrite_mode);
}
- frame_->Pop(eax);
- __ add(Operand(eax), Immediate(value));
- __ j(overflow, deferred->enter(), not_taken);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, deferred->enter(), not_taken);
- __ bind(deferred->exit());
- frame_->Push(eax);
+ Result operand = frame_->Pop();
+ operand.ToRegister();
+ frame_->Spill(operand.reg());
+ __ add(Operand(operand.reg()), Immediate(value));
+ deferred->enter()->Branch(overflow, &operand, not_taken);
+ __ test(operand.reg(), Immediate(kSmiTagMask));
+ deferred->enter()->Branch(not_zero, &operand, not_taken);
+ deferred->BindExit(&operand);
+ frame_->Push(&operand);
break;
}
case Token::SUB: {
DeferredCode* deferred = NULL;
- frame_->Pop(eax);
+ Result operand = frame_->Pop();
+      Result answer(this);  // A new register is allocated only if reversed.
if (!reversed) {
- deferred = new DeferredInlinedSmiSub(this, int_value, overwrite_mode);
- __ sub(Operand(eax), Immediate(value));
+ operand.ToRegister();
+ frame_->Spill(operand.reg());
+ deferred = new DeferredInlineSmiSub(this,
+ smi_value,
+ overwrite_mode);
+ __ sub(Operand(operand.reg()), Immediate(value));
+ answer = operand;
} else {
- deferred = new DeferredInlinedSmiSubReversed(this, edx, overwrite_mode);
- __ mov(edx, Operand(eax));
- __ mov(eax, Immediate(value));
- __ sub(eax, Operand(edx));
+ answer = allocator()->Allocate();
+ ASSERT(answer.is_valid());
+ deferred = new DeferredInlineSmiSubReversed(this,
+ smi_value,
+ overwrite_mode);
+ __ mov(answer.reg(), Immediate(value));
+ if (operand.is_register()) {
+ __ sub(answer.reg(), Operand(operand.reg()));
+ } else {
+ ASSERT(operand.is_constant());
+ __ sub(Operand(answer.reg()), Immediate(operand.handle()));
+ }
}
- __ j(overflow, deferred->enter(), not_taken);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, deferred->enter(), not_taken);
- __ bind(deferred->exit());
- frame_->Push(eax);
+ deferred->enter()->Branch(overflow, &operand, not_taken);
+ __ test(answer.reg(), Immediate(kSmiTagMask));
+ deferred->enter()->Branch(not_zero, &operand, not_taken);
+ operand.Unuse();
+ deferred->BindExit(&answer);
+ frame_->Push(&answer);
break;
}
case Token::SAR: {
if (reversed) {
- frame_->Pop(eax);
- frame_->Push(Immediate(value));
- frame_->Push(eax);
+ Result top = frame_->Pop();
+ frame_->Push(value);
+ frame_->Push(&top);
GenericBinaryOperation(op, type, overwrite_mode);
} else {
- int shift_value = int_value & 0x1f; // only least significant 5 bits
+ // Only the least significant 5 bits of the shift value are used.
+ // In the slow case, this masking is done inside the runtime call.
+ int shift_value = int_value & 0x1f;
DeferredCode* deferred =
- new DeferredInlinedSmiOperation(this, Token::SAR, shift_value,
- overwrite_mode);
- frame_->Pop(eax);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, deferred->enter(), not_taken);
- __ sar(eax, shift_value);
- __ and_(eax, ~kSmiTagMask);
- __ bind(deferred->exit());
- frame_->Push(eax);
+ new DeferredInlineSmiOperation(this, Token::SAR, smi_value,
+ overwrite_mode);
+ Result result = frame_->Pop();
+ result.ToRegister();
+ __ test(result.reg(), Immediate(kSmiTagMask));
+ deferred->enter()->Branch(not_zero, &result, not_taken);
+ frame_->Spill(result.reg());
+ __ sar(result.reg(), shift_value);
+ __ and_(result.reg(), ~kSmiTagMask);
+ deferred->BindExit(&result);
+ frame_->Push(&result);
}
break;
}
case Token::SHR: {
if (reversed) {
- frame_->Pop(eax);
- frame_->Push(Immediate(value));
- frame_->Push(eax);
+ Result top = frame_->Pop();
+ frame_->Push(value);
+ frame_->Push(&top);
GenericBinaryOperation(op, type, overwrite_mode);
} else {
- int shift_value = int_value & 0x1f; // only least significant 5 bits
+ // Only the least significant 5 bits of the shift value are used.
+ // In the slow case, this masking is done inside the runtime call.
+ int shift_value = int_value & 0x1f;
DeferredCode* deferred =
- new DeferredInlinedSmiOperation(this, Token::SHR, shift_value,
- overwrite_mode);
- frame_->Pop(eax);
- __ test(eax, Immediate(kSmiTagMask));
- __ mov(ebx, Operand(eax));
- __ j(not_zero, deferred->enter(), not_taken);
- __ sar(ebx, kSmiTagSize);
- __ shr(ebx, shift_value);
- __ test(ebx, Immediate(0xc0000000));
- __ j(not_zero, deferred->enter(), not_taken);
- // tag result and store it in TOS (eax)
- ASSERT(kSmiTagSize == times_2); // adjust code if not the case
- __ lea(eax, Operand(ebx, ebx, times_1, kSmiTag));
- __ bind(deferred->exit());
- frame_->Push(eax);
+ new DeferredInlineSmiOperation(this, Token::SHR, smi_value,
+ overwrite_mode);
+ Result operand = frame_->Pop();
+ operand.ToRegister();
+ __ test(operand.reg(), Immediate(kSmiTagMask));
+ deferred->enter()->Branch(not_zero, &operand, not_taken);
+ Result answer = allocator()->Allocate();
+ ASSERT(answer.is_valid());
+ __ mov(answer.reg(), Operand(operand.reg()));
+ __ sar(answer.reg(), kSmiTagSize);
+ __ shr(answer.reg(), shift_value);
+        // A negative Smi shifted right by two or more bits always gives a
+        // result in the positive Smi range.
+ if (shift_value < 2) {
+ __ test(answer.reg(), Immediate(0xc0000000));
+ deferred->enter()->Branch(not_zero, &operand, not_taken);
+ }
+ operand.Unuse();
+ ASSERT(kSmiTagSize == times_2); // Adjust the code if not true.
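+        // The lea computes answer + answer + kSmiTag, i.e. 2 * answer with
+        // kSmiTag == 0, which retags the untagged result as a Smi (e.g. the
+        // untagged value 3 becomes the Smi bit pattern 6).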
+ __ lea(answer.reg(),
+ Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
+ deferred->BindExit(&answer);
+ frame_->Push(&answer);
}
break;
}
case Token::SHL: {
if (reversed) {
- frame_->Pop(eax);
- frame_->Push(Immediate(value));
- frame_->Push(eax);
+ Result top = frame_->Pop();
+ frame_->Push(value);
+ frame_->Push(&top);
GenericBinaryOperation(op, type, overwrite_mode);
} else {
- int shift_value = int_value & 0x1f; // only least significant 5 bits
+ // Only the least significant 5 bits of the shift value are used.
+ // In the slow case, this masking is done inside the runtime call.
+ int shift_value = int_value & 0x1f;
DeferredCode* deferred =
- new DeferredInlinedSmiOperation(this, Token::SHL, shift_value,
- overwrite_mode);
- frame_->Pop(eax);
- __ test(eax, Immediate(kSmiTagMask));
- __ mov(ebx, Operand(eax));
- __ j(not_zero, deferred->enter(), not_taken);
- __ sar(ebx, kSmiTagSize);
- __ shl(ebx, shift_value);
- // This is the Smi check for the shifted result.
- // After signed subtraction of 0xc0000000, the valid
- // Smis are positive.
- __ cmp(ebx, 0xc0000000);
- __ j(sign, deferred->enter(), not_taken);
- // Tag the result and store it on top of the frame.
- ASSERT(kSmiTagSize == times_2); // Adjust the code if not true.
- __ lea(eax, Operand(ebx, ebx, times_1, kSmiTag));
- __ bind(deferred->exit());
- frame_->Push(eax);
+ new DeferredInlineSmiOperation(this, Token::SHL, smi_value,
+ overwrite_mode);
+ Result operand = frame_->Pop();
+ operand.ToRegister();
+ __ test(operand.reg(), Immediate(kSmiTagMask));
+ deferred->enter()->Branch(not_zero, &operand, not_taken);
+ Result answer = allocator()->Allocate();
+ ASSERT(answer.is_valid());
+ __ mov(answer.reg(), Operand(operand.reg()));
+ ASSERT(kSmiTag == 0); // adjust code if not the case
+        // If shift_value is 1 we do no explicit shifts; the Smi conversion
+        // (the add below) performs the shift by one.
+ if (shift_value == 0) {
+ __ sar(answer.reg(), kSmiTagSize);
+ } else if (shift_value > 1) {
+ __ shl(answer.reg(), shift_value - 1);
+ }
+ // Convert int result to Smi, checking that it is in int range.
+ ASSERT(kSmiTagSize == times_2); // adjust code if not the case
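+        // For example, with shift_value == 3 the input Smi 2 * v has been
+        // shifted to 8 * v by the shl above; the add below doubles it to
+        // 16 * v, which is the Smi encoding of v << 3.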
+ __ add(answer.reg(), Operand(answer.reg()));
+ deferred->enter()->Branch(overflow, &operand, not_taken);
+ operand.Unuse();
+ deferred->BindExit(&answer);
+ frame_->Push(&answer);
}
break;
}
case Token::BIT_AND: {
DeferredCode* deferred = NULL;
if (!reversed) {
- deferred = new DeferredInlinedSmiOperation(this, op, int_value,
- overwrite_mode);
+ deferred = new DeferredInlineSmiOperation(this, op, smi_value,
+ overwrite_mode);
} else {
- deferred = new DeferredInlinedSmiOperationReversed(this, op, int_value,
- overwrite_mode);
+ deferred = new DeferredInlineSmiOperationReversed(this, op, smi_value,
+ overwrite_mode);
}
- frame_->Pop(eax);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, deferred->enter(), not_taken);
+ Result operand = frame_->Pop();
+ operand.ToRegister();
+ __ test(operand.reg(), Immediate(kSmiTagMask));
+ deferred->enter()->Branch(not_zero, &operand, not_taken);
+ frame_->Spill(operand.reg());
if (op == Token::BIT_AND) {
if (int_value == 0) {
- __ xor_(Operand(eax), eax);
+ __ xor_(Operand(operand.reg()), operand.reg());
} else {
- __ and_(Operand(eax), Immediate(value));
+ __ and_(Operand(operand.reg()), Immediate(value));
}
} else if (op == Token::BIT_XOR) {
if (int_value != 0) {
- __ xor_(Operand(eax), Immediate(value));
+ __ xor_(Operand(operand.reg()), Immediate(value));
}
} else {
ASSERT(op == Token::BIT_OR);
if (int_value != 0) {
- __ or_(Operand(eax), Immediate(value));
+ __ or_(Operand(operand.reg()), Immediate(value));
}
}
- __ bind(deferred->exit());
- frame_->Push(eax);
+ deferred->BindExit(&operand);
+ frame_->Push(&operand);
break;
}
default: {
if (!reversed) {
- frame_->Push(Immediate(value));
+ frame_->Push(value);
} else {
- frame_->Pop(eax);
- frame_->Push(Immediate(value));
- frame_->Push(eax);
+ Result top = frame_->Pop();
+ frame_->Push(value);
+ frame_->Push(&top);
}
GenericBinaryOperation(op, type, overwrite_mode);
break;
};
-void CodeGenerator::Comparison(Condition cc, bool strict) {
+void CodeGenerator::Comparison(Condition cc,
+ bool strict,
+ ControlDestination* dest) {
// Strict only makes sense for equality comparisons.
ASSERT(!strict || cc == equal);
+ Result left_side(this);
+ Result right_side(this);
// Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
if (cc == greater || cc == less_equal) {
cc = ReverseCondition(cc);
- frame_->Pop(edx);
- frame_->Pop(eax);
+ left_side = frame_->Pop();
+ right_side = frame_->Pop();
} else {
- frame_->Pop(eax);
- frame_->Pop(edx);
+ right_side = frame_->Pop();
+ left_side = frame_->Pop();
}
+ // If either side is a constant smi, optimize the comparison.
+ bool left_side_constant_smi =
+ left_side.is_constant() && left_side.handle()->IsSmi();
+ bool right_side_constant_smi =
+ right_side.is_constant() && right_side.handle()->IsSmi();
+ bool left_side_constant_null =
+ left_side.is_constant() && left_side.handle()->IsNull();
+ bool right_side_constant_null =
+ right_side.is_constant() && right_side.handle()->IsNull();
+
+ if (left_side_constant_smi || right_side_constant_smi) {
+ if (left_side_constant_smi && right_side_constant_smi) {
+ // Trivial case, comparing two constants.
+ int left_value = Smi::cast(*left_side.handle())->value();
+ int right_value = Smi::cast(*right_side.handle())->value();
+      if ((left_value < right_value &&
+           (cc == less || cc == less_equal || cc == not_equal)) ||
+          (left_value == right_value &&
+           (cc == less_equal || cc == equal || cc == greater_equal)) ||
+          (left_value > right_value &&
+           (cc == greater || cc == greater_equal || cc == not_equal))) {
+ // The comparison is unconditionally true.
+ dest->Goto(true);
+ } else {
+ // The comparison is unconditionally false.
+ dest->Goto(false);
+ }
+ } else { // Only one side is a constant Smi.
+ // If left side is a constant Smi, reverse the operands.
+ // Since one side is a constant Smi, conversion order does not matter.
+ if (left_side_constant_smi) {
+ Result temp = left_side;
+ left_side = right_side;
+ right_side = temp;
+ cc = ReverseCondition(cc);
+ // This may reintroduce greater or less_equal as the value of cc.
+ // CompareStub and the inline code both support all values of cc.
+ }
+ // Implement comparison against a constant Smi, inlining the case
+ // where both sides are Smis.
+ left_side.ToRegister();
+ ASSERT(left_side.is_valid());
+ JumpTarget is_smi(this);
+ __ test(left_side.reg(), Immediate(kSmiTagMask));
+ is_smi.Branch(zero, &left_side, &right_side, taken);
+
+      // Set up and call the compare stub, which expects its arguments in
+      // edx and eax.
+ CompareStub stub(cc, strict);
+ left_side.ToRegister(edx); // Only left_side currently uses a register.
+ right_side.ToRegister(eax); // left_side is not in eax. eax is free.
+ Result result = frame_->CallStub(&stub, &left_side, &right_side, 0);
+ result.ToRegister();
+ __ cmp(result.reg(), 0);
+ result.Unuse();
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+
+ is_smi.Bind(&left_side, &right_side);
+ left_side.ToRegister();
+ // Test smi equality and comparison by signed int comparison.
+ if (IsUnsafeSmi(right_side.handle())) {
+ right_side.ToRegister();
+ ASSERT(right_side.is_valid());
+ __ cmp(left_side.reg(), Operand(right_side.reg()));
+ } else {
+ __ cmp(Operand(left_side.reg()), Immediate(right_side.handle()));
+ }
+ left_side.Unuse();
+ right_side.Unuse();
+ dest->Split(cc);
+ }
+ } else if (cc == equal &&
+ (left_side_constant_null || right_side_constant_null)) {
+ // To make null checks efficient, we check if either the left side or
+ // the right side is the constant 'null'.
+ // If so, we optimize the code by inlining a null check instead of
+ // calling the (very) general runtime routine for checking equality.
+ Result operand = left_side_constant_null ? right_side : left_side;
+ right_side.Unuse();
+ left_side.Unuse();
+ operand.ToRegister();
+ __ cmp(operand.reg(), Factory::null_value());
+ if (strict) {
+ operand.Unuse();
+ dest->Split(equal);
+ } else {
+ // The 'null' value is only equal to 'undefined' if using non-strict
+ // comparisons.
+ dest->true_target()->Branch(equal);
+ __ cmp(operand.reg(), Factory::undefined_value());
+ dest->true_target()->Branch(equal);
+ __ test(operand.reg(), Immediate(kSmiTagMask));
+ dest->false_target()->Branch(equal);
- // Check for the smi case.
- Label is_smi, done;
- __ mov(ecx, Operand(eax));
- __ or_(ecx, Operand(edx));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(zero, &is_smi, taken);
-
- // When non-smi, call out to the compare stub. "parameters" setup by
- // calling code in edx and eax and "result" is returned in the flags.
- CompareStub stub(cc, strict);
- __ CallStub(&stub);
- if (cc == equal) {
- __ test(eax, Operand(eax));
- } else {
- __ cmp(eax, 0);
- }
- __ jmp(&done);
-
- // Test smi equality by pointer comparison.
- __ bind(&is_smi);
- __ cmp(edx, Operand(eax));
- // Fall through to |done|.
-
- __ bind(&done);
- cc_reg_ = cc;
-}
-
-
-class SmiComparisonDeferred: public DeferredCode {
- public:
- SmiComparisonDeferred(CodeGenerator* generator,
- Condition cc,
- bool strict,
- int value)
- : DeferredCode(generator), cc_(cc), strict_(strict), value_(value) {
- set_comment("[ ComparisonDeferred");
+ // It can be an undetectable object.
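+      // Undetectable objects compare equal to undefined and null under
+      // non-strict equality, so the undetectable bit sends control to the
+      // true target.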
+ // Use a scratch register in preference to spilling operand.reg().
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(),
+ FieldOperand(operand.reg(), HeapObject::kMapOffset));
+ __ movzx_b(temp.reg(),
+ FieldOperand(temp.reg(), Map::kBitFieldOffset));
+ __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+ temp.Unuse();
+ operand.Unuse();
+ dest->Split(not_zero);
+ }
+ } else { // Neither side is a constant Smi or null.
+ // If either side is a non-smi constant, skip the smi check.
+ bool known_non_smi =
+ left_side.is_constant() && !left_side.handle()->IsSmi() ||
+ right_side.is_constant() && !right_side.handle()->IsSmi();
+ left_side.ToRegister();
+ right_side.ToRegister();
+ JumpTarget is_smi(this);
+ if (!known_non_smi) {
+ // Check for the smi case.
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(), left_side.reg());
+ __ or_(temp.reg(), Operand(right_side.reg()));
+ __ test(temp.reg(), Immediate(kSmiTagMask));
+ temp.Unuse();
+ is_smi.Branch(zero, &left_side, &right_side, taken);
+ }
+    // When non-smi, call out to the compare stub. The "parameters" are set
+    // up by the calling code in edx and eax, and the "result" is returned
+    // in the flags.
+ if (!left_side.reg().is(eax)) {
+ right_side.ToRegister(eax);
+ left_side.ToRegister(edx);
+ } else if (!right_side.reg().is(edx)) {
+ left_side.ToRegister(edx);
+ right_side.ToRegister(eax);
+ } else {
+ frame_->Spill(eax); // Can be multiply referenced, even now.
+ frame_->Spill(edx);
+ __ xchg(eax, edx);
+ // If left_side and right_side become real (non-dummy) arguments
+ // to CallStub, they need to be swapped in this case.
+ }
+ CompareStub stub(cc, strict);
+ Result answer = frame_->CallStub(&stub, &right_side, &left_side, 0);
+ if (cc == equal) {
+ __ test(answer.reg(), Operand(answer.reg()));
+ } else {
+ __ cmp(answer.reg(), 0);
+ }
+ answer.Unuse();
+ if (known_non_smi) {
+ dest->Split(cc);
+ } else {
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+ is_smi.Bind(&left_side, &right_side);
+ left_side.ToRegister();
+ right_side.ToRegister();
+ __ cmp(left_side.reg(), Operand(right_side.reg()));
+ right_side.Unuse();
+ left_side.Unuse();
+ dest->Split(cc);
+ }
}
- virtual void Generate();
-
- private:
- Condition cc_;
- bool strict_;
- int value_;
-};
-
-
-void SmiComparisonDeferred::Generate() {
- CompareStub stub(cc_, strict_);
- // Setup parameters and call stub.
- __ mov(edx, Operand(eax));
- __ Set(eax, Immediate(Smi::FromInt(value_)));
- __ CallStub(&stub);
- __ cmp(eax, 0);
- // "result" is returned in the flags
-}
-
-
-void CodeGenerator::SmiComparison(Condition cc,
- Handle<Object> value,
- bool strict) {
- // Strict only makes sense for equality comparisons.
- ASSERT(!strict || cc == equal);
-
- int int_value = Smi::cast(*value)->value();
- ASSERT(is_intn(int_value, kMaxSmiInlinedBits));
-
- SmiComparisonDeferred* deferred =
- new SmiComparisonDeferred(this, cc, strict, int_value);
- frame_->Pop(eax);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, deferred->enter(), not_taken);
- // Test smi equality by pointer comparison.
- __ cmp(Operand(eax), Immediate(value));
- __ bind(deferred->exit());
- cc_reg_ = cc;
}
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
int position) {
// Push the arguments ("left-to-right") on the stack.
- for (int i = 0; i < args->length(); i++) {
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
}
CodeForSourcePosition(position);
// Use the shared code stub to call the function.
- CallFunctionStub call_function(args->length());
- __ CallStub(&call_function);
-
- // Restore context and pop function from the stack.
- __ mov(esi, frame_->Context());
- __ mov(frame_->Top(), eax);
+ CallFunctionStub call_function(arg_count);
+ Result answer = frame_->CallStub(&call_function, arg_count + 1);
+ // Restore context and replace function on the stack with the
+ // result of the stub invocation.
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &answer);
}
-void CodeGenerator::Branch(bool if_true, Label* L) {
- ASSERT(has_cc());
- Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
- __ j(cc, L);
- cc_reg_ = no_condition;
+class DeferredStackCheck: public DeferredCode {
+ public:
+ explicit DeferredStackCheck(CodeGenerator* generator)
+ : DeferredCode(generator) {
+ set_comment("[ DeferredStackCheck");
+ }
+
+ virtual void Generate();
+};
+
+
+void DeferredStackCheck::Generate() {
+ enter()->Bind();
+ StackCheckStub stub;
+ Result ignored = generator()->frame()->CallStub(&stub, 0);
+ ignored.Unuse();
+ exit_.Jump();
}
void CodeGenerator::CheckStack() {
if (FLAG_check_stack) {
- Label stack_is_ok;
- StackCheckStub stub;
+ DeferredStackCheck* deferred = new DeferredStackCheck(this);
ExternalReference stack_guard_limit =
ExternalReference::address_of_stack_guard_limit();
__ cmp(esp, Operand::StaticVariable(stack_guard_limit));
- __ j(above_equal, &stack_is_ok, taken);
- __ CallStub(&stub);
- __ bind(&stack_is_ok);
+ deferred->enter()->Branch(below, not_taken);
+ deferred->BindExit();
+ }
+}
+
+
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+ ASSERT(!in_spilled_code());
+ for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
+ Visit(statements->at(i));
}
}
void CodeGenerator::VisitBlock(Block* node) {
+ ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ Block");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
node->set_break_stack_height(break_stack_height_);
+ node->break_target()->Initialize(this);
VisitStatements(node->statements());
- __ bind(node->break_target());
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
}
void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- frame_->Push(Immediate(pairs));
- frame_->Push(esi);
- frame_->Push(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ frame_->Push(pairs);
+
+ // Duplicate the context register.
+ Result context(esi, this);
+ frame_->Push(&context);
+
+ frame_->Push(Smi::FromInt(is_eval() ? 1 : 0));
+ Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
// Return value is ignored.
}
void CodeGenerator::VisitDeclaration(Declaration* node) {
Comment cmnt(masm_, "[ Declaration");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
Slot* slot = var->slot();
// Variables with a "LOOKUP" slot were introduced as non-locals
// during variable resolution and must have mode DYNAMIC.
ASSERT(var->is_dynamic());
- // For now, just do a runtime call.
- frame_->Push(esi);
- frame_->Push(Immediate(var->name()));
+ // For now, just do a runtime call. Duplicate the context register.
+ Result context(esi, this);
+ frame_->Push(&context);
+ frame_->Push(var->name());
// Declaration nodes are always introduced in one of two modes.
ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
- frame_->Push(Immediate(Smi::FromInt(attr)));
+ frame_->Push(Smi::FromInt(attr));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (node->mode() == Variable::CONST) {
- frame_->Push(Immediate(Factory::the_hole_value()));
+ frame_->Push(Factory::the_hole_value());
} else if (node->fun() != NULL) {
Load(node->fun());
} else {
- frame_->Push(Immediate(0)); // no initial value!
+ frame_->Push(Smi::FromInt(0)); // no initial value!
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
// Ignore the return value (declarations are statements).
return;
}
if (val != NULL) {
{
- // Set initial value.
+ // Set the initial value.
Reference target(this, node->proxy());
Load(val);
target.SetValue(NOT_CONST_INIT);
// it goes out of scope.
}
// Get rid of the assigned value (declarations are statements).
- frame_->Pop();
+ frame_->Drop();
}
}
void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+ ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ ExpressionStatement");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
Expression* expression = node->expression();
expression->MarkAsStatement();
Load(expression);
// Remove the lingering expression result from the top of stack.
- frame_->Pop();
+ frame_->Drop();
}
void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+ ASSERT(!in_spilled_code());
Comment cmnt(masm_, "// EmptyStatement");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
// nothing to do
}
void CodeGenerator::VisitIfStatement(IfStatement* node) {
+ ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ IfStatement");
- // Generate different code depending on which
- // parts of the if statement are present or not.
+ // Generate different code depending on which parts of the if statement
+ // are present or not.
bool has_then_stm = node->HasThenStatement();
bool has_else_stm = node->HasElseStatement();
- CodeForStatement(node);
- Label exit;
+ CodeForStatementPosition(node);
+ JumpTarget exit(this);
if (has_then_stm && has_else_stm) {
- Label then;
- Label else_;
- // if (cond)
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &then, &else_, true);
- Branch(false, &else_);
- // then
- __ bind(&then);
- Visit(node->then_statement());
- __ jmp(&exit);
- // else
- __ bind(&else_);
- Visit(node->else_statement());
+ JumpTarget then(this);
+ JumpTarget else_(this);
+ ControlDestination dest(&then, &else_, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.false_was_fall_through()) {
+ // The else target was bound, so we compile the else part first.
+ Visit(node->else_statement());
+
+ // We may have dangling jumps to the then part.
+ if (then.is_linked()) {
+ if (has_valid_frame()) exit.Jump();
+ then.Bind();
+ Visit(node->then_statement());
+ }
+ } else {
+ // The then target was bound, so we compile the then part first.
+ Visit(node->then_statement());
+
+ if (else_.is_linked()) {
+ if (has_valid_frame()) exit.Jump();
+ else_.Bind();
+ Visit(node->else_statement());
+ }
+ }
} else if (has_then_stm) {
ASSERT(!has_else_stm);
- Label then;
- // if (cond)
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &then, &exit, true);
- Branch(false, &exit);
- // then
- __ bind(&then);
- Visit(node->then_statement());
+ JumpTarget then(this);
+ ControlDestination dest(&then, &exit, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.false_was_fall_through()) {
+ // The exit label was bound. We may have dangling jumps to the
+ // then part.
+ if (then.is_linked()) {
+ exit.Unuse();
+ exit.Jump();
+ then.Bind();
+ Visit(node->then_statement());
+ }
+ } else {
+ // The then label was bound.
+ Visit(node->then_statement());
+ }
} else if (has_else_stm) {
ASSERT(!has_then_stm);
- Label else_;
- // if (!cond)
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &exit, &else_, true);
- Branch(true, &exit);
- // else
- __ bind(&else_);
- Visit(node->else_statement());
+ JumpTarget else_(this);
+ ControlDestination dest(&exit, &else_, false);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.true_was_fall_through()) {
+ // The exit label was bound. We may have dangling jumps to the
+ // else part.
+ if (else_.is_linked()) {
+ exit.Unuse();
+ exit.Jump();
+ else_.Bind();
+ Visit(node->else_statement());
+ }
+ } else {
+ // The else label was bound.
+ Visit(node->else_statement());
+ }
} else {
ASSERT(!has_then_stm && !has_else_stm);
- // if (cond)
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &exit, &exit, false);
- if (has_cc()) {
- cc_reg_ = no_condition;
- } else {
- // No cc value set up, that means the boolean was pushed.
- // Pop it again, since it is not going to be used.
- frame_->Pop();
+ // We only care about the condition's side effects (not its value
+ // or control flow effect). LoadCondition is called without
+ // forcing control flow.
+ ControlDestination dest(&exit, &exit, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false);
+ if (!dest.is_used()) {
+ // We got a value on the frame rather than (or in addition to)
+ // control flow.
+ frame_->Drop();
}
}
- // end
- __ bind(&exit);
+ if (exit.is_linked()) {
+ exit.Bind();
+ }
}
void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+ ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ ContinueStatement");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
CleanStack(break_stack_height_ - node->target()->break_stack_height());
- __ jmp(node->target()->continue_target());
+ node->target()->continue_target()->Jump();
}
void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
+ ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ BreakStatement");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
CleanStack(break_stack_height_ - node->target()->break_stack_height());
- __ jmp(node->target()->break_target());
+ node->target()->break_target()->Jump();
}
void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+ ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ ReturnStatement");
- CodeForStatement(node);
- Load(node->expression());
-
- // Move the function result into eax
- frame_->Pop(eax);
- // If we're inside a try statement or the return instruction
- // sequence has been generated, we just jump to that
- // point. Otherwise, we generate the return instruction sequence and
- // bind the function return label.
- if (is_inside_try_ || function_return_.is_bound()) {
- __ jmp(&function_return_);
+ if (function_return_is_shadowed_) {
+ // If the function return is shadowed, we spill all information
+ // and just jump to the label.
+ VirtualFrame::SpilledScope spilled_scope(this);
+ CodeForStatementPosition(node);
+ LoadAndSpill(node->expression());
+ frame_->EmitPop(eax);
+ function_return_.Jump();
} else {
- __ bind(&function_return_);
- if (FLAG_trace) {
- frame_->Push(eax); // undo the pop(eax) from above
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
+ // Load the returned value.
+ CodeForStatementPosition(node);
+ Load(node->expression());
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- __ bind(&check_exit_codesize);
+ // Pop the result from the frame and prepare the frame for
+ // returning thus making it easier to merge.
+ Result result = frame_->Pop();
+ frame_->PrepareForReturn();
+
+ // Move the result into register eax where it belongs.
+ result.ToRegister(eax);
+    // TODO(203): Instead of explicitly calling Unuse on the result, it
+ // might be better to pass the result to Jump and Bind below.
+ result.Unuse();
+
+ // If the function return label is already bound, we reuse the
+ // code by jumping to the return site.
+ if (function_return_.is_bound()) {
+ function_return_.Jump();
+ } else {
+ function_return_.Bind();
+ GenerateReturnSequence();
+ }
+ }
+}
- // Leave the frame and return popping the arguments and the
- // receiver.
- frame_->Exit();
- __ ret((scope_->num_parameters() + 1) * kPointerSize);
- // Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
- __ SizeOfCodeGeneratedSince(&check_exit_codesize));
+void CodeGenerator::GenerateReturnSequence() {
+ // The return value is a live (but not currently reference counted)
+ // reference to eax. This is safe because the current frame does not
+ // contain a reference to eax (it is prepared for the return by spilling
+ // all registers).
+ ASSERT(has_valid_frame());
+ if (FLAG_trace) {
+ frame_->Push(eax); // Materialize result on the stack.
+ frame_->CallRuntime(Runtime::kTraceExit, 1);
}
+
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ __ bind(&check_exit_codesize);
+
+ // Leave the frame and return popping the arguments and the
+ // receiver.
+ frame_->Exit();
+ __ ret((scope_->num_parameters() + 1) * kPointerSize);
+ DeleteFrame();
+
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger.
+ ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
+ __ SizeOfCodeGeneratedSince(&check_exit_codesize));
}
void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+ ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ WithEnterStatement");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
Load(node->expression());
+ Result context(this);
if (node->is_catch_block()) {
- __ CallRuntime(Runtime::kPushCatchContext, 1);
+ context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
} else {
- __ CallRuntime(Runtime::kPushContext, 1);
+ context = frame_->CallRuntime(Runtime::kPushContext, 1);
}
+ // Update context local.
+ frame_->SaveContextRegister();
+
if (kDebug) {
- Label verified_true;
- // Verify eax and esi are the same in debug mode
- __ cmp(eax, Operand(esi));
- __ j(equal, &verified_true);
+ JumpTarget verified_true(this);
+ // Verify that the result of the runtime call and the esi register are
+ // the same in debug mode.
+ __ cmp(context.reg(), Operand(esi));
+ context.Unuse();
+ verified_true.Branch(equal);
+ frame_->SpillAll();
__ int3();
- __ bind(&verified_true);
+ verified_true.Bind();
}
-
- // Update context local.
- __ mov(frame_->Context(), esi);
}
void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+ ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ WithExitStatement");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
// Pop context.
__ mov(esi, ContextOperand(esi, Context::PREVIOUS_INDEX));
// Update context local.
- __ mov(frame_->Context(), esi);
+ frame_->SaveContextRegister();
}
+
int CodeGenerator::FastCaseSwitchMaxOverheadFactor() {
return kFastSwitchMaxOverheadFactor;
}
+
int CodeGenerator::FastCaseSwitchMinCaseCount() {
return kFastSwitchMinCaseCount;
}
+
// Generate a computed jump to a switch case.
void CodeGenerator::GenerateFastCaseSwitchJumpTable(
SwitchStatement* node,
int min_index,
int range,
- Label* fail_label,
+ Label* default_label,
Vector<Label*> case_targets,
Vector<Label> case_labels) {
// Notice: Internal references, used by both the jmp instruction and
// placeholders, and fill in the addresses after the labels have been
// bound.
- frame_->Pop(eax); // supposed Smi
- // check range of value, if outside [0..length-1] jump to default/end label.
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ JumpTarget setup_default(this);
+ JumpTarget is_smi(this);
- // Test whether input is a HeapNumber that is really a Smi
- Label is_smi;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(equal, &is_smi);
- // It's a heap object, not a Smi or a Failure
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ cmp(ebx, HEAP_NUMBER_TYPE);
- __ j(not_equal, fail_label);
- // eax points to a heap number.
- __ push(eax);
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- __ bind(&is_smi);
+ // A non-null default label pointer indicates a default case among
+ // the case labels. Otherwise we use the break target as a
+ // "default".
+ JumpTarget* default_target =
+ (default_label == NULL) ? node->break_target() : &setup_default;
+ // Test whether input is a smi.
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ Result switch_value = frame_->Pop();
+ switch_value.ToRegister();
+ __ test(switch_value.reg(), Immediate(kSmiTagMask));
+ is_smi.Branch(equal, &switch_value, taken);
+
+ // It's a heap object, not a smi or a failure. Check if it is a
+ // heap number.
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(), FieldOperand(switch_value.reg(), HeapObject::kMapOffset));
+ __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
+ __ cmp(temp.reg(), HEAP_NUMBER_TYPE);
+ temp.Unuse();
+ default_target->Branch(not_equal);
+
+ // The switch value is a heap number. Convert it to a smi.
+ frame_->Push(&switch_value);
+ Result smi_value = frame_->CallRuntime(Runtime::kNumberToSmi, 1);
+
+ is_smi.Bind(&smi_value);
+ smi_value.ToRegister();
+ // Convert the switch value to a 0-based table index.
if (min_index != 0) {
- __ sub(Operand(eax), Immediate(min_index << kSmiTagSize));
+ frame_->Spill(smi_value.reg());
+ __ sub(Operand(smi_value.reg()), Immediate(min_index << kSmiTagSize));
}
- __ test(eax, Immediate(0x80000000 | kSmiTagMask)); // negative or not Smi
- __ j(not_equal, fail_label, not_taken);
- __ cmp(eax, range << kSmiTagSize);
- __ j(greater_equal, fail_label, not_taken);
+ // Go to the default case if the table index is negative or not a smi.
+ __ test(smi_value.reg(), Immediate(0x80000000 | kSmiTagMask));
+ default_target->Branch(not_equal, not_taken);
+ __ cmp(smi_value.reg(), range << kSmiTagSize);
+ default_target->Branch(greater_equal, not_taken);
+
+ // The expected frame at all the case labels is a version of the
+ // current one (the bidirectional entry frame, which an arbitrary
+ // frame of the correct height can be merged to). Keep a copy to
+ // restore at the start of every label. Create a jump target and
+ // bind it to set its entry frame properly.
+ JumpTarget entry_target(this, JumpTarget::BIDIRECTIONAL);
+ entry_target.Bind(&smi_value);
+ VirtualFrame* start_frame = new VirtualFrame(frame_);
// 0 is placeholder.
- __ jmp(Operand(eax, eax, times_1, 0x0, RelocInfo::INTERNAL_REFERENCE));
- // calculate address to overwrite later with actual address of table.
+ // Jump to the address at table_address + 2 * smi_value.reg().
+  // The target of the jump is read from table_address + 4 * index, where
+  // index is the zero-based table index and smi_value.reg() holds its Smi
+  // encoding, 2 * index.
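+  // For example, a zero-based index of 3 is the Smi 6, so the operand
+  // address is table_address + 12, the fourth 4-byte table entry.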
+ smi_value.ToRegister();
+ __ jmp(Operand(smi_value.reg(), smi_value.reg(),
+ times_1, 0x0, RelocInfo::INTERNAL_REFERENCE));
+ smi_value.Unuse();
+ // Calculate address to overwrite later with actual address of table.
int32_t jump_table_ref = __ pc_offset() - sizeof(int32_t);
-
__ Align(4);
Label table_start;
__ bind(&table_start);
__ WriteInternalReference(jump_table_ref, table_start);
for (int i = 0; i < range; i++) {
- // table entry, 0 is placeholder for case address
+ // These are the table entries. 0x0 is the placeholder for case address.
__ dd(0x0, RelocInfo::INTERNAL_REFERENCE);
}
- GenerateFastCaseSwitchCases(node, case_labels);
+ GenerateFastCaseSwitchCases(node, case_labels, start_frame);
+
+ // If there was a default case, we need to emit the code to match it.
+ if (default_label != NULL) {
+ if (has_valid_frame()) {
+ node->break_target()->Jump();
+ }
+ setup_default.Bind();
+ frame_->MergeTo(start_frame);
+ __ jmp(default_label);
+ DeleteFrame();
+ }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
for (int i = 0, entry_pos = table_start.pos();
- i < range; i++, entry_pos += sizeof(uint32_t)) {
- __ WriteInternalReference(entry_pos, *case_targets[i]);
+ i < range;
+ i++, entry_pos += sizeof(uint32_t)) {
+ if (case_targets[i] == NULL) {
+ __ WriteInternalReference(entry_pos,
+ *node->break_target()->entry_label());
+ } else {
+ __ WriteInternalReference(entry_pos, *case_targets[i]);
+ }
}
+
+ delete start_frame;
}
void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+ ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ SwitchStatement");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
node->set_break_stack_height(break_stack_height_);
+ node->break_target()->Initialize(this);
Load(node->tag());
-
if (TryGenerateFastCaseSwitchStatement(node)) {
return;
}
- Label next, fall_through, default_case;
+ JumpTarget next_test(this);
+ JumpTarget fall_through(this);
+ JumpTarget default_entry(this);
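+  // default_exit is bidirectional: the default clause is compiled last, so
+  // a jump from the end of its body to the body of the case that follows
+  // it in the source is a backward jump.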
+ JumpTarget default_exit(this, JumpTarget::BIDIRECTIONAL);
ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length();
+ CaseClause* default_clause = NULL;
- for (int i = 0; i < length; i++) {
- CaseClause* clause = cases->at(i);
- Comment cmnt(masm_, "[ case clause");
-
+ // Loop over the cases, compiling tests and bodies. Skip the
+ // default if found and compile it at the end. Exit early if an
+ // unconditionally true match occurs (which can happen, eg, in the
+  // unconditionally true match occurs (which can happen, e.g., in the
+ //
+ // Bind the next_test target before entering the loop so we can use
+ // its state to detect whether the switch value needs to be dropped
+ // from the frame.
+ next_test.Bind();
+ int index = 0;
+ for (; index < length; index++) {
+ CaseClause* clause = cases->at(index);
if (clause->is_default()) {
- // Continue matching cases. The program will execute the default case's
- // statements if it does not match any of the cases.
- __ jmp(&next);
-
- // Bind the default case label, so we can branch to it when we
- // have compared against all other cases.
- ASSERT(default_case.is_unused()); // at most one default clause
- __ bind(&default_case);
+ // Remember the default clause and compile it at the end.
+ default_clause = clause;
+ continue;
+ }
+
+ // Compile each non-default clause.
+ Comment cmnt(masm_, "[ Case clause");
+ // Recycle the same target for each test.
+ if (!next_test.is_unused()) {
+ // The next test target may be linked (as the target of a
+ // previous match failure) or bound (if the previous comparison
+ // was unconditionally false or this is the first non-default
+ // comparison).
+ if (next_test.is_linked()) {
+ next_test.Bind();
+ }
+ next_test.Unuse();
+ }
+
+ // Duplicate the switch value.
+ frame_->Dup();
+
+ // Compile the clause's label expression.
+ Load(clause->label());
+
+ // Compare and branch to the body if true and to the next test if
+ // false.
+ JumpTarget enter_body(this);
+ ControlDestination dest(&enter_body, &next_test, true);
+ Comparison(equal, true, &dest);
+
+ bool previous_was_default =
+ index > 0 && cases->at(index - 1)->is_default();
+ if (dest.false_was_fall_through()) {
+ // The false target next_test was bound as the fall-through.
+ // This may indicate that the comparison was unconditionally
+ // false if there are no dangling jumps to enter_body. Even
+ // then we may still need to compile the body if it is reachable
+ // as a fall through.
+
+ // We do not need to compile the body if control cannot reach
+ // it. Control could reach the body (1) from the comparison by
+ // a branch to enter_body, (2) as the fall through of some
+ // previous case, or (3) possibly via a backward jump from the
+ // default.
+ if (!enter_body.is_linked() &&
+ !fall_through.is_linked() &&
+ !previous_was_default) {
+ continue;
+ }
+
+ // We will compile the body and we have to jump around it on
+ // this path where the comparison failed.
+ next_test.Unuse();
+ next_test.Jump();
+ if (enter_body.is_linked()) {
+ enter_body.Bind();
+ }
+ }
+
+ // The body entry target may have been bound, indicating control
+ // flow can reach the body via the comparison.
+ if (enter_body.is_bound()) {
+ // The switch value is no longer needed.
+ frame_->Drop();
+ } else {
+ // The test was unconditionally false but we will compile the
+ // body as a fall through.
+ ASSERT(!has_valid_frame());
+ }
+
+ // Label the body if needed for fall through.
+ if (previous_was_default) {
+ // Because the default is compiled last, there is always a potential
+ // backwards edge to here, falling through from the default.
+ default_exit.Bind();
} else {
- __ bind(&next);
- next.Unuse();
- __ mov(eax, frame_->Top());
- frame_->Push(eax); // duplicate TOS
- Load(clause->label());
- Comparison(equal, true);
- Branch(false, &next);
+ // Recycle the same target for each fall through.
+ fall_through.Bind();
+ fall_through.Unuse();
+ }
+
+ // Compile the body.
+ ASSERT(has_valid_frame());
+ { Comment body_cmnt(masm_, "[ Case body");
+ VisitStatements(clause->statements());
}
- // Entering the case statement for the first time. Remove the switch value
- // from the stack.
- frame_->Pop(eax);
-
- // Generate code for the body.
- // This is also the target for the fall through from the previous case's
- // statements which has to skip over the matching code and the popping of
- // the switch value.
- __ bind(&fall_through);
- fall_through.Unuse();
- VisitStatements(clause->statements());
- __ jmp(&fall_through);
+ // The test may have been unconditionally true, which is indicated
+ // by the absence of any control flow to the next_test target. In
+ // that case, exit this loop and stop compiling both tests and
+ // bodies (and begin compiling only bodies if necessary).
+
+ // Otherwise, if control flow can fall off the end of the body
+ // jump to the body of the next case as fall through unless this
+ // is the last non-default case.
+ if (!next_test.is_linked()) {
+ index++;
+ break;
+ } else if (has_valid_frame()) {
+ if (index < length - 2 && // There are at least two cases after this
+ cases->at(index + 1)->is_default()) { // The next is the default.
+ default_entry.Jump();
+ } else if (index < length - 1) { // This is not the last case.
+ fall_through.Jump();
+ }
+ }
}
- __ bind(&next);
- // Reached the end of the case statements without matching any of the cases.
- if (default_case.is_bound()) {
- // A default case exists -> execute its statements.
- __ jmp(&default_case);
+ // If we did not compile all the cases then we must have hit one
+ // that was unconditionally true. We do not need to compile any
+ // more tests but we may have (and continue to have) fall through.
+ for (; index < length && has_valid_frame(); index++) {
+ Comment cmnt(masm_, "[ Case fall-through");
+ VisitStatements(cases->at(index)->statements());
+ }
+
+ // Complete the switch statement based on the compilation state of
+ // the last case that was compiled.
+ if (next_test.is_unused()) {
+ // The last test compiled was unconditionally true. We still need
+ // to compile the default if we found one and it can be targeted
+ // by fall through.
+ if (default_clause != NULL) {
+ bool was_only_clause = length == 1 && cases->at(0) == default_clause;
+ if (was_only_clause || default_entry.is_linked()) {
+ Comment cmnt(masm_, "[ Default clause");
+ default_entry.Bind();
+ VisitStatements(default_clause->statements());
+ // If control flow can fall off the end of the default and there
+ // was a case after it, jump to that case's body.
+ if (has_valid_frame() && default_exit.is_bound()) {
+ default_exit.Jump();
+ }
+ }
+ }
} else {
- // Remove the switch value from the stack.
- frame_->Pop();
+ // The switch value is still on the frame. We have to drop it and
+ // possibly compile a default case.
+ if (next_test.is_linked()) {
+ if (has_valid_frame()) {
+ // We have fall through and thus need to jump around the code
+ // to drop the switch value.
+ fall_through.Jump();
+ }
+ next_test.Bind();
+ }
+ frame_->Drop();
+
+ // If there was a default clause, compile it now.
+ if (default_clause != NULL) {
+ Comment cmnt(masm_, "[ Default clause");
+ if (default_entry.is_linked()) {
+ default_entry.Bind();
+ }
+ VisitStatements(default_clause->statements());
+ // If control flow can fall off the end of the default and there
+ // was a case after it, jump to that case's body.
+ if (has_valid_frame() && default_exit.is_bound()) {
+ default_exit.Jump();
+ }
+ }
}
- __ bind(&fall_through);
- __ bind(node->break_target());
+ if (fall_through.is_linked()) {
+ fall_through.Bind();
+ }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
}
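
// A minimal, self-contained sketch of the forward-reference pattern the
// jump targets above rely on.  Illustration only: ToyAssembler and
// ToyJumpTarget are invented for this example and are not V8's classes.
// Jumps emitted against an unbound target are recorded ("linked"); Bind()
// later patches every recorded jump to the current position.
#include <cassert>
#include <cstdio>
#include <vector>

struct ToyAssembler {
  // Each entry stands for one instruction; for jumps it holds the
  // destination position, or -1 while the destination is still unknown.
  std::vector<int> code;
  int pos() const { return static_cast<int>(code.size()); }
  int EmitUnresolvedJump() { code.push_back(-1); return pos() - 1; }
};

struct ToyJumpTarget {
  std::vector<int> fixups_;  // positions of jumps waiting for this target
  int bound_pos_ = -1;

  bool is_linked() const { return bound_pos_ < 0 && !fixups_.empty(); }
  bool is_bound() const { return bound_pos_ >= 0; }

  void Jump(ToyAssembler* masm) {
    if (is_bound()) {
      masm->code.push_back(bound_pos_);             // backward jump: target known
    } else {
      fixups_.push_back(masm->EmitUnresolvedJump());  // forward jump: link it
    }
  }

  void Bind(ToyAssembler* masm) {
    bound_pos_ = masm->pos();
    for (int at : fixups_) {
      masm->code[at] = bound_pos_;                  // patch the linked jumps
    }
    fixups_.clear();
  }
};

int main() {
  ToyAssembler masm;
  ToyJumpTarget next_test;
  next_test.Jump(&masm);        // jump to a target that is not yet bound
  masm.code.push_back(0);       // some unrelated instruction
  assert(next_test.is_linked());
  next_test.Bind(&masm);        // resolve: the recorded jump now points here
  assert(masm.code[0] == 2);
  std::printf("forward jump patched to position %d\n", masm.code[0]);
  return 0;
}
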
void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+ ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ LoopStatement");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
node->set_break_stack_height(break_stack_height_);
+ node->break_target()->Initialize(this);
- // simple condition analysis
+ // Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a
+ // known result for the test expression, with no side effects.
enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
if (node->cond() == NULL) {
ASSERT(node->type() == LoopStatement::FOR_LOOP);
}
}
- Label loop, entry;
+ switch (node->type()) {
+ case LoopStatement::DO_LOOP: {
+ JumpTarget body(this, JumpTarget::BIDIRECTIONAL);
+ IncrementLoopNesting();
+
+ // Label the top of the loop for the backward CFG edge. If the test
+ // is always true we can use the continue target, and if the test is
+ // always false there is no need.
+ if (info == ALWAYS_TRUE) {
+ node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else if (info == ALWAYS_FALSE) {
+ node->continue_target()->Initialize(this);
+ } else {
+ ASSERT(info == DONT_KNOW);
+ node->continue_target()->Initialize(this);
+ body.Bind();
+ }
- // init
- if (node->init() != NULL) {
- ASSERT(node->type() == LoopStatement::FOR_LOOP);
- Visit(node->init());
- }
- if (node->type() != LoopStatement::DO_LOOP && info != ALWAYS_TRUE) {
- __ jmp(&entry);
- }
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ Visit(node->body());
- IncrementLoopNesting();
+ // Compile the test.
+ if (info == ALWAYS_TRUE) {
+ // If control flow can fall off the end of the body, jump back to
+ // the top and bind the break target as the exit.
+ if (has_valid_frame()) {
+ node->continue_target()->Jump();
+ }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
- // body
- __ bind(&loop);
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // next
- __ bind(node->continue_target());
- if (node->next() != NULL) {
- // Record source position of the statement as this code which is after the
- // code for the body actually belongs to the loop statement and not the
- // body.
- CodeForStatement(node);
- ASSERT(node->type() == LoopStatement::FOR_LOOP);
- Visit(node->next());
- }
+ } else if (info == ALWAYS_FALSE) {
+ // We may have had continues or breaks in the body.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
- // cond
- __ bind(&entry);
- switch (info) {
- case ALWAYS_TRUE:
- __ jmp(&loop);
+ } else {
+ ASSERT(info == DONT_KNOW);
+ // We have to compile the test expression if it can be reached by
+ // control flow falling out of the body or via continue.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+ if (has_valid_frame()) {
+ ControlDestination dest(&body, node->break_target(), false);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ }
break;
- case ALWAYS_FALSE:
+ }
+
+ case LoopStatement::WHILE_LOOP: {
+ JumpTarget body(this, JumpTarget::BIDIRECTIONAL);
+ IncrementLoopNesting();
+
+ // If the condition is always false and has no side effects, we
+ // do not need to compile anything.
+ if (info == ALWAYS_FALSE) break;
+
+ // Based on the condition analysis, compile the test if
+ // necessary and label the body if necessary.
+ if (info == ALWAYS_TRUE) {
+ // We will not compile the test expression. Label the top of
+ // the loop with the continue target.
+ node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else {
+ ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
+ node->continue_target()->Initialize(this);
+ // Compile the test with the body as the true target and
+ // preferred fall-through and with the break target as the
+ // false target.
+ ControlDestination dest(&body, node->break_target(), true);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.false_was_fall_through()) {
+ // If we don't have dangling jumps to the body, the test is
+ // unconditionally false and we do not need to compile the
+ // body.
+ if (!body.is_linked()) break;
+
+ // Otherwise, jump around the body on the fall through and
+ // then bind the body target.
+ node->break_target()->Unuse();
+ node->break_target()->Jump();
+ body.Bind();
+ }
+ }
+
+ // The (stack check at the start of the) body was labeled.
+ // Compile it.
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ Visit(node->body());
+
+ // Compile the test if necessary and jump back.
+ if (info == ALWAYS_TRUE) {
+ // The body has been labeled with the continue target.
+ if (has_valid_frame()) {
+ node->continue_target()->Jump();
+ }
+ } else {
+ ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+
+ // If control can reach the bottom by falling off the body or
+ // a continue in the body, (re)compile the test at the bottom.
+ if (has_valid_frame()) {
+ // The break target is the fall-through (body is a backward
+ // jump from here).
+ ControlDestination dest(&body, node->break_target(), false);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ }
+ }
+
+ // The break target may be already bound (by the condition), or
+ // there may not be a valid frame. Bind it only if needed.
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
break;
- case DONT_KNOW:
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &loop,
- node->break_target(), true);
- Branch(true, &loop);
+ }
+
+ case LoopStatement::FOR_LOOP: {
+ JumpTarget body(this, JumpTarget::BIDIRECTIONAL);
+
+ // Compile the init expression if present.
+ if (node->init() != NULL) {
+ Visit(node->init());
+ }
+
+ IncrementLoopNesting();
+
+ // If the condition is always false and has no side effects, we
+ // do not need to compile anything else.
+ if (info == ALWAYS_FALSE) break;
+
+ // Based on the condition analysis, compile the test if
+ // necessary and label the body if necessary.
+ if (info == ALWAYS_TRUE) {
+ // We will not compile the test expression. Label the top of
+ // the loop with the continue target if there is no update
+ // expression, otherwise with the body target.
+ if (node->next() == NULL) {
+ node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else {
+ node->continue_target()->Initialize(this);
+ body.Bind();
+ }
+ } else {
+ ASSERT(info == DONT_KNOW);
+ node->continue_target()->Initialize(this);
+ // Compile the test with the body as the true target and
+ // preferred fall-through and with the break target as the
+ // false target.
+ ControlDestination dest(&body, node->break_target(), true);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.false_was_fall_through()) {
+ // If we don't have dangling jumps to the body, the test is
+ // unconditionally false and we do not need to compile the
+ // body.
+ if (!body.is_linked()) break;
+
+ // Otherwise, jump around the body on the fall through and
+ // then bind the body target.
+ node->break_target()->Unuse();
+ node->break_target()->Jump();
+ body.Bind();
+ }
+ }
+
+ // The (stack check at the start of the) body was labeled.
+ // Compile it.
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ Visit(node->body());
+
+ // If there is an update expression, compile it if necessary.
+ if (node->next() != NULL) {
+ // We did not use the continue target for the body.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+
+ // Control can reach the update by falling out of the body or
+ // by a continue in the body.
+ if (has_valid_frame()) {
+          // Record the source position of the statement, because this
+          // code (which comes after the code for the body) belongs to
+          // the loop statement and not to the body.
+ CodeForStatementPosition(node);
+ Visit(node->next());
+ }
+ }
+
+ // Compile the test if necessary and jump back.
+ if (info == ALWAYS_TRUE) {
+ if (has_valid_frame()) {
+ if (node->next() == NULL) {
+ node->continue_target()->Jump();
+ } else {
+ body.Jump();
+ }
+ }
+ } else {
+ ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
+ if (node->continue_target()->is_linked()) {
+ // We can have dangling jumps to the continue target if
+ // there was no update expression.
+ node->continue_target()->Bind();
+ }
+
+ // Control can reach the test at the bottom by falling out of
+ // the body, by a continue in the body, or from the update
+ // expression.
+ if (has_valid_frame()) {
+ // The break target is the fall-through (body is a backward
+ // jump from here).
+ ControlDestination dest(&body, node->break_target(), false);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ }
+ }
+
+ // The break target may be already bound (by the condition), or
+ // there may not be a valid frame. Bind it only if needed.
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
break;
+ }
}
DecrementLoopNesting();
-
- // exit
- __ bind(node->break_target());
}
void CodeGenerator::VisitForInStatement(ForInStatement* node) {
+ ASSERT(!in_spilled_code());
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ ForInStatement");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
// We keep stuff on the stack while the body is executing.
// Record it, so that a break/continue crossing this statement
const int kForInStackSize = 5 * kPointerSize;
break_stack_height_ += kForInStackSize;
node->set_break_stack_height(break_stack_height_);
+ node->break_target()->Initialize(this);
+ node->continue_target()->Initialize(this);
- Label loop, next, entry, cleanup, exit, primitive, jsobject;
- Label end_del_check, fixed_array;
+ JumpTarget primitive(this);
+ JumpTarget jsobject(this);
+ JumpTarget fixed_array(this);
+ JumpTarget entry(this, JumpTarget::BIDIRECTIONAL);
+ JumpTarget end_del_check(this);
+ JumpTarget exit(this);
// Get the object to enumerate over (converted to JSObject).
- Load(node->enumerable());
+ LoadAndSpill(node->enumerable());
// Both SpiderMonkey and kjs ignore null and undefined in contrast
// to the specification. 12.6.4 mandates a call to ToObject.
- frame_->Pop(eax);
+ frame_->EmitPop(eax);
// eax: value to be iterated over
__ cmp(eax, Factory::undefined_value());
- __ j(equal, &exit);
+ exit.Branch(equal);
__ cmp(eax, Factory::null_value());
- __ j(equal, &exit);
+ exit.Branch(equal);
// Stack layout in body:
// [iteration counter (smi)] <- slot 0
// Check if enumerable is already a JSObject
// eax: value to be iterated over
__ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &primitive);
+ primitive.Branch(zero);
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(above_equal, &jsobject);
+ jsobject.Branch(above_equal);
- __ bind(&primitive);
- frame_->Push(eax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ primitive.Bind();
+ frame_->EmitPush(eax);
+ frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
// function call returns the value in eax, which is where we want it below
-
- __ bind(&jsobject);
-
+ jsobject.Bind();
// Get the set of properties (as a FixedArray or Map).
// eax: value to be iterated over
- frame_->Push(eax); // push the object being iterated over (slot 4)
+ frame_->EmitPush(eax); // push the object being iterated over (slot 4)
- frame_->Push(eax); // push the Object (slot 4) for the runtime call
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ frame_->EmitPush(eax); // push the Object (slot 4) for the runtime call
+ frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
// If we got a Map, we can do a fast modification check.
// Otherwise, we got a FixedArray, and we have to do a slow check.
__ mov(edx, Operand(eax));
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
__ cmp(ecx, Factory::meta_map());
- __ j(not_equal, &fixed_array);
+ fixed_array.Branch(not_equal);
// Get enum cache
// eax: map (result from call to Runtime::kGetPropertyNamesFast)
// Get the cache from the bridge array.
__ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- frame_->Push(eax); // <- slot 3
- frame_->Push(edx); // <- slot 2
+ frame_->EmitPush(eax); // <- slot 3
+ frame_->EmitPush(edx); // <- slot 2
__ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
__ shl(eax, kSmiTagSize);
- frame_->Push(eax); // <- slot 1
- frame_->Push(Immediate(Smi::FromInt(0))); // <- slot 0
- __ jmp(&entry);
-
-
- __ bind(&fixed_array);
+ frame_->EmitPush(eax); // <- slot 1
+ frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
+ entry.Jump();
+ fixed_array.Bind();
// eax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
- frame_->Push(Immediate(Smi::FromInt(0))); // <- slot 3
- frame_->Push(eax); // <- slot 2
+ frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 3
+ frame_->EmitPush(eax); // <- slot 2
// Push the length of the array and the initial index onto the stack.
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ shl(eax, kSmiTagSize);
- frame_->Push(eax); // <- slot 1
- frame_->Push(Immediate(Smi::FromInt(0))); // <- slot 0
- __ jmp(&entry);
-
- // Body.
- __ bind(&loop);
- Visit(node->body());
-
- // Next.
- __ bind(node->continue_target());
- __ bind(&next);
- frame_->Pop(eax);
- __ add(Operand(eax), Immediate(Smi::FromInt(1)));
- frame_->Push(eax);
+ frame_->EmitPush(eax); // <- slot 1
+ frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
// Condition.
- __ bind(&entry);
-
- __ mov(eax, frame_->Element(0)); // load the current count
- __ cmp(eax, frame_->Element(1)); // compare to the array length
- __ j(above_equal, &cleanup);
+ entry.Bind();
+ __ mov(eax, frame_->ElementAt(0)); // load the current count
+ __ cmp(eax, frame_->ElementAt(1)); // compare to the array length
+ node->break_target()->Branch(above_equal);
// Get the i'th entry of the array.
- __ mov(edx, frame_->Element(2));
+ __ mov(edx, frame_->ElementAt(2));
__ mov(ebx, Operand(edx, eax, times_2,
FixedArray::kHeaderSize - kHeapObjectTag));
  // Get the expected map from the stack or a zero map in the
  // permanent slow case.
  // eax: current iteration count
  // ebx: i'th entry of the enum cache
- __ mov(edx, frame_->Element(3));
+ __ mov(edx, frame_->ElementAt(3));
// Check if the expected map still matches that of the enumerable.
// If not, we have to filter the key.
// eax: current iteration count
// ebx: i'th entry of the enum cache
// edx: expected map value
- __ mov(ecx, frame_->Element(4));
+ __ mov(ecx, frame_->ElementAt(4));
__ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
__ cmp(ecx, Operand(edx));
- __ j(equal, &end_del_check);
+ end_del_check.Branch(equal);
// Convert the entry to a string (or null if it isn't a property anymore).
- frame_->Push(frame_->Element(4)); // push enumerable
- frame_->Push(ebx); // push entry
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
+ frame_->EmitPush(ebx); // push entry
+ frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
__ mov(ebx, Operand(eax));
// If the property has been removed while iterating, we just skip it.
__ cmp(ebx, Factory::null_value());
- __ j(equal, &next);
-
+ node->continue_target()->Branch(equal);
- __ bind(&end_del_check);
-
- // Store the entry in the 'each' expression and take another spin in the loop.
- // edx: i'th entry of the enum cache (or string there of)
- frame_->Push(ebx);
+ end_del_check.Bind();
+  // Store the entry in the 'each' expression and take another spin in the
+  // loop.
+  // edx: i'th entry of the enum cache (or the string thereof).
+ frame_->EmitPush(ebx);
{ Reference each(this, node->each());
+ // Loading a reference may leave the frame in an unspilled state.
+ frame_->SpillAll();
if (!each.is_illegal()) {
if (each.size() > 0) {
- frame_->Push(frame_->Element(each.size()));
+ frame_->EmitPush(frame_->ElementAt(each.size()));
}
// If the reference was to a slot we rely on the convenient property
// that it doesn't matter whether a value (eg, ebx pushed above) is
// ie, now the topmost value of the non-zero sized reference), since
// we will discard the top of stack after unloading the reference
// anyway.
- frame_->Pop();
+ frame_->Drop();
}
}
}
+ // Unloading a reference may leave the frame in an unspilled state.
+ frame_->SpillAll();
+
// Discard the i'th entry pushed above or else the remainder of the
// reference, whichever is currently on top of the stack.
- frame_->Pop();
+ frame_->Drop();
+
+ // Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
- __ jmp(&loop);
+ VisitAndSpill(node->body());
+
+ // Next.
+ node->continue_target()->Bind();
+ frame_->EmitPop(eax);
+ __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+ frame_->EmitPush(eax);
+ entry.Jump();
// Cleanup.
- __ bind(&cleanup);
- __ bind(node->break_target());
+ node->break_target()->Bind();
frame_->Drop(5);
// Exit.
- __ bind(&exit);
+ exit.Bind();
break_stack_height_ -= kForInStackSize;
}
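
// A rough, self-contained sketch of the iteration protocol the for-in code
// above sets up (standard C++ containers stand in for the real structures;
// none of these names are V8 APIs).  A counter (slot 0), the number of keys
// (slot 1) and the key array (slot 2) drive the loop, and keys that have
// been removed from the object are filtered out before the body runs,
// mirroring the FILTER_KEY / null check above.
#include <cstdio>
#include <map>
#include <string>
#include <vector>

int main() {
  std::map<std::string, int> object = {{"a", 1}, {"b", 2}, {"c", 3}};
  std::vector<std::string> keys = {"a", "b", "c"};  // slot 2: enum cache
  object.erase("b");                     // property deleted before the loop body sees it
  std::size_t length = keys.size();      // slot 1: number of keys
  for (std::size_t count = 0; count < length; count++) {  // slot 0: counter
    const std::string& key = keys[count];
    if (object.find(key) == object.end()) continue;  // filtered out: skip it
    std::printf("%s -> %d\n", key.c_str(), object[key]);  // loop body
  }
  return 0;
}
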
void CodeGenerator::VisitTryCatch(TryCatch* node) {
+ ASSERT(!in_spilled_code());
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ TryCatch");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
- Label try_block, exit;
+ JumpTarget try_block(this);
+ JumpTarget exit(this);
- __ call(&try_block);
+ try_block.Call();
// --- Catch block ---
- frame_->Push(eax);
+ frame_->EmitPush(eax);
// Store the caught exception in the catch variable.
{ Reference ref(this, node->catch_var());
}
// Remove the exception from the stack.
- frame_->Pop();
+ frame_->Drop();
- VisitStatements(node->catch_block()->statements());
- __ jmp(&exit);
+ VisitStatementsAndSpill(node->catch_block()->statements());
+ if (has_valid_frame()) {
+ exit.Jump();
+ }
// --- Try block ---
- __ bind(&try_block);
+ try_block.Bind();
- __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
- // TODO(1222589): remove the reliance of PushTryHandler on a cached TOS
- frame_->Push(eax); //
+ frame_->PushTryHandler(TRY_CATCH_HANDLER);
+ int handler_height = frame_->height();
- // Shadow the labels for all escapes from the try block, including
- // returns. During shadowing, the original label is hidden as the
- // LabelShadow and operations on the original actually affect the
- // shadowing label.
+ // Shadow the jump targets for all escapes from the try block, including
+ // returns. During shadowing, the original target is hidden as the
+ // ShadowTarget and operations on the original actually affect the
+ // shadowing target.
//
- // We should probably try to unify the escaping labels and the return
- // label.
- int nof_escapes = node->escaping_labels()->length();
- List<LabelShadow*> shadows(1 + nof_escapes);
- shadows.Add(new LabelShadow(&function_return_));
+ // We should probably try to unify the escaping targets and the return
+ // target.
+ int nof_escapes = node->escaping_targets()->length();
+ List<ShadowTarget*> shadows(1 + nof_escapes);
+
+ // Add the shadow target for the function return.
+ static const int kReturnShadowIndex = 0;
+ shadows.Add(new ShadowTarget(&function_return_));
+ bool function_return_was_shadowed = function_return_is_shadowed_;
+ function_return_is_shadowed_ = true;
+ ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+ // Add the remaining shadow targets.
for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new LabelShadow(node->escaping_labels()->at(i)));
+ shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
}
// Generate code for the statements in the try block.
- { TempAssign<bool> temp(&is_inside_try_, true);
- VisitStatements(node->try_block()->statements());
- }
+ VisitStatementsAndSpill(node->try_block()->statements());
// Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original labels are unshadowed and the
- // LabelShadows represent the formerly shadowing labels.
+ // After shadowing stops, the original targets are unshadowed and the
+ // ShadowTargets represent the formerly shadowing targets.
int nof_unlinks = 0;
for (int i = 0; i <= nof_escapes; i++) {
shadows[i]->StopShadowing();
if (shadows[i]->is_linked()) nof_unlinks++;
}
+ function_return_is_shadowed_ = function_return_was_shadowed;
// Get an external reference to the handler address.
ExternalReference handler_address(Top::k_handler_address);
__ Assert(equal, "stack pointer should point to top handler");
}
- // Unlink from try chain.
- frame_->Pop(eax);
- __ mov(Operand::StaticVariable(handler_address), eax); // TOS == next_sp
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
- // next_sp popped.
- if (nof_unlinks > 0) __ jmp(&exit);
+ // If we can fall off the end of the try block, unlink from try chain.
+ if (has_valid_frame()) {
+ // The TOS is the next handler address.
+ frame_->EmitPop(eax);
+ __ mov(Operand::StaticVariable(handler_address), eax);
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+ if (nof_unlinks > 0) {
+ exit.Jump();
+ }
+ }
- // Generate unlink code for the (formerly) shadowing labels that have been
+ // Generate unlink code for the (formerly) shadowing targets that have been
// jumped to.
for (int i = 0; i <= nof_escapes; i++) {
if (shadows[i]->is_linked()) {
// Unlink from try chain; be careful not to destroy the TOS.
- __ bind(shadows[i]);
+ shadows[i]->Bind();
+ // Because we can be jumping here (to spilled code) from unspilled
+ // code, we need to reestablish a spilled frame at this block.
+ frame_->SpillAll();
// Reload sp from the top handler, because some statements that we
// break from (eg, for...in) may have left stuff on the stack.
const int kNextOffset = StackHandlerConstants::kNextOffset +
StackHandlerConstants::kAddressDisplacement;
__ lea(esp, Operand(edx, kNextOffset));
+ frame_->Forget(frame_->height() - handler_height);
- frame_->Pop(Operand::StaticVariable(handler_address));
+ frame_->EmitPop(Operand::StaticVariable(handler_address));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
// next_sp popped.
- __ jmp(shadows[i]->original_label());
+
+ if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
+ frame_->PrepareForReturn();
+ }
+ shadows[i]->other_target()->Jump();
}
}
- __ bind(&exit);
+ exit.Bind();
}
void CodeGenerator::VisitTryFinally(TryFinally* node) {
+ ASSERT(!in_spilled_code());
+ VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ TryFinally");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
// State: Used to keep track of reason for entering the finally
// block. Should probably be extended to hold information for
// break/continue from within the try block.
enum { FALLING, THROWING, JUMPING };
- Label exit, unlink, try_block, finally_block;
+ JumpTarget unlink(this);
+ JumpTarget try_block(this);
+ JumpTarget finally_block(this);
- __ call(&try_block);
+ try_block.Call();
- frame_->Push(eax);
+ frame_->EmitPush(eax);
// In case of thrown exceptions, this is where we continue.
__ Set(ecx, Immediate(Smi::FromInt(THROWING)));
- __ jmp(&finally_block);
-
+ finally_block.Jump();
// --- Try block ---
- __ bind(&try_block);
+ try_block.Bind();
- __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
- // TODO(1222589): remove the reliance of PushTryHandler on a cached TOS
- frame_->Push(eax);
+ frame_->PushTryHandler(TRY_FINALLY_HANDLER);
+ int handler_height = frame_->height();
- // Shadow the labels for all escapes from the try block, including
- // returns. During shadowing, the original label is hidden as the
- // LabelShadow and operations on the original actually affect the
- // shadowing label.
+ // Shadow the jump targets for all escapes from the try block, including
+ // returns. During shadowing, the original target is hidden as the
+ // ShadowTarget and operations on the original actually affect the
+ // shadowing target.
//
- // We should probably try to unify the escaping labels and the return
- // label.
- int nof_escapes = node->escaping_labels()->length();
- List<LabelShadow*> shadows(1 + nof_escapes);
- shadows.Add(new LabelShadow(&function_return_));
+ // We should probably try to unify the escaping targets and the return
+ // target.
+ int nof_escapes = node->escaping_targets()->length();
+ List<ShadowTarget*> shadows(1 + nof_escapes);
+
+ // Add the shadow target for the function return.
+ static const int kReturnShadowIndex = 0;
+ shadows.Add(new ShadowTarget(&function_return_));
+ bool function_return_was_shadowed = function_return_is_shadowed_;
+ function_return_is_shadowed_ = true;
+ ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+ // Add the remaining shadow targets.
for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new LabelShadow(node->escaping_labels()->at(i)));
+ shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
}
// Generate code for the statements in the try block.
- { TempAssign<bool> temp(&is_inside_try_, true);
- VisitStatements(node->try_block()->statements());
- }
+ VisitStatementsAndSpill(node->try_block()->statements());
// Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original labels are unshadowed and the
- // LabelShadows represent the formerly shadowing labels.
+ // After shadowing stops, the original targets are unshadowed and the
+ // ShadowTargets represent the formerly shadowing targets.
int nof_unlinks = 0;
for (int i = 0; i <= nof_escapes; i++) {
shadows[i]->StopShadowing();
if (shadows[i]->is_linked()) nof_unlinks++;
}
+ function_return_is_shadowed_ = function_return_was_shadowed;
+
+ // If we can fall off the end of the try block, set the state on the stack
+ // to FALLING.
+ if (has_valid_frame()) {
+ frame_->EmitPush(Immediate(Factory::undefined_value())); // fake TOS
+ __ Set(ecx, Immediate(Smi::FromInt(FALLING)));
+ if (nof_unlinks > 0) {
+ unlink.Jump();
+ }
+ }
- // Set the state on the stack to FALLING.
- frame_->Push(Immediate(Factory::undefined_value())); // fake TOS
- __ Set(ecx, Immediate(Smi::FromInt(FALLING)));
- if (nof_unlinks > 0) __ jmp(&unlink);
-
- // Generate code to set the state for the (formerly) shadowing labels that
+ // Generate code to set the state for the (formerly) shadowing targets that
// have been jumped to.
for (int i = 0; i <= nof_escapes; i++) {
if (shadows[i]->is_linked()) {
- __ bind(shadows[i]);
- if (shadows[i]->original_label() == &function_return_) {
- // If this label shadowed the function return, materialize the
- // return value on the stack.
- frame_->Push(eax);
+ // Because we can be jumping here (to spilled code) from
+ // unspilled code, we need to reestablish a spilled frame at
+ // this block.
+ shadows[i]->Bind();
+ frame_->SpillAll();
+ if (i == kReturnShadowIndex) {
+ // If this target shadowed the function return, materialize
+ // the return value on the stack.
+ frame_->EmitPush(eax);
} else {
- // Fake TOS for labels that shadowed breaks and continues.
- frame_->Push(Immediate(Factory::undefined_value()));
+ // Fake TOS for targets that shadowed breaks and continues.
+ frame_->EmitPush(Immediate(Factory::undefined_value()));
}
__ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));
- __ jmp(&unlink);
+ unlink.Jump();
}
}
// Unlink from try chain; be careful not to destroy the TOS.
- __ bind(&unlink);
+ unlink.Bind();
// Reload sp from the top handler, because some statements that we
// break from (eg, for...in) may have left stuff on the stack.
// Preserve the TOS in a register across stack manipulation.
- frame_->Pop(eax);
+ frame_->EmitPop(eax);
ExternalReference handler_address(Top::k_handler_address);
__ mov(edx, Operand::StaticVariable(handler_address));
const int kNextOffset = StackHandlerConstants::kNextOffset +
StackHandlerConstants::kAddressDisplacement;
__ lea(esp, Operand(edx, kNextOffset));
+ frame_->Forget(frame_->height() - handler_height);
- frame_->Pop(Operand::StaticVariable(handler_address));
+ frame_->EmitPop(Operand::StaticVariable(handler_address));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
// Next_sp popped.
- frame_->Push(eax);
+ frame_->EmitPush(eax);
// --- Finally block ---
- __ bind(&finally_block);
+ finally_block.Bind();
// Push the state on the stack.
- frame_->Push(ecx);
+ frame_->EmitPush(ecx);
// We keep two elements on the stack - the (possibly faked) result
// and the state - while evaluating the finally block. Record it, so
break_stack_height_ += kFinallyStackSize;
// Generate code for the statements in the finally block.
- VisitStatements(node->finally_block()->statements());
+ VisitStatementsAndSpill(node->finally_block()->statements());
- // Restore state and return value or faked TOS.
- frame_->Pop(ecx);
- frame_->Pop(eax);
break_stack_height_ -= kFinallyStackSize;
-
- // Generate code to jump to the right destination for all used (formerly)
- // shadowing labels.
- for (int i = 0; i <= nof_escapes; i++) {
- if (shadows[i]->is_bound()) {
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(JUMPING + i)));
- __ j(equal, shadows[i]->original_label());
+ if (has_valid_frame()) {
+ JumpTarget exit(this);
+ // Restore state and return value or faked TOS.
+ frame_->EmitPop(ecx);
+ frame_->EmitPop(eax);
+
+ // Generate code to jump to the right destination for all used
+ // formerly shadowing targets.
+ for (int i = 0; i <= nof_escapes; i++) {
+ if (shadows[i]->is_bound()) {
+ JumpTarget* original = shadows[i]->other_target();
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(JUMPING + i)));
+ if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
+ JumpTarget skip(this);
+ skip.Branch(not_equal);
+ frame_->PrepareForReturn();
+ original->Jump();
+ skip.Bind();
+ } else {
+ original->Branch(equal);
+ }
+ }
}
- }
- // Check if we need to rethrow the exception.
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(THROWING)));
- __ j(not_equal, &exit);
+ // Check if we need to rethrow the exception.
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(THROWING)));
+ exit.Branch(not_equal);
- // Rethrow exception.
- frame_->Push(eax); // undo pop from above
- __ CallRuntime(Runtime::kReThrow, 1);
+ // Rethrow exception.
+ frame_->EmitPush(eax); // undo pop from above
+ frame_->CallRuntime(Runtime::kReThrow, 1);
- // Done.
- __ bind(&exit);
+ // Done.
+ exit.Bind();
+ }
}
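
// A compact sketch of the dispatch that runs after the finally block above
// (illustrative only; DispatchAfterFinally is not part of V8).  The state
// value pushed before entering the finally block records why it was
// entered, and afterwards selects between resuming a shadowed control
// transfer (such as a return), rethrowing, and falling through.
#include <cstdio>

enum { FALLING, THROWING, JUMPING };  // same encoding as in the code above

const char* DispatchAfterFinally(int state, int num_shadows) {
  for (int i = 0; i < num_shadows; i++) {
    if (state == JUMPING + i) return "resume the shadowed control transfer";
  }
  if (state == THROWING) return "rethrow the pending exception";
  return "fall through to the code after try..finally";
}

int main() {
  std::printf("%s\n", DispatchAfterFinally(FALLING, 2));
  std::printf("%s\n", DispatchAfterFinally(THROWING, 2));
  std::printf("%s\n", DispatchAfterFinally(JUMPING + 0, 2));
  return 0;
}
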
void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+ ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ DebuggerStatement");
- CodeForStatement(node);
- __ CallRuntime(Runtime::kDebugBreak, 0);
+ CodeForStatementPosition(node);
+ // Spill everything, even constants, to the frame.
+ frame_->SpillAll();
+ frame_->CallRuntime(Runtime::kDebugBreak, 0);
// Ignore the return value.
}
ASSERT(boilerplate->IsBoilerplate());
// Push the boilerplate on the stack.
- frame_->Push(Immediate(boilerplate));
+ frame_->Push(boilerplate);
// Create a new closure.
frame_->Push(esi);
- __ CallRuntime(Runtime::kNewClosure, 2);
- frame_->Push(eax);
+ Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
+ frame_->Push(&result);
}
void CodeGenerator::VisitConditional(Conditional* node) {
Comment cmnt(masm_, "[ Conditional");
- Label then, else_, exit;
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &then, &else_, true);
- Branch(false, &else_);
- __ bind(&then);
- Load(node->then_expression(), typeof_state());
- __ jmp(&exit);
- __ bind(&else_);
- Load(node->else_expression(), typeof_state());
- __ bind(&exit);
+ JumpTarget then(this);
+ JumpTarget else_(this);
+ JumpTarget exit(this);
+ ControlDestination dest(&then, &else_, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.false_was_fall_through()) {
+ // The else target was bound, so we compile the else part first.
+ Load(node->else_expression(), typeof_state());
+
+ if (then.is_linked()) {
+ exit.Jump();
+ then.Bind();
+ Load(node->then_expression(), typeof_state());
+ }
+ } else {
+ // The then target was bound, so we compile the then part first.
+ Load(node->then_expression(), typeof_state());
+
+ if (else_.is_linked()) {
+ exit.Jump();
+ else_.Bind();
+ Load(node->else_expression(), typeof_state());
+ }
+ }
+
+ exit.Bind();
}
if (slot->type() == Slot::LOOKUP) {
ASSERT(slot->var()->is_dynamic());
- Label slow, done;
+ JumpTarget slow(this);
+ JumpTarget done(this);
+ Result value(this);
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// perform a runtime call for all variables in the scope
// containing the eval.
if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- LoadFromGlobalSlotCheckExtensions(slot, typeof_state, ebx, &slow);
- __ jmp(&done);
+ value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
+ // If there was no control flow to slow, we can exit early.
+ if (!slow.is_linked()) {
+ frame_->Push(&value);
+ return;
+ }
+
+ done.Jump(&value);
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
// Only generate the fast case for locals that rewrite to slots.
// This rules out argument loads.
if (potential_slot != NULL) {
- __ mov(eax,
+ // Allocate a fresh register to use as a temp in
+ // ContextSlotOperandCheckExtensions and to hold the result
+ // value.
+ value = allocator_->Allocate();
+ ASSERT(value.is_valid());
+ __ mov(value.reg(),
ContextSlotOperandCheckExtensions(potential_slot,
- ebx,
+ value,
&slow));
- __ jmp(&done);
+ // There is always control flow to slow from
+ // ContextSlotOperandCheckExtensions.
+ done.Jump(&value);
}
}
- __ bind(&slow);
+ slow.Bind();
frame_->Push(esi);
- frame_->Push(Immediate(slot->var()->name()));
+ frame_->Push(slot->var()->name());
if (typeof_state == INSIDE_TYPEOF) {
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ value =
+ frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
} else {
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
}
- __ bind(&done);
- frame_->Push(eax);
+ done.Bind(&value);
+ frame_->Push(&value);
+
+ } else if (slot->var()->mode() == Variable::CONST) {
+ // Const slots may contain 'the hole' value (the constant hasn't been
+ // initialized yet) which needs to be converted into the 'undefined'
+ // value.
+ //
+ // We currently spill the virtual frame because constants use the
+ // potentially unsafe direct-frame access of SlotOperand.
+ VirtualFrame::SpilledScope spilled_scope(this);
+ Comment cmnt(masm_, "[ Load const");
+ JumpTarget exit(this);
+ __ mov(ecx, SlotOperand(slot, ecx));
+ __ cmp(ecx, Factory::the_hole_value());
+ exit.Branch(not_equal);
+ __ mov(ecx, Factory::undefined_value());
+ exit.Bind();
+ frame_->EmitPush(ecx);
+
+ } else if (slot->type() == Slot::PARAMETER) {
+ frame_->PushParameterAt(slot->index());
+
+ } else if (slot->type() == Slot::LOCAL) {
+ frame_->PushLocalAt(slot->index());
} else {
- // Note: We would like to keep the assert below, but it fires because of
- // some nasty code in LoadTypeofExpression() which should be removed...
- // ASSERT(!slot->var()->is_dynamic());
- if (slot->var()->mode() == Variable::CONST) {
- // Const slots may contain 'the hole' value (the constant hasn't been
- // initialized yet) which needs to be converted into the 'undefined'
- // value.
- Comment cmnt(masm_, "[ Load const");
- Label exit;
- __ mov(eax, SlotOperand(slot, ecx));
- __ cmp(eax, Factory::the_hole_value());
- __ j(not_equal, &exit);
- __ mov(eax, Factory::undefined_value());
- __ bind(&exit);
- frame_->Push(eax);
- } else {
- frame_->Push(SlotOperand(slot, ecx));
- }
+ // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
+ // here.
+ //
+ // The use of SlotOperand below is safe for an unspilled frame
+ // because it will always be a context slot.
+ ASSERT(slot->type() == Slot::CONTEXT);
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(), SlotOperand(slot, temp.reg()));
+ frame_->Push(&temp);
}
}
-void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- Register tmp,
- Label* slow) {
+Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
+ Slot* slot,
+ TypeofState typeof_state,
+ JumpTarget* slow) {
// Check that no extension objects have been created by calls to
// eval from the current scope to the global scope.
- Register context = esi;
+ Result context(esi, this);
+ Result tmp = allocator_->Allocate();
+ ASSERT(tmp.is_valid()); // All non-reserved registers were available.
+
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_eval()) {
// Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow, not_taken);
+ __ cmp(ContextOperand(context.reg(), Context::EXTENSION_INDEX),
+ Immediate(0));
+ slow->Branch(not_equal, not_taken);
}
// Load next context in chain.
- __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
+ __ mov(tmp.reg(), ContextOperand(context.reg(), Context::CLOSURE_INDEX));
+ __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
context = tmp;
}
// If no outer scope calls eval, we do not need to check more
}
if (s->is_eval_scope()) {
+ // Loop up the context chain. There is no frame effect so it is
+ // safe to use raw labels here.
Label next, fast;
- if (!context.is(tmp)) __ mov(tmp, Operand(context));
+ if (!context.reg().is(tmp.reg())) __ mov(tmp.reg(), context.reg());
__ bind(&next);
// Terminate at global context.
- __ cmp(FieldOperand(tmp, HeapObject::kMapOffset),
+ __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
Immediate(Factory::global_context_map()));
__ j(equal, &fast);
// Check that extension is NULL.
- __ cmp(ContextOperand(tmp, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow, not_taken);
+ __ cmp(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
+ slow->Branch(not_equal, not_taken);
// Load next context in chain.
- __ mov(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
- __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
+ __ mov(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
+ __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
__ jmp(&next);
__ bind(&fast);
}
+ context.Unuse();
+ tmp.Unuse();
// All extension objects were empty and it is safe to use a global
// load IC call.
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Load the global object.
LoadGlobal();
- // Setup the name register.
- __ mov(ecx, slot->var()->name());
- // Call IC stub.
- if (typeof_state == INSIDE_TYPEOF) {
- __ call(ic, RelocInfo::CODE_TARGET);
+ // Setup the name register. All non-reserved registers are available.
+ Result name = allocator_->Allocate(ecx);
+ ASSERT(name.is_valid());
+ __ mov(name.reg(), slot->var()->name());
+ RelocInfo::Mode rmode = (typeof_state == INSIDE_TYPEOF)
+ ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
+ Result answer = frame_->CallCodeObject(ic, rmode, &name, 0);
+
+ // Discard the global object. The result is in answer.
+ frame_->Drop();
+ return answer;
+}
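
// A small, simplified sketch of the fast-path test performed above, with
// invented types (ToyContext is not V8's Context): walk the context chain
// towards the global context and give up on the fast global load as soon
// as any context carries an eval-introduced extension object.
#include <cstdio>

struct ToyContext {
  ToyContext* previous;  // enclosing context, NULL at the global end
  void* extension;       // non-NULL if eval introduced an extension object
};

bool CanUseFastGlobalLoad(const ToyContext* current) {
  for (const ToyContext* c = current; c != NULL; c = c->previous) {
    if (c->extension != NULL) return false;  // must take the slow path
  }
  return true;  // safe to use the global load IC directly
}

int main() {
  ToyContext global = {NULL, NULL};
  ToyContext inner = {&global, NULL};
  std::printf("fast path: %d\n", CanUseFastGlobalLoad(&inner));
  int extension_object = 0;
  inner.extension = &extension_object;  // simulate an extension created by eval
  std::printf("fast path: %d\n", CanUseFastGlobalLoad(&inner));
  return 0;
}
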
+
+
+void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->is_dynamic());
+
+ // For now, just do a runtime call.
+ frame_->Push(esi);
+ frame_->Push(slot->var()->name());
+
+ Result value(this);
+ if (init_state == CONST_INIT) {
+ // Same as the case for a normal store, but ignores attribute
+ // (e.g. READ_ONLY) of context slot so that we can initialize const
+ // properties (introduced via eval("const foo = (some expr);")). Also,
+ // uses the current function context instead of the top context.
+ //
+ // Note that we must declare the foo upon entry of eval(), via a
+ // context slot declaration, but we cannot initialize it at the same
+ // time, because the const declaration may be at the end of the eval
+ // code (sigh...) and the const variable may have been used before
+ // (where its value is 'undefined'). Thus, we can only do the
+ // initialization when we actually encounter the expression and when
+ // the expression operands are defined and valid, and thus we need the
+ // split into 2 operations: declaration of the context slot followed
+ // by initialization.
+ value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
+ }
+ // Storing a variable must keep the (new) value on the expression
+ // stack. This is necessary for compiling chained assignment
+ // expressions.
+ frame_->Push(&value);
+
} else {
- __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- }
+ ASSERT(!slot->var()->is_dynamic());
+
+ JumpTarget exit(this);
+ if (init_state == CONST_INIT) {
+ ASSERT(slot->var()->mode() == Variable::CONST);
+ // Only the first const initialization must be executed (the slot
+ // still contains 'the hole' value). When the assignment is executed,
+ // the code is identical to a normal store (see below).
+ //
+ // We spill the frame in the code below because the direct-frame
+ // access of SlotOperand is potentially unsafe with an unspilled
+ // frame.
+ VirtualFrame::SpilledScope spilled_scope(this);
+ Comment cmnt(masm_, "[ Init const");
+ __ mov(ecx, SlotOperand(slot, ecx));
+ __ cmp(ecx, Factory::the_hole_value());
+ exit.Branch(not_equal);
+ }
- // Pop the global object. The result is in eax.
- frame_->Pop();
+ // We must execute the store. Storing a variable must keep the (new)
+ // value on the stack. This is necessary for compiling assignment
+ // expressions.
+ //
+ // Note: We will reach here even with slot->var()->mode() ==
+ // Variable::CONST because of const declarations which will initialize
+ // consts to 'the hole' value and by doing so, end up calling this code.
+ if (slot->type() == Slot::PARAMETER) {
+ frame_->StoreToParameterAt(slot->index());
+ } else if (slot->type() == Slot::LOCAL) {
+ frame_->StoreToLocalAt(slot->index());
+ } else {
+ // The other slot types (LOOKUP and GLOBAL) cannot reach here.
+ //
+ // The use of SlotOperand below is safe for an unspilled frame
+ // because the slot is a context slot.
+ ASSERT(slot->type() == Slot::CONTEXT);
+ frame_->Dup();
+ Result value = frame_->Pop();
+ value.ToRegister();
+ Result start = allocator_->Allocate();
+ ASSERT(start.is_valid());
+ __ mov(SlotOperand(slot, start.reg()), value.reg());
+ // RecordWrite may destroy the value registers.
+ //
+ // TODO(204): Avoid actually spilling when the value is not
+ // needed (probably the common case).
+ frame_->Spill(value.reg());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
+ // The results start, value, and temp are unused by going out of
+ // scope.
+ }
+
+ exit.Bind();
+ }
}
void CodeGenerator::VisitLiteral(Literal* node) {
Comment cmnt(masm_, "[ Literal");
- if (node->handle()->IsSmi() && !IsInlineSmi(node)) {
- // To prevent long attacker-controlled byte sequences in code, larger
- // Smis are loaded in two steps.
- int bits = reinterpret_cast<int>(*node->handle());
- __ mov(eax, bits & 0x0000FFFF);
- __ xor_(eax, bits & 0xFFFF0000);
- frame_->Push(eax);
- } else {
- frame_->Push(Immediate(node->handle()));
+ frame_->Push(node->handle());
}
+
+
+void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
+ ASSERT(target.is_valid());
+ ASSERT(value->IsSmi());
+ int bits = reinterpret_cast<int>(*value);
+ __ Set(target, Immediate(bits & 0x0000FFFF));
+ __ xor_(target, bits & 0xFFFF0000);
+}
+
+
+bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
+ if (!value->IsSmi()) return false;
+ int int_value = Smi::cast(*value)->value();
+ return !is_intn(int_value, kMaxSmiInlinedBits);
}
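
// A standalone check of the split-immediate trick used by LoadUnsafeSmi
// above: the low and high halves of the value are emitted as two separate
// immediates and recombined with XOR, so the full (potentially
// attacker-influenced) 32-bit pattern never appears as a single constant
// in the instruction stream, yet the original bits are recovered exactly.
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t bits = 0x12345678u;            // stand-in for the smi's raw bits
  uint32_t target = bits & 0x0000FFFFu;   // mov target, <low half>
  target ^= bits & 0xFFFF0000u;           // xor target, <high half>
  assert(target == bits);                 // value reconstructed exactly
  std::printf("0x%08x\n", target);
  return 0;
}
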
-class RegExpDeferred: public DeferredCode {
+class DeferredRegExpLiteral: public DeferredCode {
public:
- RegExpDeferred(CodeGenerator* generator, RegExpLiteral* node)
+ DeferredRegExpLiteral(CodeGenerator* generator, RegExpLiteral* node)
: DeferredCode(generator), node_(node) {
- set_comment("[ RegExpDeferred");
+ set_comment("[ DeferredRegExpLiteral");
}
+
virtual void Generate();
+
private:
RegExpLiteral* node_;
};
-void RegExpDeferred::Generate() {
- // If the entry is undefined we call the runtime system to computed
- // the literal.
+void DeferredRegExpLiteral::Generate() {
+ Result literals(generator());
+ enter()->Bind(&literals);
+ // Since the entry is undefined we call the runtime system to
+ // compute the literal.
+ VirtualFrame* frame = generator()->frame();
// Literal array (0).
- __ push(ecx);
+ frame->Push(&literals);
// Literal index (1).
- __ push(Immediate(Smi::FromInt(node_->literal_index())));
+ frame->Push(Smi::FromInt(node_->literal_index()));
// RegExp pattern (2).
- __ push(Immediate(node_->pattern()));
+ frame->Push(node_->pattern());
// RegExp flags (3).
- __ push(Immediate(node_->flags()));
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(ebx, Operand(eax)); // "caller" expects result in ebx
+ frame->Push(node_->flags());
+ Result boilerplate =
+ frame->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ exit_.Jump(&boilerplate);
}
void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
Comment cmnt(masm_, "[ RegExp Literal");
- RegExpDeferred* deferred = new RegExpDeferred(this, node);
+ DeferredRegExpLiteral* deferred = new DeferredRegExpLiteral(this, node);
- // Retrieve the literal array and check the allocated entry.
-
- // Load the function of this activation.
- __ mov(ecx, frame_->Function());
+ // Retrieve the literals array and check the allocated entry. Begin
+ // with a writable copy of the function of this activation in a
+ // register.
+ frame_->PushFunction();
+ Result literals = frame_->Pop();
+ literals.ToRegister();
+ frame_->Spill(literals.reg());
// Load the literals array of the function.
- __ mov(ecx, FieldOperand(ecx, JSFunction::kLiteralsOffset));
+ __ mov(literals.reg(),
+ FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
// Load the literal at the ast saved index.
int literal_offset =
FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ mov(ebx, FieldOperand(ecx, literal_offset));
+ Result boilerplate = allocator_->Allocate();
+ ASSERT(boilerplate.is_valid());
+ __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
+
+ // Check whether we need to materialize the RegExp object. If so,
+ // jump to the deferred code passing the literals array.
+ __ cmp(boilerplate.reg(), Factory::undefined_value());
+ deferred->enter()->Branch(equal, &literals, not_taken);
- // Check whether we need to materialize the RegExp object.
- // If so, jump to the deferred code.
- __ cmp(ebx, Factory::undefined_value());
- __ j(equal, deferred->enter(), not_taken);
- __ bind(deferred->exit());
+ literals.Unuse();
+ // The deferred code returns the boilerplate object.
+ deferred->BindExit(&boilerplate);
- // Push the literal.
- frame_->Push(ebx);
+ // Push the boilerplate object.
+ frame_->Push(&boilerplate);
}
// by calling Runtime_CreateObjectLiteral.
// Each created boilerplate is stored in the JSFunction and they are
// therefore context dependent.
-class ObjectLiteralDeferred: public DeferredCode {
+class DeferredObjectLiteral: public DeferredCode {
public:
- ObjectLiteralDeferred(CodeGenerator* generator,
+ DeferredObjectLiteral(CodeGenerator* generator,
ObjectLiteral* node)
: DeferredCode(generator), node_(node) {
- set_comment("[ ObjectLiteralDeferred");
+ set_comment("[ DeferredObjectLiteral");
}
+
virtual void Generate();
+
private:
ObjectLiteral* node_;
};
-void ObjectLiteralDeferred::Generate() {
- // If the entry is undefined we call the runtime system to compute
- // the literal.
+void DeferredObjectLiteral::Generate() {
+ Result literals(generator());
+ enter()->Bind(&literals);
+ // Since the entry is undefined we call the runtime system to
+ // compute the literal.
+ VirtualFrame* frame = generator()->frame();
// Literal array (0).
- __ push(ecx);
+ frame->Push(&literals);
// Literal index (1).
- __ push(Immediate(Smi::FromInt(node_->literal_index())));
+ frame->Push(Smi::FromInt(node_->literal_index()));
// Constant properties (2).
- __ push(Immediate(node_->constant_properties()));
- __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
- __ mov(ebx, Operand(eax));
+ frame->Push(node_->constant_properties());
+ Result boilerplate =
+ frame->CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ exit_.Jump(&boilerplate);
}
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Comment cmnt(masm_, "[ ObjectLiteral");
- ObjectLiteralDeferred* deferred = new ObjectLiteralDeferred(this, node);
-
- // Retrieve the literal array and check the allocated entry.
+ DeferredObjectLiteral* deferred = new DeferredObjectLiteral(this, node);
- // Load the function of this activation.
- __ mov(ecx, frame_->Function());
+ // Retrieve the literals array and check the allocated entry. Begin
+ // with a writable copy of the function of this activation in a
+ // register.
+ frame_->PushFunction();
+ Result literals = frame_->Pop();
+ literals.ToRegister();
+ frame_->Spill(literals.reg());
// Load the literals array of the function.
- __ mov(ecx, FieldOperand(ecx, JSFunction::kLiteralsOffset));
+ __ mov(literals.reg(),
+ FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
// Load the literal at the ast saved index.
int literal_offset =
FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ mov(ebx, FieldOperand(ecx, literal_offset));
+ Result boilerplate = allocator_->Allocate();
+ ASSERT(boilerplate.is_valid());
+ __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
// Check whether we need to materialize the object literal boilerplate.
- // If so, jump to the deferred code.
- __ cmp(ebx, Factory::undefined_value());
- __ j(equal, deferred->enter(), not_taken);
- __ bind(deferred->exit());
+ // If so, jump to the deferred code passing the literals array.
+ __ cmp(boilerplate.reg(), Factory::undefined_value());
+ deferred->enter()->Branch(equal, &literals, not_taken);
- // Push the literal.
- frame_->Push(ebx);
- // Clone the boilerplate object.
- __ CallRuntime(Runtime::kCloneObjectLiteralBoilerplate, 1);
- // Push the new cloned literal object as the result.
- frame_->Push(eax);
+ literals.Unuse();
+ // The deferred code returns the boilerplate object.
+ deferred->BindExit(&boilerplate);
+ // Push the boilerplate object.
+ frame_->Push(&boilerplate);
+ // Clone the boilerplate object.
+ Result clone =
+ frame_->CallRuntime(Runtime::kCloneObjectLiteralBoilerplate, 1);
+ // Push the newly cloned literal object as the result.
+ frame_->Push(&clone);
for (int i = 0; i < node->properties()->length(); i++) {
ObjectLiteral::Property* property = node->properties()->at(i);
Handle<Object> key(property->key()->handle());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
if (key->IsSymbol()) {
- __ mov(eax, frame_->Top());
- frame_->Push(eax);
+ // Duplicate the object as the IC receiver.
+ frame_->Dup();
Load(property->value());
- frame_->Pop(eax);
- __ Set(ecx, Immediate(key));
- __ call(ic, RelocInfo::CODE_TARGET);
- frame_->Pop();
- // Ignore result.
+ Result value = frame_->Pop();
+ value.ToRegister(eax);
+
+ Result name = allocator_->Allocate(ecx);
+ ASSERT(name.is_valid());
+ __ Set(name.reg(), Immediate(key));
+ Result ignored =
+ frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET,
+ &value, &name, 0);
+ // Drop the duplicated receiver and ignore the result.
+ frame_->Drop();
break;
}
// Fall through
}
case ObjectLiteral::Property::PROTOTYPE: {
- __ mov(eax, frame_->Top());
- frame_->Push(eax);
+ // Duplicate the object as an argument to the runtime call.
+ frame_->Dup();
Load(property->key());
Load(property->value());
- __ CallRuntime(Runtime::kSetProperty, 3);
- // Ignore result.
+ Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
+ // Ignore the result.
break;
}
case ObjectLiteral::Property::SETTER: {
- // Duplicate the resulting object on the stack. The runtime
- // function will pop the three arguments passed in.
- __ mov(eax, frame_->Top());
- frame_->Push(eax);
+ // Duplicate the object as an argument to the runtime call.
+ frame_->Dup();
Load(property->key());
- frame_->Push(Immediate(Smi::FromInt(1)));
+ frame_->Push(Smi::FromInt(1));
Load(property->value());
- __ CallRuntime(Runtime::kDefineAccessor, 4);
- // Ignore result.
+ Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+ // Ignore the result.
break;
}
case ObjectLiteral::Property::GETTER: {
- // Duplicate the resulting object on the stack. The runtime
- // function will pop the three arguments passed in.
- __ mov(eax, frame_->Top());
- frame_->Push(eax);
+ // Duplicate the object as an argument to the runtime call.
+ frame_->Dup();
Load(property->key());
- frame_->Push(Immediate(Smi::FromInt(0)));
+ frame_->Push(Smi::FromInt(0));
Load(property->value());
- __ CallRuntime(Runtime::kDefineAccessor, 4);
- // Ignore result.
+ Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+ // Ignore the result.
break;
}
default: UNREACHABLE();
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
Comment cmnt(masm_, "[ ArrayLiteral");
- // Call runtime to create the array literal.
- frame_->Push(Immediate(node->literals()));
- // Load the function of this frame.
- __ mov(ecx, frame_->Function());
- // Load the literals array of the function.
- __ mov(ecx, FieldOperand(ecx, JSFunction::kLiteralsOffset));
- frame_->Push(ecx);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 2);
+ // Call the runtime to create the array literal.
+ frame_->Push(node->literals());
+ // Load the literals array of the current function.
+ frame_->PushFunction();
+ Result literals = frame_->Pop();
+ literals.ToRegister();
+ frame_->Spill(literals.reg()); // Make it writable.
+ __ mov(literals.reg(),
+ FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+ frame_->Push(&literals);
+ Result array = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 2);
// Push the resulting array literal on the stack.
- frame_->Push(eax);
+ frame_->Push(&array);
// Generate code to set the elements in the array that are not
// literals.
for (int i = 0; i < node->values()->length(); i++) {
Expression* value = node->values()->at(i);
- // If value is literal the property value is already
- // set in the boilerplate object.
+ // If value is literal the property value is already set in the
+ // boilerplate object.
if (value->AsLiteral() == NULL) {
// The property must be set by generated code.
Load(value);
- // Get the value off the stack.
- frame_->Pop(eax);
- // Fetch the object literal while leaving on the stack.
- __ mov(ecx, frame_->Top());
+ // Get the property value off the stack.
+ Result prop_value = frame_->Pop();
+ prop_value.ToRegister();
+
+ // Fetch the array literal while leaving a copy on the stack and
+ // use it to get the elements array.
+ frame_->Dup();
+ Result elements = frame_->Pop();
+ elements.ToRegister();
+ frame_->Spill(elements.reg());
// Get the elements array.
- __ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+ __ mov(elements.reg(),
+ FieldOperand(elements.reg(), JSObject::kElementsOffset));
// Write to the indexed properties array.
int offset = i * kPointerSize + Array::kHeaderSize;
- __ mov(FieldOperand(ecx, offset), eax);
+ __ mov(FieldOperand(elements.reg(), offset), prop_value.reg());
// Update the write barrier for the array address.
- __ RecordWrite(ecx, offset, eax, ebx);
+ frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
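+ // RecordWrite also clobbers its scratch register, so allocate a fresh
+ // register for it rather than reusing a live value.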
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
}
}
}
void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
+ ASSERT(!in_spilled_code());
// Call runtime routine to allocate the catch extension object and
// assign the exception value to the catch variable.
- Comment cmnt(masm_, "[CatchExtensionObject ");
+ Comment cmnt(masm_, "[ CatchExtensionObject");
Load(node->key());
Load(node->value());
- __ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- frame_->Push(eax);
+ Result result =
+ frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
+ frame_->Push(&result);
}
void CodeGenerator::VisitAssignment(Assignment* node) {
Comment cmnt(masm_, "[ Assignment");
- CodeForStatement(node);
-
- Reference target(this, node->target());
- if (target.is_illegal()) return;
-
- if (node->starts_initialization_block()) {
- ASSERT(target.type() == Reference::NAMED ||
- target.type() == Reference::KEYED);
- // Change to slow case in the beginning of an initialization block
- // to avoid the quadratic behavior of repeatedly adding fast properties.
- int stack_position = (target.type() == Reference::NAMED) ? 0 : 1;
- frame_->Push(Operand(esp, stack_position * kPointerSize));
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- }
- if (node->op() == Token::ASSIGN ||
- node->op() == Token::INIT_VAR ||
- node->op() == Token::INIT_CONST) {
- Load(node->value());
+ CodeForStatementPosition(node);
- } else {
- target.GetValue(NOT_INSIDE_TYPEOF);
- Literal* literal = node->value()->AsLiteral();
- if (IsInlineSmi(literal)) {
- SmiOperation(node->binary_op(), node->type(), literal->handle(), false,
- NO_OVERWRITE);
- } else {
+ { Reference target(this, node->target());
+ if (target.is_illegal()) {
+ // Fool the virtual frame into thinking that we left the assignment's
+ // value on the frame.
+ frame_->Push(Smi::FromInt(0));
+ return;
+ }
+ Variable* var = node->target()->AsVariableProxy()->AsVariable();
+
+ if (node->starts_initialization_block()) {
+ ASSERT(target.type() == Reference::NAMED ||
+ target.type() == Reference::KEYED);
+ // Change to slow case in the beginning of an initialization
+ // block to avoid the quadratic behavior of repeatedly adding
+ // fast properties.
+
+ // The receiver is the argument to the runtime call. It is the
+ // first value pushed when the reference was loaded to the
+ // frame.
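+ // (A named reference occupies one frame slot, the receiver; a keyed
+ // reference occupies two, the receiver and the key, so the receiver is
+ // target.size() - 1 elements below the top.)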
+ frame_->PushElementAt(target.size() - 1);
+ Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+ if (node->op() == Token::ASSIGN ||
+ node->op() == Token::INIT_VAR ||
+ node->op() == Token::INIT_CONST) {
Load(node->value());
- GenericBinaryOperation(node->binary_op(), node->type());
+
+ } else {
+ Literal* literal = node->value()->AsLiteral();
+ Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
+ // There are two cases, which are easy to test for, where the target is
+ // not read in the right-hand side: the right-hand side is a literal, or
+ // the right-hand side is a different variable. TakeValue invalidates the
+ // target, with an implicit promise that it will be written to again
+ // before it is read.
+ if (literal != NULL || (right_var != NULL && right_var != var)) {
+ target.TakeValue(NOT_INSIDE_TYPEOF);
+ } else {
+ target.GetValue(NOT_INSIDE_TYPEOF);
+ }
+ if (IsInlineSmi(literal)) {
+ SmiOperation(node->binary_op(), node->type(), literal->handle(), false,
+ NO_OVERWRITE);
+ } else {
+ Load(node->value());
+ GenericBinaryOperation(node->binary_op(), node->type());
+ }
}
- }
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- if (var != NULL &&
- var->mode() == Variable::CONST &&
- node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
- // Assignment ignored - leave the value on the stack.
- } else {
- CodeForSourcePosition(node->position());
- if (node->op() == Token::INIT_CONST) {
- // Dynamic constant initializations must use the function context
- // and initialize the actual constant declared. Dynamic variable
- // initializations are simply assignments and use SetValue.
- target.SetValue(CONST_INIT);
+ if (var != NULL &&
+ var->mode() == Variable::CONST &&
+ node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
+ // Assignment ignored - leave the value on the stack.
} else {
- target.SetValue(NOT_CONST_INIT);
+ CodeForSourcePosition(node->position());
+ if (node->op() == Token::INIT_CONST) {
+ // Dynamic constant initializations must use the function context
+ // and initialize the actual constant declared. Dynamic variable
+ // initializations are simply assignments and use SetValue.
+ target.SetValue(CONST_INIT);
+ } else {
+ target.SetValue(NOT_CONST_INIT);
+ }
if (node->ends_initialization_block()) {
ASSERT(target.type() == Reference::NAMED ||
target.type() == Reference::KEYED);
- // End of initialization block. Revert to fast case.
- int stack_position = (target.type() == Reference::NAMED) ? 1 : 2;
- frame_->Push(Operand(esp, stack_position * kPointerSize));
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ // End of initialization block. Revert to fast case. The
+ // argument to the runtime call is the receiver, which is the
+ // first value pushed as part of the reference, which is below
+ // the lhs value.
+ frame_->PushElementAt(target.size());
+ Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
}
}
}
void CodeGenerator::VisitThrow(Throw* node) {
Comment cmnt(masm_, "[ Throw");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
Load(node->exception());
- __ CallRuntime(Runtime::kThrow, 1);
- frame_->Push(eax);
+ Result result = frame_->CallRuntime(Runtime::kThrow, 1);
+ frame_->Push(&result);
}
void CodeGenerator::VisitProperty(Property* node) {
Comment cmnt(masm_, "[ Property");
-
Reference property(this, node);
property.GetValue(typeof_state());
}
ZoneList<Expression*>* args = node->arguments();
- CodeForStatement(node);
+ CodeForStatementPosition(node);
// Check if the function is a variable or a property.
Expression* function = node->expression();
// ----------------------------------
// Push the name of the function and the receiver onto the stack.
- frame_->Push(Immediate(var->name()));
+ frame_->Push(var->name());
// Pass the global object as the receiver and let the IC stub
// patch the stack to use the global proxy as 'this' in the
// invoked function.
LoadGlobal();
+
// Load the arguments.
- for (int i = 0; i < args->length(); i++) {
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
}
// Setup the receiver register and call the IC initialization code.
Handle<Code> stub = (loop_nesting() > 0)
- ? ComputeCallInitializeInLoop(args->length())
- : ComputeCallInitialize(args->length());
+ ? ComputeCallInitializeInLoop(arg_count)
+ : ComputeCallInitialize(arg_count);
CodeForSourcePosition(node->position());
- __ call(stub, RelocInfo::CODE_TARGET_CONTEXT);
- __ mov(esi, frame_->Context());
+ Result result = frame_->CallCodeObject(stub,
+ RelocInfo::CODE_TARGET_CONTEXT,
+ arg_count + 1);
+ frame_->RestoreContextRegister();
- // Overwrite the function on the stack with the result.
- __ mov(frame_->Top(), eax);
+ // Replace the function on the stack with the result.
+ frame_->SetElementAt(0, &result);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// Load the function
frame_->Push(esi);
- frame_->Push(Immediate(var->name()));
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ frame_->Push(var->name());
+ frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
// eax: slot value; edx: receiver
// Load the receiver.
// ------------------------------------------------------------------
// Push the name of the function and the receiver onto the stack.
- frame_->Push(Immediate(literal->handle()));
+ frame_->Push(literal->handle());
Load(property->obj());
// Load the arguments.
- for (int i = 0; i < args->length(); i++) Load(args->at(i));
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
// Call the IC initialization code.
Handle<Code> stub = (loop_nesting() > 0)
- ? ComputeCallInitializeInLoop(args->length())
- : ComputeCallInitialize(args->length());
+ ? ComputeCallInitializeInLoop(arg_count)
+ : ComputeCallInitialize(arg_count);
CodeForSourcePosition(node->position());
- __ call(stub, RelocInfo::CODE_TARGET);
- __ mov(esi, frame_->Context());
+ Result result = frame_->CallCodeObject(stub,
+ RelocInfo::CODE_TARGET,
+ arg_count + 1);
+ frame_->RestoreContextRegister();
- // Overwrite the function on the stack with the result.
- __ mov(frame_->Top(), eax);
+ // Replace the function on the stack with the result.
+ frame_->SetElementAt(0, &result);
} else {
// -------------------------------------------
// Pass receiver to called function.
// The reference's size is non-negative.
- frame_->Push(frame_->Element(ref.size()));
+ frame_->SpillAll();
+ frame_->EmitPush(frame_->ElementAt(ref.size()));
// Call the function.
CallWithArguments(args, node->position());
Load(function);
// Pass the global proxy as the receiver.
- LoadGlobalReceiver(eax);
+ LoadGlobalReceiver();
// Call the function.
CallWithArguments(args, node->position());
void CodeGenerator::VisitCallNew(CallNew* node) {
Comment cmnt(masm_, "[ CallNew");
- CodeForStatement(node);
+ CodeForStatementPosition(node);
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = node->arguments();
- for (int i = 0; i < args->length(); i++) Load(args->at(i));
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
// Constructors are called with the number of arguments in register
// eax for now. Another option would be to have separate construct
// call trampolines per different arguments counts encountered.
- __ Set(eax, Immediate(args->length()));
+ Result num_args = allocator()->Allocate(eax);
+ ASSERT(num_args.is_valid());
+ __ Set(num_args.reg(), Immediate(arg_count));
// Load the function into temporary function slot as per calling
// convention.
- __ mov(edi, frame_->Element(args->length() + 1));
+ frame_->PushElementAt(arg_count + 1);
+ Result function = frame_->Pop();
+ function.ToRegister(edi);
+ ASSERT(function.is_valid());
// Call the construct call builtin that handles allocation and
// constructor invocation.
CodeForSourcePosition(node->position());
- __ call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
- RelocInfo::CONSTRUCT_CALL);
- // Discard the function and "push" the newly created object.
- __ mov(frame_->Top(), eax);
+ Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
+ Result result = frame_->CallCodeObject(ic,
+ RelocInfo::CONSTRUCT_CALL,
+ &num_args,
+ &function,
+ arg_count + 1);
+
+ // Replace the function on the stack with the result.
+ frame_->SetElementAt(0, &result);
}
ZoneList<Expression*>* args = node->arguments();
Expression* function = node->expression();
- CodeForStatement(node);
+ CodeForStatementPosition(node);
- // Prepare stack for call to resolved function.
+ // Prepare the stack for the call to the resolved function.
Load(function);
- __ push(Immediate(Factory::undefined_value())); // Slot for receiver
- for (int i = 0; i < args->length(); i++) {
+
+ // Allocate a frame slot for the receiver.
+ frame_->Push(Factory::undefined_value());
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
}
- // Prepare stack for call to ResolvePossiblyDirectEval.
- __ push(Operand(esp, args->length() * kPointerSize + kPointerSize));
- if (args->length() > 0) {
- __ push(Operand(esp, args->length() * kPointerSize));
+ // Prepare the stack for the call to ResolvePossiblyDirectEval.
+ frame_->PushElementAt(arg_count + 1);
+ if (arg_count > 0) {
+ frame_->PushElementAt(arg_count);
} else {
- __ push(Immediate(Factory::undefined_value()));
+ frame_->Push(Factory::undefined_value());
}
// Resolve the call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
-
- // Touch up stack with the right values for the function and the receiver.
- __ mov(edx, FieldOperand(eax, FixedArray::kHeaderSize));
- __ mov(Operand(esp, (args->length() + 1) * kPointerSize), edx);
- __ mov(edx, FieldOperand(eax, FixedArray::kHeaderSize + kPointerSize));
- __ mov(Operand(esp, args->length() * kPointerSize), edx);
+ Result result =
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+ // Touch up the stack with the right values for the function and the
+ // receiver. Use a scratch register to avoid destroying the result.
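+ // The result is a fixed array; element 0 replaces the function on the
+ // stack and element 1 replaces the receiver.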
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ __ mov(scratch.reg(), FieldOperand(result.reg(), FixedArray::kHeaderSize));
+ frame_->SetElementAt(arg_count + 1, &scratch);
+
+ // We can reuse the result register now.
+ frame_->Spill(result.reg());
+ __ mov(result.reg(),
+ FieldOperand(result.reg(), FixedArray::kHeaderSize + kPointerSize));
+ frame_->SetElementAt(arg_count, &result);
// Call the function.
CodeForSourcePosition(node->position());
+ CallFunctionStub call_function(arg_count);
+ result = frame_->CallStub(&call_function, arg_count + 1);
- CallFunctionStub call_function(args->length());
- __ CallStub(&call_function);
-
- // Restore context and pop function from the stack.
- __ mov(esi, frame_->Context());
- __ mov(frame_->Top(), eax);
+ // Restore the context and overwrite the function on the stack with
+ // the result.
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &result);
}
void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
- frame_->Pop(eax);
- __ test(eax, Immediate(kSmiTagMask));
- cc_reg_ = zero;
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ __ test(value.reg(), Immediate(kSmiTagMask));
+ value.Unuse();
+ destination()->Split(zero);
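+ // Split(zero) routes control to the true target when the zero flag is
+ // set (the value is a smi) and to the false target otherwise.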
}
if (ShouldGenerateLog(args->at(0))) {
Load(args->at(1));
Load(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
+ frame_->CallRuntime(Runtime::kLog, 2);
}
#endif
// Finally, we're expected to leave a value on the top of the stack.
- frame_->Push(Immediate(Factory::undefined_value()));
+ frame_->Push(Factory::undefined_value());
}
void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
- frame_->Pop(eax);
- __ test(eax, Immediate(kSmiTagMask | 0x80000000));
- cc_reg_ = zero;
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ __ test(value.reg(), Immediate(kSmiTagMask | 0x80000000));
+ value.Unuse();
+ destination()->Split(zero);
}
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
- Label slow_case;
- Label end;
- Label not_a_flat_string;
- Label not_a_cons_string_either;
- Label try_again_with_new_string;
- Label ascii_string;
- Label got_char_code;
+ JumpTarget slow_case(this);
+ JumpTarget end(this);
+ JumpTarget not_a_flat_string(this);
+ JumpTarget a_cons_string(this);
+ JumpTarget try_again_with_new_string(this, JumpTarget::BIDIRECTIONAL);
+ JumpTarget ascii_string(this);
+ JumpTarget got_char_code(this);
- // Load the string into eax and the index into ebx.
Load(args->at(0));
Load(args->at(1));
- frame_->Pop(ebx);
- frame_->Pop(eax);
+ // Reserve register ecx to use as the shift amount later.
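+ // (Variable shift instructions on ia32 take their shift count in cl,
+ // the low byte of ecx.)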
+ Result shift_amount = allocator()->Allocate(ecx);
+ ASSERT(shift_amount.is_valid());
+ Result index = frame_->Pop();
+ index.ToRegister();
+ Result object = frame_->Pop();
+ object.ToRegister();
// If the receiver is a smi return undefined.
ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &slow_case, not_taken);
+ __ test(object.reg(), Immediate(kSmiTagMask));
+ slow_case.Branch(zero, not_taken);
// Check for negative or non-smi index.
ASSERT(kSmiTag == 0);
- __ test(ebx, Immediate(kSmiTagMask | 0x80000000));
- __ j(not_zero, &slow_case, not_taken);
+ __ test(index.reg(), Immediate(kSmiTagMask | 0x80000000));
+ slow_case.Branch(not_zero, not_taken);
// Get rid of the smi tag on the index.
- __ sar(ebx, kSmiTagSize);
-
- __ bind(&try_again_with_new_string);
- // Get the type of the heap object into edi.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(edi, FieldOperand(edx, Map::kInstanceTypeOffset));
+ frame_->Spill(index.reg());
+ __ sar(index.reg(), kSmiTagSize);
+
+ try_again_with_new_string.Bind(&object, &index, &shift_amount);
+ // Get the type of the heap object.
+ Result object_type = allocator()->Allocate();
+ ASSERT(object_type.is_valid());
+ __ mov(object_type.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
+ __ movzx_b(object_type.reg(),
+ FieldOperand(object_type.reg(), Map::kInstanceTypeOffset));
// We don't handle non-strings.
- __ test(edi, Immediate(kIsNotStringMask));
- __ j(not_zero, &slow_case, not_taken);
+ __ test(object_type.reg(), Immediate(kIsNotStringMask));
+ slow_case.Branch(not_zero, not_taken);
// Here we make assumptions about the tag values and the shifts needed.
// See the comment in objects.h.
String::kMediumLengthShift);
ASSERT(kShortStringTag + String::kLongLengthShift ==
String::kShortLengthShift);
- __ mov(ecx, Operand(edi));
- __ and_(ecx, kStringSizeMask);
- __ add(Operand(ecx), Immediate(String::kLongLengthShift));
- // Get the length field.
- __ mov(edx, FieldOperand(eax, String::kLengthOffset));
- __ shr(edx); // ecx is implicit operand.
- // edx is now the length of the string.
-
+ __ mov(shift_amount.reg(), Operand(object_type.reg()));
+ __ and_(shift_amount.reg(), kStringSizeMask);
+ __ add(Operand(shift_amount.reg()), Immediate(String::kLongLengthShift));
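+ // shift_amount now holds the length shift for this string size: the
+ // size tag bits plus String::kLongLengthShift, as guaranteed by the
+ // assertions above.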
+ // Get the length field. Temporary register now used for length.
+ Result length = object_type;
+ __ mov(length.reg(), FieldOperand(object.reg(), String::kLengthOffset));
+ __ shr(length.reg()); // shift_amount, in ecx, is implicit operand.
// Check for index out of range.
- __ cmp(ebx, Operand(edx));
- __ j(greater_equal, &slow_case, not_taken);
+ __ cmp(index.reg(), Operand(length.reg()));
+ slow_case.Branch(greater_equal, not_taken);
+ length.Unuse();
+ // Load the object type into object_type again.
+ // These two instructions are duplicated from above, to save a register.
+ __ mov(object_type.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
+ __ movzx_b(object_type.reg(),
+ FieldOperand(object_type.reg(), Map::kInstanceTypeOffset));
// We need special handling for non-flat strings.
ASSERT(kSeqStringTag == 0);
- __ test(edi, Immediate(kStringRepresentationMask));
- __ j(not_zero, ¬_a_flat_string, not_taken);
-
+ __ test(object_type.reg(), Immediate(kStringRepresentationMask));
+ not_a_flat_string.Branch(not_zero, &object, &index, &object_type,
+ &shift_amount, not_taken);
+ shift_amount.Unuse();
// Check for 1-byte or 2-byte string.
- __ test(edi, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string, taken);
+ __ test(object_type.reg(), Immediate(kStringEncodingMask));
+ ascii_string.Branch(not_zero, &object, &index, &object_type, taken);
// 2-byte string.
// Load the 2-byte character code.
- __ movzx_w(eax,
- FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
+ __ movzx_w(object_type.reg(), FieldOperand(object.reg(),
+ index.reg(),
+ times_2,
+ SeqTwoByteString::kHeaderSize));
+ object.Unuse();
+ index.Unuse();
+ got_char_code.Jump(&object_type);
// ASCII string.
- __ bind(&ascii_string);
+ ascii_string.Bind(&object, &index, &object_type);
// Load the byte.
- __ movzx_b(eax, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
-
- __ bind(&got_char_code);
+ __ movzx_b(object_type.reg(), FieldOperand(object.reg(),
+ index.reg(),
+ times_1,
+ SeqAsciiString::kHeaderSize));
+ object.Unuse();
+ index.Unuse();
+ got_char_code.Bind(&object_type);
ASSERT(kSmiTag == 0);
- __ shl(eax, kSmiTagSize);
- frame_->Push(eax);
- __ jmp(&end);
+ __ shl(object_type.reg(), kSmiTagSize);
+ frame_->Push(&object_type);
+ end.Jump();
// Handle non-flat strings.
- __ bind(¬_a_flat_string);
- __ and_(edi, kStringRepresentationMask);
- __ cmp(edi, kConsStringTag);
- __ j(not_equal, ¬_a_cons_string_either, not_taken);
-
- // ConsString.
- // Get the first of the two strings.
- __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
- __ jmp(&try_again_with_new_string);
-
- __ bind(¬_a_cons_string_either);
- __ cmp(edi, kSlicedStringTag);
- __ j(not_equal, &slow_case, not_taken);
+ not_a_flat_string.Bind(&object, &index, &object_type, &shift_amount);
+ __ and_(object_type.reg(), kStringRepresentationMask);
+ __ cmp(object_type.reg(), kConsStringTag);
+ a_cons_string.Branch(equal, &object, &index, &shift_amount, taken);
+ __ cmp(object_type.reg(), kSlicedStringTag);
+ slow_case.Branch(not_equal, not_taken);
+ object_type.Unuse();
// SlicedString.
// Add the offset to the index.
- __ add(ebx, FieldOperand(eax, SlicedString::kStartOffset));
- __ j(overflow, &slow_case);
- // Get the underlying string.
- __ mov(eax, FieldOperand(eax, SlicedString::kBufferOffset));
- __ jmp(&try_again_with_new_string);
+ __ add(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
+ slow_case.Branch(overflow);
+ // Getting the underlying string is done by running the cons string code.
- __ bind(&slow_case);
- frame_->Push(Immediate(Factory::undefined_value()));
-
- __ bind(&end);
+ // ConsString.
+ a_cons_string.Bind(&object, &index, &shift_amount);
+ // Get the first of the two strings.
+ frame_->Spill(object.reg());
+ // Both sliced and cons strings store their source string at the same place.
+ ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
+ __ mov(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
+ try_again_with_new_string.Jump(&object, &index, &shift_amount);
+
+ // No results live at this point.
+ slow_case.Bind();
+ frame_->Push(Factory::undefined_value());
+ end.Bind();
}
void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
- Label answer;
- // We need the CC bits to come out as not_equal in the case where the
- // object is a smi. This can't be done with the usual test opcode so
- // we copy the object to ecx and do some destructive ops on it that
- // result in the right CC bits.
- frame_->Pop(eax);
- __ mov(ecx, Operand(eax));
- __ and_(ecx, kSmiTagMask);
- __ xor_(ecx, kSmiTagMask);
- __ j(not_equal, &answer, not_taken);
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ __ test(value.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(equal);
// It is a heap object - get map.
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(), FieldOperand(value.reg(), HeapObject::kMapOffset));
+ __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
// Check if the object is a JS array or not.
- __ cmp(eax, JS_ARRAY_TYPE);
- __ bind(&answer);
- cc_reg_ = equal;
+ __ cmp(temp.reg(), JS_ARRAY_TYPE);
+ value.Unuse();
+ temp.Unuse();
+ destination()->Split(equal);
}
void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
-
- // Seed the result with the formal parameters count, which will be
- // used in case no arguments adaptor frame is found below the
- // current frame.
- __ Set(eax, Immediate(Smi::FromInt(scope_->num_parameters())));
-
+ Result initial_value = allocator()->Allocate(eax);
+ ASSERT(initial_value.is_valid());
+ __ Set(initial_value.reg(),
+ Immediate(Smi::FromInt(scope_->num_parameters())));
+ // ArgumentsAccessStub takes the parameter count as an input argument
+ // in register eax.
// Call the shared stub to get to the arguments.length.
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
- __ CallStub(&stub);
- frame_->Push(eax);
+ Result result = frame_->CallStub(&stub, &initial_value, 0);
+ frame_->Push(&result);
}
void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- Label leave;
+ JumpTarget leave(this);
Load(args->at(0)); // Load the object.
- __ mov(eax, frame_->Top());
+ frame_->Dup();
+ Result object = frame_->Pop();
+ object.ToRegister();
+ ASSERT(object.is_valid());
// if (object->IsSmi()) return object.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &leave, taken);
+ __ test(object.reg(), Immediate(kSmiTagMask));
+ leave.Branch(zero, taken);
// It is a heap object - get map.
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
+ __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
// if (!object->IsJSValue()) return object.
- __ cmp(ecx, JS_VALUE_TYPE);
- __ j(not_equal, &leave, not_taken);
- __ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
- __ mov(frame_->Top(), eax);
- __ bind(&leave);
+ __ cmp(temp.reg(), JS_VALUE_TYPE);
+ leave.Branch(not_equal, not_taken);
+ __ mov(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
+ object.Unuse();
+ frame_->SetElementAt(0, &temp);
+ leave.Bind();
}
void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
- Label leave;
+ JumpTarget leave(this);
Load(args->at(0)); // Load the object.
Load(args->at(1)); // Load the value.
- __ mov(eax, frame_->Element(1));
- __ mov(ecx, frame_->Top());
- // if (object->IsSmi()) return object.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &leave, taken);
- // It is a heap object - get map.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- // if (!object->IsJSValue()) return object.
- __ cmp(ebx, JS_VALUE_TYPE);
- __ j(not_equal, &leave, not_taken);
+ Result value = frame_->Pop();
+ Result object = frame_->Pop();
+ value.ToRegister();
+ object.ToRegister();
+
+ // if (object->IsSmi()) return value.
+ __ test(object.reg(), Immediate(kSmiTagMask));
+ leave.Branch(zero, &value, taken);
+
+ // It is a heap object - get its map.
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ __ mov(scratch.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
+ __ movzx_b(scratch.reg(),
+ FieldOperand(scratch.reg(), Map::kInstanceTypeOffset));
+ // if (!object->IsJSValue()) return value.
+ __ cmp(scratch.reg(), JS_VALUE_TYPE);
+ leave.Branch(not_equal, &value, not_taken);
+
// Store the value.
- __ mov(FieldOperand(eax, JSValue::kValueOffset), ecx);
- // Update the write barrier.
- __ RecordWrite(eax, JSValue::kValueOffset, ecx, ebx);
+ __ mov(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
+ // Update the write barrier. Save the value as it will be
+ // overwritten by the write barrier code and is needed afterward.
+ Result duplicate_value = allocator_->Allocate();
+ ASSERT(duplicate_value.is_valid());
+ __ mov(duplicate_value.reg(), value.reg());
+ // The object register is also overwritten by the write barrier and
+ // possibly aliased in the frame.
+ frame_->Spill(object.reg());
+ __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
+ scratch.reg());
+ object.Unuse();
+ scratch.Unuse();
+ duplicate_value.Unuse();
+
// Leave.
- __ bind(&leave);
- __ mov(ecx, frame_->Top());
- frame_->Pop();
- __ mov(frame_->Top(), ecx);
+ leave.Bind(&value);
+ frame_->Push(&value);
}
// Load the key onto the stack and set register eax to the formal
// parameters count for the currently executing function.
Load(args->at(0));
- __ Set(eax, Immediate(Smi::FromInt(scope_->num_parameters())));
+ Result parameters_count = allocator()->Allocate(eax);
+ ASSERT(parameters_count.is_valid());
+ __ Set(parameters_count.reg(),
+ Immediate(Smi::FromInt(scope_->num_parameters())));
// Call the shared stub to get to arguments[key].
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- __ mov(frame_->Top(), eax);
+ Result result = frame_->CallStub(&stub, ¶meters_count, 0);
+ frame_->SetElementAt(0, &result);
}
// Load the two objects into registers and perform the comparison.
Load(args->at(0));
Load(args->at(1));
- frame_->Pop(eax);
- frame_->Pop(ecx);
- __ cmp(eax, Operand(ecx));
- cc_reg_ = equal;
+ Result right = frame_->Pop();
+ Result left = frame_->Pop();
+ right.ToRegister();
+ left.ToRegister();
+ __ cmp(right.reg(), Operand(left.reg()));
+ right.Unuse();
+ left.Unuse();
+ destination()->Split(equal);
}
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
- if (CheckForInlineRuntimeCall(node)) return;
+ if (CheckForInlineRuntimeCall(node)) {
+ return;
+ }
ZoneList<Expression*>* args = node->arguments();
Comment cmnt(masm_, "[ CallRuntime");
if (function == NULL) {
// Prepare stack for calling JS runtime function.
- frame_->Push(Immediate(node->name()));
+ frame_->Push(node->name());
// Push the builtins object found in the current global object.
- __ mov(edx, GlobalObject());
- frame_->Push(FieldOperand(edx, GlobalObject::kBuiltinsOffset));
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(), GlobalObject());
+ __ mov(temp.reg(), FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
+ frame_->Push(&temp);
}
// Push the arguments ("left-to-right").
- for (int i = 0; i < args->length(); i++)
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
+ }
- if (function != NULL) {
- // Call the C runtime function.
- __ CallRuntime(function, args->length());
- frame_->Push(eax);
- } else {
+ if (function == NULL) {
// Call the JS runtime function.
- Handle<Code> stub = ComputeCallInitialize(args->length());
- __ Set(eax, Immediate(args->length()));
- __ call(stub, RelocInfo::CODE_TARGET);
- __ mov(esi, frame_->Context());
- __ mov(frame_->Top(), eax);
+ Handle<Code> stub = ComputeCallInitialize(arg_count);
+
+ Result num_args = allocator()->Allocate(eax);
+ ASSERT(num_args.is_valid());
+ __ Set(num_args.reg(), Immediate(args->length()));
+ Result answer = frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET,
+ &num_args, arg_count + 1);
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &answer);
+ } else {
+ // Call the C runtime function.
+ Result answer = frame_->CallRuntime(function, arg_count);
+ frame_->Push(&answer);
}
}
void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+ // Note that because of NOT and an optimization for comparing a typeof
+ // expression with a literal string, this function can fail to leave a
+ // value on top of the frame or in the cc register.
Comment cmnt(masm_, "[ UnaryOperation");
Token::Value op = node->op();
if (op == Token::NOT) {
- LoadCondition(node->expression(), NOT_INSIDE_TYPEOF,
- false_target(), true_target(), true);
- cc_reg_ = NegateCondition(cc_reg_);
+ // Swap the true and false targets but keep the same actual label
+ // as the fall through.
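+ // Compiling the operand against the inverted destination routes its
+ // true results to the enclosing false target and vice versa, so the
+ // NOT itself needs no generated code.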
+ destination()->Invert();
+ LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true);
+ // Swap the labels back.
+ destination()->Invert();
} else if (op == Token::DELETE) {
Property* property = node->expression()->AsProperty();
if (property != NULL) {
Load(property->obj());
Load(property->key());
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- frame_->Push(eax);
+ Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
+ frame_->Push(&answer);
return;
}
Slot* slot = variable->slot();
if (variable->is_global()) {
LoadGlobal();
- frame_->Push(Immediate(variable->name()));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- frame_->Push(eax);
+ frame_->Push(variable->name());
+ Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+ CALL_FUNCTION, 2);
+ frame_->Push(&answer);
return;
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// lookup the context holding the named variable
frame_->Push(esi);
- frame_->Push(Immediate(variable->name()));
- __ CallRuntime(Runtime::kLookupContext, 2);
- // eax: context
- frame_->Push(eax);
- frame_->Push(Immediate(variable->name()));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- frame_->Push(eax);
+ frame_->Push(variable->name());
+ Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
+ frame_->Push(&context);
+ frame_->Push(variable->name());
+ Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+ CALL_FUNCTION, 2);
+ frame_->Push(&answer);
return;
}
// Default: Result of deleting non-global, not dynamically
// introduced variables is false.
- frame_->Push(Immediate(Factory::false_value()));
+ frame_->Push(Factory::false_value());
} else {
// Default: Result of deleting expressions is true.
Load(node->expression()); // may have side-effects
- __ Set(frame_->Top(), Immediate(Factory::true_value()));
+ frame_->SetElementAt(0, Factory::true_value());
}
} else if (op == Token::TYPEOF) {
// Special case for loading the typeof expression; see comment on
// LoadTypeofExpression().
LoadTypeofExpression(node->expression());
- __ CallRuntime(Runtime::kTypeof, 1);
- frame_->Push(eax);
+ Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
+ frame_->Push(&answer);
} else if (op == Token::VOID) {
Expression* expression = node->expression();
expression->AsLiteral()->IsNull())) {
// Omit evaluating the value of the primitive literal.
// It will be discarded anyway, and can have no side effect.
- frame_->Push(Immediate(Factory::undefined_value()));
+ frame_->Push(Factory::undefined_value());
} else {
Load(node->expression());
- __ mov(frame_->Top(), Factory::undefined_value());
+ frame_->SetElementAt(0, Factory::undefined_value());
}
} else {
case Token::SUB: {
UnarySubStub stub;
// TODO(1222589): remove dependency of TOS being cached inside stub
- frame_->Pop(eax);
- __ CallStub(&stub);
- frame_->Push(eax);
+ Result operand = frame_->Pop();
+ operand.ToRegister(eax);
+ Result answer = frame_->CallStub(&stub, &operand, 0);
+ frame_->Push(&answer);
break;
}
case Token::BIT_NOT: {
// Smi check.
- Label smi_label;
- Label continue_label;
- frame_->Pop(eax);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &smi_label, taken);
-
- frame_->Push(eax); // undo popping of TOS
- __ InvokeBuiltin(Builtins::BIT_NOT, CALL_FUNCTION);
-
- __ jmp(&continue_label);
- __ bind(&smi_label);
- __ not_(eax);
- __ and_(eax, ~kSmiTagMask); // Remove inverted smi-tag.
- __ bind(&continue_label);
- frame_->Push(eax);
+ JumpTarget smi_label(this);
+ JumpTarget continue_label(this);
+ Result operand = frame_->Pop();
+ operand.ToRegister();
+ __ test(operand.reg(), Immediate(kSmiTagMask));
+ smi_label.Branch(zero, &operand, taken);
+
+ frame_->Push(&operand); // undo popping of TOS
+ Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
+ CALL_FUNCTION, 1);
+
+ continue_label.Jump(&answer);
+ smi_label.Bind(&answer);
+ answer.ToRegister();
+ frame_->Spill(answer.reg());
+ __ not_(answer.reg());
+ __ and_(answer.reg(), ~kSmiTagMask); // Remove inverted smi-tag.
+ continue_label.Bind(&answer);
+ frame_->Push(&answer);
break;
}
case Token::ADD: {
// Smi check.
- Label continue_label;
- frame_->Pop(eax);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &continue_label);
-
- frame_->Push(eax);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-
- __ bind(&continue_label);
- frame_->Push(eax);
+ JumpTarget continue_label(this);
+ Result operand = frame_->Pop();
+ operand.ToRegister();
+ __ test(operand.reg(), Immediate(kSmiTagMask));
+ continue_label.Branch(zero, &operand, taken);
+
+ frame_->Push(&operand);
+ Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
+ CALL_FUNCTION, 1);
+
+ continue_label.Bind(&answer);
+ frame_->Push(&answer);
break;
}
}
-class CountOperationDeferred: public DeferredCode {
+class DeferredCountOperation: public DeferredCode {
public:
- CountOperationDeferred(CodeGenerator* generator,
+ DeferredCountOperation(CodeGenerator* generator,
bool is_postfix,
bool is_increment,
int result_offset)
is_postfix_(is_postfix),
is_increment_(is_increment),
result_offset_(result_offset) {
- set_comment("[ CountOperationDeferred");
+ set_comment("[ DeferredCountOperation");
}
virtual void Generate();
};
-void CountOperationDeferred::Generate() {
+void DeferredCountOperation::Generate() {
+ CodeGenerator* cgen = generator();
+
+ Result value(cgen);
+ enter()->Bind(&value);
+ value.ToRegister(eax); // The stubs below expect their argument in eax.
+
if (is_postfix_) {
RevertToNumberStub to_number_stub(is_increment_);
- __ CallStub(&to_number_stub);
+ value = cgen->frame()->CallStub(&to_number_stub, &value, 0);
}
+
CounterOpStub stub(result_offset_, is_postfix_, is_increment_);
- __ CallStub(&stub);
+ value = cgen->frame()->CallStub(&stub, &value, 0);
+ exit_.Jump(&value);
}
// Postfix: Make room for the result.
if (is_postfix) {
- frame_->Push(Immediate(0));
+ frame_->Push(Smi::FromInt(0));
}
{ Reference target(this, node->expression());
- if (target.is_illegal()) return;
- target.GetValue(NOT_INSIDE_TYPEOF);
+ if (target.is_illegal()) {
+ // Spoof the virtual frame to have the expected height (one higher
+ // than on entry).
+ if (!is_postfix) {
+ frame_->Push(Smi::FromInt(0));
+ }
+ return;
+ }
+ target.TakeValue(NOT_INSIDE_TYPEOF);
- CountOperationDeferred* deferred =
- new CountOperationDeferred(this, is_postfix, is_increment,
+ DeferredCountOperation* deferred =
+ new DeferredCountOperation(this, is_postfix, is_increment,
target.size() * kPointerSize);
- frame_->Pop(eax); // Load TOS into eax for calculations below
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
// Postfix: Store the old value as the result.
if (is_postfix) {
- __ mov(frame_->Element(target.size()), eax);
+ Result old_value = value;
+ frame_->SetElementAt(target.size(), &old_value);
+ }
+
+ // Perform optimistic increment/decrement. Ensure the value is
+ // writable.
+ frame_->Spill(value.reg());
+ ASSERT(allocator_->count(value.reg()) == 1);
+
+ // In order to combine the overflow and the smi check, we need to
+ // be able to allocate a byte register. We attempt to do so
+ // without spilling. If we fail, we will generate separate
+ // overflow and smi checks.
+ //
+ // We need to allocate and clear the temporary byte register
+ // before performing the count operation since clearing the
+ // register using xor will clear the overflow flag.
+ Result tmp = allocator_->AllocateByteRegisterWithoutSpilling();
+ if (tmp.is_valid()) {
+ __ Set(tmp.reg(), Immediate(0));
}
- // Perform optimistic increment/decrement.
if (is_increment) {
- __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ add(Operand(value.reg()), Immediate(Smi::FromInt(1)));
} else {
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ sub(Operand(value.reg()), Immediate(Smi::FromInt(1)));
}
// If the count operation didn't overflow and the result is a
// valid smi, we're done. Otherwise, we jump to the deferred
// slow-case code.
- __ j(overflow, deferred->enter(), not_taken);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, deferred->enter(), not_taken);
+ //
+ // We combine the overflow and the smi check if we could
+ // successfully allocate a temporary byte register.
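+ // setcc writes 1 into the byte register exactly when the optimistic
+ // add/sub overflowed; OR-ing it into the value sets the low bit in that
+ // case, so the single smi-tag test below (kSmiTag is 0) fails for
+ // overflowed results as well as for non-smis.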
+ if (tmp.is_valid()) {
+ __ setcc(overflow, tmp.reg());
+ __ or_(Operand(value.reg()), tmp.reg());
+ tmp.Unuse();
+ __ test(value.reg(), Immediate(kSmiTagMask));
+ deferred->enter()->Branch(not_zero, &value, not_taken);
+ } else {
+ deferred->enter()->Branch(overflow, &value, not_taken);
+ __ test(value.reg(), Immediate(kSmiTagMask));
+ deferred->enter()->Branch(not_zero, &value, not_taken);
+ }
// Store the new value in the target if not const.
- __ bind(deferred->exit());
- frame_->Push(eax); // Push the new value to TOS
- if (!is_const) target.SetValue(NOT_CONST_INIT);
+ deferred->BindExit(&value);
+ frame_->Push(&value);
+ if (!is_const) {
+ target.SetValue(NOT_CONST_INIT);
+ }
}
// Postfix: Discard the new value and use the old.
if (is_postfix) {
- frame_->Pop();
+ frame_->Drop();
}
}
void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+ // Note that due to an optimization in the comparison code (typeof
+ // compared to a string literal), evaluating a binary expression such
+ // as AND or OR may leave no value on the frame or in the cc register.
Comment cmnt(masm_, "[ BinaryOperation");
Token::Value op = node->op();
// before any ToBoolean() conversions. This means that the value
// produced by a && or || operator is not necessarily a boolean.
- // NOTE: If the left hand side produces a materialized value (not in
- // the CC register), we force the right hand side to do the
- // same. This is necessary because we may have to branch to the exit
- // after evaluating the left hand side (due to the shortcut
- // semantics), but the compiler must (statically) know if the result
- // of compiling the binary operation is materialized or not.
-
+ // NOTE: If the left hand side produces a materialized value (not
+ // control flow), we force the right hand side to do the same. This
+ // is necessary because we assume that if we get control flow on the
+ // last path out of an expression we got it on all paths.
if (op == Token::AND) {
- Label is_true;
- LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &is_true,
- false_target(), false);
- if (has_cc()) {
- Branch(false, false_target());
+ JumpTarget is_true(this);
+ ControlDestination dest(&is_true, destination()->false_target(), true);
+ LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+
+ if (dest.false_was_fall_through()) {
+ // The current false target was used as the fall-through. If
+ // there are no dangling jumps to is_true then the left
+ // subexpression was unconditionally false. Otherwise we have
+ // paths where we do have to evaluate the right subexpression.
+ if (is_true.is_linked()) {
+ // We need to compile the right subexpression. If the jump to
+ // the current false target was a forward jump then we have a
+ // valid frame, we have just bound the false target, and we
+ // have to jump around the code for the right subexpression.
+ if (has_valid_frame()) {
+ destination()->false_target()->Unuse();
+ destination()->false_target()->Jump();
+ }
+ is_true.Bind();
+ // The left subexpression compiled to control flow, so the
+ // right one is free to do so as well.
+ LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ } else {
+ // We have actually just jumped to or bound the current false
+ // target but the current control destination is not marked as
+ // used.
+ destination()->Use(false);
+ }
- // Evaluate right side expression.
- __ bind(&is_true);
- LoadCondition(node->right(), NOT_INSIDE_TYPEOF, true_target(),
- false_target(), false);
+ } else if (dest.is_used()) {
+ // The left subexpression compiled to control flow (and is_true
+ // was just bound), so the right is free to do so as well.
+ LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
} else {
- Label pop_and_continue, exit;
+ // We have a materialized value on the frame, so we exit with
+ // one on all paths. There are possibly also jumps to is_true
+ // from nested subexpressions.
+ JumpTarget pop_and_continue(this);
+ JumpTarget exit(this);
// Avoid popping the result if it converts to 'false' using the
// standard ToBoolean() conversion as described in ECMA-262,
// section 9.2, page 30.
- // Duplicate the TOS value. The duplicate will be popped by ToBoolean.
- __ mov(eax, frame_->Top());
- frame_->Push(eax);
- ToBoolean(&pop_and_continue, &exit);
- Branch(false, &exit);
+ //
+ // Duplicate the TOS value. The duplicate will be popped by
+ // ToBoolean.
+ frame_->Dup();
+ ControlDestination dest(&pop_and_continue, &exit, true);
+ ToBoolean(&dest);
// Pop the result of evaluating the first part.
- __ bind(&pop_and_continue);
- frame_->Pop();
+ frame_->Drop();
- // Evaluate right side expression.
- __ bind(&is_true);
+ // Compile right side expression.
+ is_true.Bind();
Load(node->right());
// Exit (always with a materialized value).
- __ bind(&exit);
+ exit.Bind();
}
} else if (op == Token::OR) {
- Label is_false;
- LoadCondition(node->left(), NOT_INSIDE_TYPEOF, true_target(),
- &is_false, false);
- if (has_cc()) {
- Branch(true, true_target());
+ JumpTarget is_false(this);
+ ControlDestination dest(destination()->true_target(), &is_false, false);
+ LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+
+ if (dest.true_was_fall_through()) {
+ // The current true target was used as the fall-through. If
+ // there are no dangling jumps to is_false then the left
+ // subexpression was unconditionally true. Otherwise we have
+ // paths where we do have to evaluate the right subexpression.
+ if (is_false.is_linked()) {
+ // We need to compile the right subexpression. If the jump to
+ // the current true target was a forward jump then we have a
+ // valid frame, we have just bound the true target, and we
+ // have to jump around the code for the right subexpression.
+ if (has_valid_frame()) {
+ destination()->true_target()->Unuse();
+ destination()->true_target()->Jump();
+ }
+ is_false.Bind();
+ // The left subexpression compiled to control flow, so the
+ // right one is free to do so as well.
+ LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ } else {
+ // We have just jumped to or bound the current true target but
+ // the current control destination is not marked as used.
+ destination()->Use(true);
+ }
- // Evaluate right side expression.
- __ bind(&is_false);
- LoadCondition(node->right(), NOT_INSIDE_TYPEOF, true_target(),
- false_target(), false);
+ } else if (dest.is_used()) {
+ // The left subexpression compiled to control flow (and is_false
+ // was just bound), so the right is free to do so as well.
+ LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
} else {
- Label pop_and_continue, exit;
+ // We have a materialized value on the frame, so we exit with
+ // one on all paths. There are possibly also jumps to is_false
+ // from nested subexpressions.
+ JumpTarget pop_and_continue(this);
+ JumpTarget exit(this);
// Avoid popping the result if it converts to 'true' using the
// standard ToBoolean() conversion as described in ECMA-262,
// section 9.2, page 30.
- // Duplicate the TOS value. The duplicate will be popped by ToBoolean.
- __ mov(eax, frame_->Top());
- frame_->Push(eax);
- ToBoolean(&exit, &pop_and_continue);
- Branch(true, &exit);
+ //
+ // Duplicate the TOS value. The duplicate will be popped by
+ // ToBoolean.
+ frame_->Dup();
+ ControlDestination dest(&exit, &pop_and_continue, false);
+ ToBoolean(&dest);
// Pop the result of evaluating the first part.
- __ bind(&pop_and_continue);
- frame_->Pop();
+ frame_->Drop();
- // Evaluate right side expression.
- __ bind(&is_false);
+ // Compile right side expression.
+ is_false.Bind();
Load(node->right());
// Exit (always with a materialized value).
- __ bind(&exit);
+ exit.Bind();
}
} else {
void CodeGenerator::VisitThisFunction(ThisFunction* node) {
- frame_->Push(frame_->Function());
+ frame_->PushFunction();
}
Expression* left = node->left();
Expression* right = node->right();
Token::Value op = node->op();
-
- // To make null checks efficient, we check if either left or right is the
- // literal 'null'. If so, we optimize the code by inlining a null check
- // instead of calling the (very) general runtime routine for checking
- // equality.
- if (op == Token::EQ || op == Token::EQ_STRICT) {
- bool left_is_null =
- left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
- bool right_is_null =
- right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
- // The 'null' value can only be equal to 'null' or 'undefined'.
- if (left_is_null || right_is_null) {
- Load(left_is_null ? right : left);
- frame_->Pop(eax);
- __ cmp(eax, Factory::null_value());
-
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- if (op != Token::EQ_STRICT) {
- __ j(equal, true_target());
-
- __ cmp(eax, Factory::undefined_value());
- __ j(equal, true_target());
-
- __ test(eax, Immediate(kSmiTagMask));
- __ j(equal, false_target());
-
- // It can be an undetectable object.
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kBitFieldOffset));
- __ and_(eax, 1 << Map::kIsUndetectable);
- __ cmp(eax, 1 << Map::kIsUndetectable);
- }
-
- cc_reg_ = equal;
- return;
- }
- }
-
// To make typeof testing for natives implemented in JavaScript really
// efficient, we generate special code for expressions of the form:
// 'typeof <expression> == <string>'.
right->AsLiteral()->handle()->IsString())) {
Handle<String> check(String::cast(*right->AsLiteral()->handle()));
- // Load the operand and move it to register edx.
+ // Load the operand and move it to a register.
LoadTypeofExpression(operation->expression());
- frame_->Pop(edx);
+ Result answer = frame_->Pop();
+ answer.ToRegister();
if (check->Equals(Heap::number_symbol())) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, true_target());
- __ mov(edx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(edx, Factory::heap_number_map());
- cc_reg_ = equal;
+ __ test(answer.reg(), Immediate(kSmiTagMask));
+ destination()->true_target()->Branch(zero);
+ frame_->Spill(answer.reg());
+ __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ cmp(answer.reg(), Factory::heap_number_map());
+ answer.Unuse();
+ destination()->Split(equal);
} else if (check->Equals(Heap::string_symbol())) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, false_target());
-
- __ mov(edx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ test(answer.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
// It can be an undetectable string object.
- __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
- __ and_(ecx, 1 << Map::kIsUndetectable);
- __ cmp(ecx, 1 << Map::kIsUndetectable);
- __ j(equal, false_target());
-
- __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_NONSTRING_TYPE);
- cc_reg_ = less;
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kBitFieldOffset));
+ __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+ destination()->false_target()->Branch(not_zero);
+ __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ movzx_b(temp.reg(),
+ FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
+ __ cmp(temp.reg(), FIRST_NONSTRING_TYPE);
+ temp.Unuse();
+ answer.Unuse();
+ destination()->Split(less);
} else if (check->Equals(Heap::boolean_symbol())) {
- __ cmp(edx, Factory::true_value());
- __ j(equal, true_target());
- __ cmp(edx, Factory::false_value());
- cc_reg_ = equal;
+ __ cmp(answer.reg(), Factory::true_value());
+ destination()->true_target()->Branch(equal);
+ __ cmp(answer.reg(), Factory::false_value());
+ answer.Unuse();
+ destination()->Split(equal);
} else if (check->Equals(Heap::undefined_symbol())) {
- __ cmp(edx, Factory::undefined_value());
- __ j(equal, true_target());
+ __ cmp(answer.reg(), Factory::undefined_value());
+ destination()->true_target()->Branch(equal);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, false_target());
+ __ test(answer.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
// It can be an undetectable object.
- __ mov(edx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
- __ and_(ecx, 1 << Map::kIsUndetectable);
- __ cmp(ecx, 1 << Map::kIsUndetectable);
-
- cc_reg_ = equal;
+ frame_->Spill(answer.reg());
+ __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ movzx_b(answer.reg(),
+ FieldOperand(answer.reg(), Map::kBitFieldOffset));
+ __ test(answer.reg(), Immediate(1 << Map::kIsUndetectable));
+ answer.Unuse();
+ destination()->Split(not_zero);
} else if (check->Equals(Heap::function_symbol())) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, false_target());
- __ mov(edx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
- __ cmp(edx, JS_FUNCTION_TYPE);
- cc_reg_ = equal;
+ __ test(answer.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
+ frame_->Spill(answer.reg());
+ __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ movzx_b(answer.reg(),
+ FieldOperand(answer.reg(), Map::kInstanceTypeOffset));
+ __ cmp(answer.reg(), JS_FUNCTION_TYPE);
+ answer.Unuse();
+ destination()->Split(equal);
} else if (check->Equals(Heap::object_symbol())) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, false_target());
-
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(edx, Factory::null_value());
- __ j(equal, true_target());
+ __ test(answer.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
+ __ cmp(answer.reg(), Factory::null_value());
+ destination()->true_target()->Branch(equal);
// It can be an undetectable object.
- __ movzx_b(edx, FieldOperand(ecx, Map::kBitFieldOffset));
- __ and_(edx, 1 << Map::kIsUndetectable);
- __ cmp(edx, 1 << Map::kIsUndetectable);
- __ j(equal, false_target());
-
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(less, false_target());
- __ cmp(ecx, LAST_JS_OBJECT_TYPE);
- cc_reg_ = less_equal;
-
+ Result map = allocator()->Allocate();
+ ASSERT(map.is_valid());
+ __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
+ __ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
+ destination()->false_target()->Branch(not_zero);
+ __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
+ __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
+ destination()->false_target()->Branch(less);
+ __ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
+ answer.Unuse();
+ map.Unuse();
+ destination()->Split(less_equal);
} else {
// Uncommon case: typeof testing against a string literal that is
// never returned from the typeof operator.
- __ jmp(false_target());
+ answer.Unuse();
+ destination()->Goto(false);
}
return;
}
case Token::IN: {
Load(left);
Load(right);
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- frame_->Push(eax); // push the result
+ Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
+ frame_->Push(&answer); // push the result
return;
}
case Token::INSTANCEOF: {
Load(left);
Load(right);
InstanceofStub stub;
- __ CallStub(&stub);
- __ test(eax, Operand(eax));
- cc_reg_ = zero;
+ Result answer = frame_->CallStub(&stub, 2);
+ answer.ToRegister();
+ __ test(answer.reg(), Operand(answer.reg()));
+ answer.Unuse();
+ destination()->Split(zero);
return;
}
default:
UNREACHABLE();
}
-
- // Optimize for the case where (at least) one of the expressions
- // is a literal small integer.
- if (IsInlineSmi(left->AsLiteral())) {
- Load(right);
- SmiComparison(ReverseCondition(cc), left->AsLiteral()->handle(), strict);
- return;
- }
- if (IsInlineSmi(right->AsLiteral())) {
- Load(left);
- SmiComparison(cc, right->AsLiteral()->handle(), strict);
- return;
- }
-
Load(left);
Load(right);
- Comparison(cc, strict);
+ Comparison(cc, strict, destination());
+}
+
+
+#ifdef DEBUG
+bool CodeGenerator::HasValidEntryRegisters() {
+ return (allocator()->count(eax) == frame()->register_count(eax))
+ && (allocator()->count(ebx) == frame()->register_count(ebx))
+ && (allocator()->count(ecx) == frame()->register_count(ecx))
+ && (allocator()->count(edx) == frame()->register_count(edx))
+ && (allocator()->count(edi) == frame()->register_count(edi));
}
+#endif
class DeferredReferenceGetKeyedValue: public DeferredCode {
set_comment("[ DeferredReferenceGetKeyedValue");
}
- virtual void Generate() {
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- // Calculate the delta from the IC call instruction to the map
- // check cmp instruction in the inlined version. This delta is
- // stored in a test(eax, delta) instruction after the call so that
- // we can find it in the IC initialization code and patch the cmp
- // instruction. This means that we cannot allow test instructions
- // after calls to KeyedLoadIC stubs in other places.
- int delta_to_patch_site = __ SizeOfCodeGeneratedSince(patch_site());
- if (is_global_) {
- __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else {
- __ call(ic, RelocInfo::CODE_TARGET);
- }
- __ test(eax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
- }
+ virtual void Generate();
Label* patch_site() { return &patch_site_; }
};
+void DeferredReferenceGetKeyedValue::Generate() {
+ CodeGenerator* cgen = generator();
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Result receiver(cgen);
+ Result key(cgen);
+ enter()->Bind(&receiver, &key);
+ cgen->frame()->Push(&receiver); // First IC argument.
+ cgen->frame()->Push(&key); // Second IC argument.
+
+ // Calculate the delta from the IC call instruction to the map check
+ // cmp instruction in the inlined version. This delta is stored in
+ // a test(eax, delta) instruction after the call so that we can find
+ // it in the IC initialization code and patch the cmp instruction.
+ // This means that we cannot allow test instructions after calls to
+ // KeyedLoadIC stubs in other places.
+ Result value(cgen);
+ if (is_global_) {
+ value = cgen->frame()->CallCodeObject(ic,
+ RelocInfo::CODE_TARGET_CONTEXT,
+ 0);
+ } else {
+ value = cgen->frame()->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
+ }
+ // The result needs to be specifically the eax register because the
+ // offset to the patch site will be expected in a test eax
+ // instruction.
+ ASSERT(value.is_register() && value.reg().is(eax));
+ // The delta from the start of the map-compare instruction to the
+ // test eax instruction.
+ int delta_to_patch_site = __ SizeOfCodeGeneratedSince(patch_site());
+ __ test(value.reg(), Immediate(-delta_to_patch_site));
+ __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
+
+ // The receiver and key were spilled by the call, so their state as
+ // constants or copies has been changed. Thus, they need to be
+ // "mergable" in the block at the exit label and are therefore
+ // passed as return results here.
+ key = cgen->frame()->Pop();
+ receiver = cgen->frame()->Pop();
+ exit_.Jump(&receiver, &key, &value);
+}
+
#undef __
#define __ masm->
void Reference::GetValue(TypeofState typeof_state) {
+ ASSERT(!cgen_->in_spilled_code());
+ ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
- ASSERT(!cgen_->has_cc());
MacroAssembler* masm = cgen_->masm();
- VirtualFrame* frame = cgen_->frame();
switch (type_) {
case SLOT: {
Comment cmnt(masm, "[ Load from Slot");
// thrown below, we must distinguish between the two kinds of
// loads (typeof expression loads must not throw a reference
// error).
+ VirtualFrame* frame = cgen_->frame();
Comment cmnt(masm, "[ Load from named Property");
Handle<String> name(GetName());
Variable* var = expression_->AsVariableProxy()->AsVariable();
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Setup the name register.
- __ mov(ecx, name);
- if (var != NULL) {
- ASSERT(var->is_global());
- __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else {
- __ call(ic, RelocInfo::CODE_TARGET);
- }
- // Push the result.
- frame->Push(eax);
+ Result name_reg = cgen_->allocator()->Allocate(ecx);
+ ASSERT(name_reg.is_valid());
+ __ mov(name_reg.reg(), name);
+ ASSERT(var == NULL || var->is_global());
+ RelocInfo::Mode rmode = (var == NULL)
+ ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
+ Result answer = frame->CallCodeObject(ic, rmode, &name_reg, 0);
+ frame->Push(&answer);
break;
}
case KEYED: {
- // TODO(1241834): Make sure that it is safe to ignore the
- // distinction between expressions in a typeof and not in a
- // typeof.
+ // TODO(1241834): Make sure that it is safe to ignore the
+ // distinction between expressions in a typeof and not in a typeof.
+ Comment cmnt(masm, "[ Load from keyed Property");
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
Comment cmnt(masm, "[ Inlined array index load");
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(cgen_, is_global);
- // Load receiver and check that it is not a smi (only needed
- // if this is not a load from the global context) and that it
- // has the expected map.
- __ mov(edx, Operand(esp, kPointerSize));
+
+ Result key = cgen_->frame()->Pop();
+ Result receiver = cgen_->frame()->Pop();
+ key.ToRegister();
+ receiver.ToRegister();
+
+ // Check that the receiver is not a smi (only needed if this
+ // is not a load from the global context) and that it has the
+ // expected map.
if (!is_global) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, deferred->enter(), not_taken);
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->enter()->Branch(zero, &receiver, &key, not_taken);
}
+
// Initially, use an invalid map. The map is patched in the IC
// initialization code.
__ bind(deferred->patch_site());
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ __ cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
Immediate(Factory::null_value()));
- __ j(not_equal, deferred->enter(), not_taken);
- // Load key and check that it is a smi.
- __ mov(eax, Operand(esp, 0));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, deferred->enter(), not_taken);
- // Shift to get actual index value.
- __ sar(eax, kSmiTagSize);
+ deferred->enter()->Branch(not_equal, &receiver, &key, not_taken);
+
+ // Check that the key is a smi.
+ __ test(key.reg(), Immediate(kSmiTagMask));
+ deferred->enter()->Branch(not_zero, &receiver, &key, not_taken);
+
// Get the elements array from the receiver and check that it
// is not a dictionary.
- __ mov(edx, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ Result elements = cgen_->allocator()->Allocate();
+ ASSERT(elements.is_valid());
+ __ mov(elements.reg(),
+ FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+ __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
Immediate(Factory::hash_table_map()));
- __ j(equal, deferred->enter(), not_taken);
- // Check that key is within bounds.
- __ cmp(eax, FieldOperand(edx, Array::kLengthOffset));
- __ j(above_equal, deferred->enter(), not_taken);
- // Load and check that the result is not the hole.
- __ mov(eax,
- Operand(edx, eax, times_4, Array::kHeaderSize - kHeapObjectTag));
- __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
- __ j(equal, deferred->enter(), not_taken);
+ deferred->enter()->Branch(equal, &receiver, &key, not_taken);
+
+ // Shift the key to get the actual index value and check that
+ // it is within bounds.
+ Result index = cgen_->allocator()->Allocate();
+ ASSERT(index.is_valid());
+ __ mov(index.reg(), key.reg());
+ __ sar(index.reg(), kSmiTagSize);
+ __ cmp(index.reg(),
+ FieldOperand(elements.reg(), Array::kLengthOffset));
+ deferred->enter()->Branch(above_equal, &receiver, &key, not_taken);
+
+ // Load and check that the result is not the hole. We could
+ // reuse the index or elements register for the value.
+ //
+ // TODO(206): Consider whether it makes sense to try some
+ // heuristic about which register to reuse. For example, if
+ // one is eax, then we can reuse that one because the value
+ // coming from the deferred code will be in eax.
+ Result value = index;
+ __ mov(value.reg(), Operand(elements.reg(),
+ index.reg(),
+ times_4,
+ Array::kHeaderSize - kHeapObjectTag));
+ elements.Unuse();
+ index.Unuse();
+ __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
+ deferred->enter()->Branch(equal, &receiver, &key, not_taken);
__ IncrementCounter(&Counters::keyed_load_inline, 1);
- __ bind(deferred->exit());
+
+ // Restore the receiver and key to the frame and push the
+ // result on top of it.
+ deferred->BindExit(&receiver, &key, &value);
+ cgen_->frame()->Push(&receiver);
+ cgen_->frame()->Push(&key);
+ cgen_->frame()->Push(&value);
+
} else {
+ VirtualFrame* frame = cgen_->frame();
Comment cmnt(masm, "[ Load from keyed Property");
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- if (is_global) {
- __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else {
- __ call(ic, RelocInfo::CODE_TARGET);
- }
+ RelocInfo::Mode rmode = is_global
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ Result answer = frame->CallCodeObject(ic, rmode, 0);
// Make sure that we do not have a test instruction after the
// call. A test instruction after the call is used to
// indicate that we have generated an inline version of the
// keyed load. The explicit nop instruction is here because
// the push that follows might be peep-hole optimized away.
__ nop();
+ frame->Push(&answer);
}
- // Push the result.
- frame->Push(eax);
break;
}
}
+void Reference::TakeValue(TypeofState typeof_state) {
+ // For non-constant frame-allocated slots, we invalidate the value in the
+ // slot. For all others, we fall back on GetValue.
+ ASSERT(!cgen_->in_spilled_code());
+ ASSERT(!is_illegal());
+ if (type_ != SLOT) {
+ GetValue(typeof_state);
+ return;
+ }
+
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ ASSERT(slot != NULL);
+ if (slot->type() == Slot::LOOKUP ||
+ slot->type() == Slot::CONTEXT ||
+ slot->var()->mode() == Variable::CONST) {
+ GetValue(typeof_state);
+ return;
+ }
+
+ // Only non-constant, frame-allocated parameters and locals can reach
+ // here.
+ if (slot->type() == Slot::PARAMETER) {
+ cgen_->frame()->TakeParameterAt(slot->index());
+ } else {
+ ASSERT(slot->type() == Slot::LOCAL);
+ cgen_->frame()->TakeLocalAt(slot->index());
+ }
+}
+
+
void Reference::SetValue(InitState init_state) {
+ ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
- ASSERT(!cgen_->has_cc());
MacroAssembler* masm = cgen_->masm();
VirtualFrame* frame = cgen_->frame();
switch (type_) {
Comment cmnt(masm, "[ Store to Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- // For now, just do a runtime call.
- frame->Push(esi);
- frame->Push(Immediate(slot->var()->name()));
-
- if (init_state == CONST_INIT) {
- // Same as the case for a normal store, but ignores attribute
- // (e.g. READ_ONLY) of context slot so that we can initialize
- // const properties (introduced via eval("const foo = (some
- // expr);")). Also, uses the current function context instead of
- // the top context.
- //
- // Note that we must declare the foo upon entry of eval(), via a
- // context slot declaration, but we cannot initialize it at the
- // same time, because the const declaration may be at the end of
- // the eval code (sigh...) and the const variable may have been
- // used before (where its value is 'undefined'). Thus, we can only
- // do the initialization when we actually encounter the expression
- // and when the expression operands are defined and valid, and
- // thus we need the split into 2 operations: declaration of the
- // context slot followed by initialization.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
- }
- // Storing a variable must keep the (new) value on the expression
- // stack. This is necessary for compiling chained assignment
- // expressions.
- frame->Push(eax);
-
- } else {
- ASSERT(!slot->var()->is_dynamic());
-
- Label exit;
- if (init_state == CONST_INIT) {
- ASSERT(slot->var()->mode() == Variable::CONST);
- // Only the first const initialization must be executed (the slot
- // still contains 'the hole' value). When the assignment is
- // executed, the code is identical to a normal store (see below).
- Comment cmnt(masm, "[ Init const");
- __ mov(eax, cgen_->SlotOperand(slot, ecx));
- __ cmp(eax, Factory::the_hole_value());
- __ j(not_equal, &exit);
- }
-
- // We must execute the store. Storing a variable must keep the
- // (new) value on the stack. This is necessary for compiling
- // assignment expressions.
- //
- // Note: We will reach here even with slot->var()->mode() ==
- // Variable::CONST because of const declarations which will
- // initialize consts to 'the hole' value and by doing so, end up
- // calling this code.
- frame->Pop(eax);
- __ mov(cgen_->SlotOperand(slot, ecx), eax);
- frame->Push(eax); // RecordWrite may destroy the value in eax.
- if (slot->type() == Slot::CONTEXT) {
- // ecx is loaded with context when calling SlotOperand above.
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ RecordWrite(ecx, offset, eax, ebx);
- }
- // If we definitely did not jump over the assignment, we do not need
- // to bind the exit label. Doing so can defeat peephole
- // optimization.
- if (init_state == CONST_INIT) __ bind(&exit);
- }
+ cgen_->StoreToSlot(slot, init_state);
break;
}
Handle<String> name(GetName());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
// TODO(1222589): Make the IC grab the values from the stack.
- frame->Pop(eax);
+ Result argument = frame->Pop();
+ argument.ToRegister(eax);
+ ASSERT(argument.is_valid());
+ Result property_name = cgen_->allocator()->Allocate(ecx);
+ ASSERT(property_name.is_valid());
// Setup the name register.
- __ mov(ecx, name);
- __ call(ic, RelocInfo::CODE_TARGET);
- frame->Push(eax); // IC call leaves result in eax, push it out
+ __ mov(property_name.reg(), name);
+ Result answer = frame->CallCodeObject(ic, RelocInfo::CODE_TARGET,
+ &argument, &property_name, 0);
+ frame->Push(&answer);
break;
}
// Call IC code.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
// TODO(1222589): Make the IC grab the values from the stack.
- frame->Pop(eax);
- __ call(ic, RelocInfo::CODE_TARGET);
- frame->Push(eax); // IC call leaves result in eax, push it out
+ Result arg = frame->Pop();
+ arg.ToRegister(eax);
+ ASSERT(arg.is_valid());
+ Result answer = frame->CallCodeObject(ic, RelocInfo::CODE_TARGET,
+ &arg, 0);
+ frame->Push(&answer);
break;
}
}
+#undef __
+#define __ masm_->
+
+Result DeferredInlineBinaryOperation::GenerateInlineCode() {
+ // Perform the fast-case smi code for the operation (left <op> right) and
+ // return the result in a Result.
+ // If any fast-case tests fail, it jumps to the slow-case deferred code,
+ // which calls the binary operation stub, with the arguments (in registers)
+ // on top of the frame.
+
+ VirtualFrame* frame = generator()->frame();
+ // If the operation is division or modulus, ensure that the
+ // special registers needed (eax and edx) are free.
+ Result reg_eax(generator()); // Valid only if op is DIV or MOD.
+ Result reg_edx(generator()); // Valid only if op is DIV or MOD.
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ reg_eax = generator()->allocator()->Allocate(eax);
+ ASSERT(reg_eax.is_valid());
+ reg_edx = generator()->allocator()->Allocate(edx);
+ ASSERT(reg_edx.is_valid());
+ }
+
+ Result right = frame->Pop();
+ Result left = frame->Pop();
+ left.ToRegister();
+ right.ToRegister();
+ // Answer is used to compute the answer, leaving left and right unchanged.
+ // It is also returned from this function.
+ // It is used as a temporary register in a few places, as well.
+ Result answer(generator());
+ if (reg_eax.is_valid()) {
+ answer = reg_eax;
+ } else {
+ answer = generator()->allocator()->Allocate();
+ }
+ ASSERT(answer.is_valid());
+ // Perform the smi check.
+ __ mov(answer.reg(), Operand(left.reg()));
+ __ or_(answer.reg(), Operand(right.reg()));
+ ASSERT(kSmiTag == 0); // adjust zero check if not the case
+ __ test(answer.reg(), Immediate(kSmiTagMask));
+ enter()->Branch(not_zero, &left, &right, not_taken);
+
+ // All operations start by copying the left argument into answer.
+ __ mov(answer.reg(), Operand(left.reg()));
+ switch (op_) {
+ case Token::ADD:
+ __ add(answer.reg(), Operand(right.reg())); // add optimistically
+ enter()->Branch(overflow, &left, &right, not_taken);
+ break;
+
+ case Token::SUB:
+ __ sub(answer.reg(), Operand(right.reg())); // subtract optimistically
+ enter()->Branch(overflow, &left, &right, not_taken);
+ break;
+
+
+ case Token::MUL: {
+ // If the smi tag is 0 we can just leave the tag on one operand.
+ ASSERT(kSmiTag == 0); // adjust code below if not the case
+ // Remove tag from the left operand (but keep sign).
+ // Left hand operand has been copied into answer.
+ __ sar(answer.reg(), kSmiTagSize);
+ // Do multiplication of smis, leaving result in answer.
+ __ imul(answer.reg(), Operand(right.reg()));
+ // Go slow on overflows.
+ enter()->Branch(overflow, &left, &right, not_taken);
+ // Check for negative zero result. If product is zero,
+ // and one argument is negative, go to slow case.
+ // The frame is unchanged in this block, so local control flow can
+ // use a Label rather than a JumpTarget.
+ Label non_zero_result;
+ __ test(answer.reg(), Operand(answer.reg()));
+ __ j(not_zero, &non_zero_result, taken);
+ __ mov(answer.reg(), Operand(left.reg()));
+ __ or_(answer.reg(), Operand(right.reg()));
+ enter()->Branch(negative, &left, &right, not_taken);
+ __ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct.
+ __ bind(&non_zero_result);
+ break;
+ }
+
+ case Token::DIV: {
+ // Left hand argument has been copied into answer, which is eax.
+ // Sign extend eax into edx:eax.
+ __ cdq();
+ // Check for 0 divisor.
+ __ test(right.reg(), Operand(right.reg()));
+ enter()->Branch(zero, &left, &right, not_taken);
+ // Divide edx:eax by the right operand.
+ __ idiv(right.reg());
+ // Check for negative zero result. If result is zero, and divisor
+ // is negative, return a floating point negative zero.
+ // The frame is unchanged in this block, so local control flow can
+ // use a Label rather than a JumpTarget.
+ Label non_zero_result;
+ __ test(left.reg(), Operand(left.reg()));
+ __ j(not_zero, &non_zero_result, taken);
+ __ test(right.reg(), Operand(right.reg()));
+ enter()->Branch(negative, &left, &right, not_taken);
+ __ bind(&non_zero_result);
+ // Check for the corner case of dividing the most negative smi
+ // by -1. We cannot use the overflow flag, since it is not set
+ // by the idiv instruction.
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ cmp(reg_eax.reg(), 0x40000000);
+ enter()->Branch(equal, &left, &right, not_taken);
+ // Check that the remainder is zero.
+ __ test(reg_edx.reg(), Operand(reg_edx.reg()));
+ enter()->Branch(not_zero, &left, &right, not_taken);
+ // Tag the result and store it in answer.
+ ASSERT(kSmiTagSize == times_2); // adjust code if not the case
+ __ lea(answer.reg(), Operand(eax, eax, times_1, kSmiTag));
+ break;
+ }
+
+ case Token::MOD: {
+ // Left hand argument has been copied into answer, which is eax.
+ // Sign extend eax into edx:eax.
+ __ cdq();
+ // Check for 0 divisor.
+ __ test(right.reg(), Operand(right.reg()));
+ enter()->Branch(zero, &left, &right, not_taken);
+
+ // Divide edx:eax by the right operand.
+ __ idiv(right.reg());
+ // Check for negative zero result. If the result is zero and the
+ // dividend is negative, return a floating point negative zero.
+ // The frame is unchanged in this block, so local control flow can
+ // use a Label rather than a JumpTarget.
+ Label non_zero_result;
+ __ test(reg_edx.reg(), Operand(reg_edx.reg()));
+ __ j(not_zero, &non_zero_result, taken);
+ __ test(left.reg(), Operand(left.reg()));
+ enter()->Branch(negative, &left, &right, not_taken);
+ __ bind(&non_zero_result);
+ // The answer is in edx.
+ answer = reg_edx;
+ break;
+ }
+
+ case Token::BIT_OR:
+ __ or_(answer.reg(), Operand(right.reg()));
+ break;
+
+ case Token::BIT_AND:
+ __ and_(answer.reg(), Operand(right.reg()));
+ break;
+
+ case Token::BIT_XOR:
+ __ xor_(answer.reg(), Operand(right.reg()));
+ break;
+
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+ // Move right into ecx.
+ // Left is in two registers already, so even if left or answer is ecx,
+ // we can move right to it, and use the other one.
+ // Right operand must be in register cl because x86 likes it that way.
+ if (right.reg().is(ecx)) {
+ // Right is already in the right place. Left may be in the
+ // same register, which causes problems. Use answer instead.
+ if (left.reg().is(ecx)) {
+ left = answer;
+ }
+ } else if (left.reg().is(ecx)) {
+ generator()->frame()->Spill(left.reg());
+ __ mov(left.reg(), Operand(right.reg()));
+ right = left;
+ left = answer; // Use copy of left in answer as left.
+ } else if (answer.reg().is(ecx)) {
+ __ mov(answer.reg(), Operand(right.reg()));
+ right = answer;
+ } else {
+ Result reg_ecx = generator()->allocator()->Allocate(ecx);
+ ASSERT(reg_ecx.is_valid());
+ __ mov(reg_ecx.reg(), Operand(right.reg()));
+ right = reg_ecx;
+ }
+ ASSERT(left.reg().is_valid());
+ ASSERT(!left.reg().is(ecx));
+ ASSERT(right.reg().is(ecx));
+ answer.Unuse(); // Answer may now be being used for left or right.
+ // We will modify left and right, which we do not do in any other
+ // binary operation. The exits to slow code need to restore the
+ // original values of left and right, or at least values that give
+ // the same answer.
+
+ // We are modifying left and right. They must be spilled!
+ generator()->frame()->Spill(left.reg());
+ generator()->frame()->Spill(right.reg());
+
+ // Remove tags from operands (but keep sign).
+ __ sar(left.reg(), kSmiTagSize);
+ __ sar(ecx, kSmiTagSize);
+ // Perform the operation.
+ switch (op_) {
+ case Token::SAR:
+ __ sar(left.reg());
+ // No checks of result necessary
+ break;
+ case Token::SHR: {
+ __ shr(left.reg());
+ // Check that the *unsigned* result fits in a smi.
+ // Neither of the two high-order bits can be set:
+ // - 0x80000000: high bit would be lost when smi tagging.
+ // - 0x40000000: this number would convert to negative when
+ // Smi tagging. These two cases can only happen with shifts
+ // by 0 or 1 when handed a valid smi.
+ // If the answer cannot be represented by a SMI, restore
+ // the left and right arguments, and jump to slow case.
+ // The low bit of the left argument may be lost, but only
+ // in a case where it is dropped anyway.
+ JumpTarget result_ok(generator());
+ __ test(left.reg(), Immediate(0xc0000000));
+ result_ok.Branch(zero, &left, &right, taken);
+ __ shl(left.reg());
+ ASSERT(kSmiTag == 0);
+ __ shl(left.reg(), kSmiTagSize);
+ __ shl(right.reg(), kSmiTagSize);
+ enter()->Jump(&left, &right);
+ result_ok.Bind(&left, &right);
+ break;
+ }
+ case Token::SHL: {
+ __ shl(left.reg());
+ // Check that the *signed* result fits in a smi.
+ //
+ // TODO(207): Can reduce registers from 4 to 3 by
+ // preallocating ecx.
+ JumpTarget result_ok(generator());
+ Result smi_test_reg = generator()->allocator()->Allocate();
+ ASSERT(smi_test_reg.is_valid());
+ __ lea(smi_test_reg.reg(), Operand(left.reg(), 0x40000000));
+ __ test(smi_test_reg.reg(), Immediate(0x80000000));
+ smi_test_reg.Unuse();
+ result_ok.Branch(zero, &left, &right, taken);
+ __ shr(left.reg());
+ ASSERT(kSmiTag == 0);
+ __ shl(left.reg(), kSmiTagSize);
+ __ shl(right.reg(), kSmiTagSize);
+ enter()->Jump(&left, &right);
+ result_ok.Bind(&left, &right);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ // Smi-tag the result, in left, and make answer an alias for left.
+ answer = left;
+ answer.ToRegister();
+ ASSERT(kSmiTagSize == times_2); // adjust code if not the case
+ __ lea(answer.reg(),
+ Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return answer;
+}
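
For intuition, here is a minimal standalone sketch (not part of the patch) of the combined smi check performed at the top of GenerateInlineCode: with kSmiTag == 0 and a one-bit tag, OR-ing the two operands and testing the tag mask is nonzero exactly when at least one operand is a heap object. The names kAssumedSmiTagMask and BothAreSmis are illustrative only.

#include <cstdint>
#include <cstdio>

// Assumption: smis carry a 0 in the low bit, heap pointers a 1.
const uint32_t kAssumedSmiTagMask = 1;

bool BothAreSmis(uint32_t left, uint32_t right) {
  // A nonzero masked OR means at least one operand is not a smi.
  return ((left | right) & kAssumedSmiTagMask) == 0;
}

int main() {
  std::printf("%d %d %d\n",
              BothAreSmis(4, 8),    // both smi-tagged -> 1
              BothAreSmis(4, 9),    // one heap pointer -> 0
              BothAreSmis(5, 9));   // both heap pointers -> 0
  return 0;
}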
+
+
+#undef __
+#define __ masm->
+
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Perform fast-case smi code for the operation (eax <op> ebx) and
// leave result in register eax.
#define V8_CODEGEN_IA32_H_
#include "scopes.h"
+#include "register-allocator.h"
namespace v8 { namespace internal {
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-// -------------------------------------------------------------------------
-// Virtual frame
-
-class VirtualFrame BASE_EMBEDDED {
- public:
- explicit VirtualFrame(CodeGenerator* cgen);
-
- void Enter();
- void Exit();
-
- void AllocateLocals();
-
- Operand Top() const { return Operand(esp, 0); }
-
- Operand Element(int index) const {
- return Operand(esp, index * kPointerSize);
- }
-
- Operand Local(int index) const {
- ASSERT(0 <= index && index < frame_local_count_);
- return Operand(ebp, kLocal0Offset - index * kPointerSize);
- }
-
- Operand Function() const { return Operand(ebp, kFunctionOffset); }
-
- Operand Context() const { return Operand(ebp, kContextOffset); }
-
- Operand Parameter(int index) const {
- ASSERT(-1 <= index && index < parameter_count_);
- return Operand(ebp, (1 + parameter_count_ - index) * kPointerSize);
- }
-
- Operand Receiver() const { return Parameter(-1); }
-
- inline void Drop(int count);
-
- inline void Pop();
- inline void Pop(Register reg);
- inline void Pop(Operand operand);
-
- inline void Push(Register reg);
- inline void Push(Operand operand);
- inline void Push(Immediate immediate);
-
- private:
- static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
- static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
- static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
- MacroAssembler* masm_;
- int frame_local_count_;
- int parameter_count_;
-};
-
-
// -------------------------------------------------------------------------
// Reference support
// the expression stack, and it is left in place with its value above it.
void GetValue(TypeofState typeof_state);
+ // Generate code to push the value of a reference on top of the expression
+ // stack and then spill the stack frame. This function is used temporarily
+ // while the code generator is being transformed.
+ inline void GetValueAndSpill(TypeofState typeof_state);
+
+ // Like GetValue except that the slot is expected to be written to before
+ // being read from again. The value of the reference may be invalidated,
+ // causing subsequent attempts to read it to fail.
+ void TakeValue(TypeofState typeof_state);
+
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
// on the expression stack. The stored value is left in place (with the
};
+// -------------------------------------------------------------------------
+// Control destinations.
+
+// A control destination encapsulates a pair of jump targets and a
+// flag indicating which one is the preferred fall-through. The
+// preferred fall-through must be unbound, the other may be already
+// bound (ie, a backward target).
+//
+// The true and false targets may be jumped to unconditionally or
+// control may split conditionally. Unconditional jumping and
+// splitting should be emitted in tail position (as the last thing
+// when compiling an expression) because they can cause either target
+// to be bound or the non-fall-through target to be jumped to, leaving
+// an invalid virtual frame.
+//
+// The jump targets in the control destination can be extracted and
+// manipulated normally without affecting the state of the
+// destination.
+
+class ControlDestination BASE_EMBEDDED {
+ public:
+ ControlDestination(JumpTarget* true_target,
+ JumpTarget* false_target,
+ bool true_is_fall_through)
+ : true_target_(true_target),
+ false_target_(false_target),
+ true_is_fall_through_(true_is_fall_through),
+ is_used_(false) {
+ ASSERT(true_is_fall_through ? !true_target->is_bound()
+ : !false_target->is_bound());
+ }
+
+ // Accessors for the jump targets. Directly jumping or branching to
+ // or binding the targets will not update the destination's state.
+ JumpTarget* true_target() const { return true_target_; }
+ JumpTarget* false_target() const { return false_target_; }
+
+ // True if the destination has been jumped to unconditionally or
+ // control has been split to both targets. This predicate does not
+ // test whether the targets have been extracted and manipulated as
+ // raw jump targets.
+ bool is_used() const { return is_used_; }
+
+ // True if the destination is used and the true target (respectively
+ // false target) was the fall through. If the target is backward,
+ // "fall through" included jumping unconditionally to it.
+ bool true_was_fall_through() const {
+ return is_used_ && true_is_fall_through_;
+ }
+
+ bool false_was_fall_through() const {
+ return is_used_ && !true_is_fall_through_;
+ }
+
+ // Emit a branch to one of the true or false targets, and bind the
+ // other target. Because this binds the fall-through target, it
+ // should be emitted in tail position (as the last thing when
+ // compiling an expression).
+ void Split(Condition cc) {
+ ASSERT(!is_used_);
+ if (true_is_fall_through_) {
+ false_target_->Branch(NegateCondition(cc));
+ true_target_->Bind();
+ } else {
+ true_target_->Branch(cc);
+ false_target_->Bind();
+ }
+ is_used_ = true;
+ }
+
+ // Emit an unconditional jump in tail position, to the true target
+ // (if the argument is true) or the false target. The "jump" will
+ // actually bind the jump target if it is forward and jump to it if
+ // it is backward.
+ void Goto(bool where) {
+ ASSERT(!is_used_);
+ JumpTarget* target = where ? true_target_ : false_target_;
+ if (target->is_bound()) {
+ target->Jump();
+ } else {
+ target->Bind();
+ }
+ is_used_ = true;
+ true_is_fall_through_ = where;
+ }
+
+ // Mark this destination as used as if Goto had been called, but
+ // without generating a jump or binding a label (the control effect
+ // should have already happened). This is used when the left
+ // subexpression of a short-circuit boolean operator is compiled.
+ void Use(bool where) {
+ ASSERT(!is_used_);
+ ASSERT((where ? true_target_ : false_target_)->is_bound());
+ is_used_ = true;
+ true_is_fall_through_ = where;
+ }
+
+ // Swap the true and false targets but keep the same actual label as
+ // the fall through. This is used when compiling negated
+ // expressions, where we want to swap the targets but preserve the
+ // state.
+ void Invert() {
+ JumpTarget* temp_target = true_target_;
+ true_target_ = false_target_;
+ false_target_ = temp_target;
+
+ true_is_fall_through_ = !true_is_fall_through_;
+ }
+
+ private:
+ // True and false jump targets.
+ JumpTarget* true_target_;
+ JumpTarget* false_target_;
+
+ // Before using the destination: true if the true target is the
+ // preferred fall through, false if the false target is. After
+ // using the destination: true if the true target was actually used
+ // as the fall through, false if the false target was.
+ bool true_is_fall_through_;
+
+ // True if the Split or Goto functions have been called.
+ bool is_used_;
+};
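
To make the fall-through rule concrete, here is a standalone miniature (not part of the patch; MiniDestination is a hypothetical name) that just prints the pseudo-assembly Split() would emit: with the true target as the preferred fall-through, the branch goes to the false target on the negated condition and the true target is bound as the fall-through; otherwise the roles swap.

#include <cstdio>

struct MiniDestination {
  const char* true_target;
  const char* false_target;
  bool true_is_fall_through;

  // Print the branch/bind sequence the real Split(cc) would generate.
  void Split(const char* cc, const char* negated_cc) {
    if (true_is_fall_through) {
      std::printf("  j%s %s\n", negated_cc, false_target);  // branch away
      std::printf("%s:\n", true_target);                    // fall through
    } else {
      std::printf("  j%s %s\n", cc, true_target);
      std::printf("%s:\n", false_target);
    }
  }
};

int main() {
  MiniDestination dest = {"is_true", "is_false", true};
  dest.Split("e", "ne");  // e.g. after a cmp; execution falls through to is_true.
  return 0;
}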
+
+
// -------------------------------------------------------------------------
// Code generation state
// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the label pair). It is threaded through the
-// call stack. Constructing a state implicitly pushes it on the owning code
-// generator's stack of states, and destroying one implicitly pops it.
+// the form of the state of the jump target pair). It is threaded through
+// the call stack. Constructing a state implicitly pushes it on the owning
+// code generator's stack of states, and destroying one implicitly pops it.
+//
+// The code generator state is only used for expressions, so statements have
+// the initial state.
class CodeGenState BASE_EMBEDDED {
public:
explicit CodeGenState(CodeGenerator* owner);
// Create a code generator state based on a code generator's current
- // state. The new state has its own access type and pair of branch
- // labels, and no reference.
+ // state. The new state may or may not be inside a typeof, and has its
+ // own control destination.
CodeGenState(CodeGenerator* owner,
TypeofState typeof_state,
- Label* true_target,
- Label* false_target);
+ ControlDestination* destination);
// Destroy a code generator state and restore the owning code generator's
// previous state.
~CodeGenState();
+ // Accessors for the state.
TypeofState typeof_state() const { return typeof_state_; }
- Label* true_target() const { return true_target_; }
- Label* false_target() const { return false_target_; }
+ ControlDestination* destination() const { return destination_; }
private:
+ // The owning code generator.
CodeGenerator* owner_;
+
+ // A flag indicating whether we are compiling the immediate subexpression
+ // of a typeof expression.
TypeofState typeof_state_;
- Label* true_target_;
- Label* false_target_;
+
+ // A control destination in case the expression has a control-flow
+ // effect.
+ ControlDestination* destination_;
+
+ // The previous state of the owning code generator, restored when
+ // this state is destroyed.
CodeGenState* previous_;
};
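
The implicit push-on-construct/pop-on-destroy behaviour described above is a plain scoped-state idiom; a standalone sketch of the same pattern (not part of the patch; Owner and ScopedState are hypothetical names):

#include <cassert>

class Owner;

class ScopedState {
 public:
  explicit ScopedState(Owner* owner);
  ~ScopedState();
 private:
  Owner* owner_;
  ScopedState* previous_;
};

class Owner {
 public:
  Owner() : state_(0) {}
  ScopedState* state() const { return state_; }
  void set_state(ScopedState* s) { state_ = s; }
 private:
  ScopedState* state_;
};

ScopedState::ScopedState(Owner* owner)
    : owner_(owner), previous_(owner->state()) {
  owner_->set_state(this);  // constructing implicitly pushes
}

ScopedState::~ScopedState() {
  owner_->set_state(previous_);  // destroying implicitly pops
}

int main() {
  Owner owner;
  {
    ScopedState outer(&owner);
    {
      ScopedState inner(&owner);
      assert(owner.state() == &inner);
    }
    assert(owner.state() == &outer);  // previous state restored
  }
  assert(owner.state() == 0);
  return 0;
}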
VirtualFrame* frame() const { return frame_; }
+ bool has_valid_frame() const { return frame_ != NULL; }
+
+ // Set the virtual frame to be new_frame, with non-frame register
+ // reference counts given by non_frame_registers. The non-frame
+ // register reference counts of the old frame are returned in
+ // non_frame_registers.
+ void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
+
+ void DeleteFrame();
+
+ RegisterAllocator* allocator() const { return allocator_; }
+
CodeGenState* state() { return state_; }
void set_state(CodeGenState* state) { state_ = state; }
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
+ bool in_spilled_code() const { return in_spilled_code_; }
+ void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
+
private:
// Construction/Destruction
CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
bool is_eval() { return is_eval_; }
// State
- bool has_cc() const { return cc_reg_ >= 0; }
TypeofState typeof_state() const { return state_->typeof_state(); }
- Label* true_target() const { return state_->true_target(); }
- Label* false_target() const { return state_->false_target(); }
+ ControlDestination* destination() const { return state_->destination(); }
// Track loop nesting level.
int loop_nesting() const { return loop_nesting_; }
// Node visitors.
+ void VisitStatements(ZoneList<Statement*>* statements);
+
#define DEF_VISIT(type) \
void Visit##type(type* node);
NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
+ // Visit a statement and then spill the virtual frame if control flow can
+ // reach the end of the statement (ie, it does not exit via break,
+ // continue, return, or throw). This function is used temporarily while
+ // the code generator is being transformed.
+ void VisitAndSpill(Statement* statement) {
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ Visit(statement);
+ if (frame_ != NULL) {
+ frame_->SpillAll();
+ }
+ set_in_spilled_code(true);
+ }
+
+ // Visit a list of statements and then spill the virtual frame if control
+ // flow can reach the end of the list.
+ void VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ VisitStatements(statements);
+ if (frame_ != NULL) {
+ frame_->SpillAll();
+ }
+ set_in_spilled_code(true);
+ }
+
// Main code generation function
void GenCode(FunctionLiteral* fun);
+ // Generate the return sequence code. Should be called no more than once
+ // per compiled function (it binds the return target, which can not be
+ // done more than once). The return value is assumed to be in eax by the
+ // code generated.
+ void GenerateReturnSequence();
+
// The following are used by class Reference.
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
Operand SlotOperand(Slot* slot, Register tmp);
Operand ContextSlotOperandCheckExtensions(Slot* slot,
- Register tmp,
- Label* slow);
+ Result tmp,
+ JumpTarget* slow);
// Expressions
Operand GlobalObject() const {
void LoadCondition(Expression* x,
TypeofState typeof_state,
- Label* true_target,
- Label* false_target,
- bool force_cc);
+ ControlDestination* destination,
+ bool force_control);
void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
void LoadGlobal();
- void LoadGlobalReceiver(Register scratch);
+ void LoadGlobalReceiver();
+
+ // Generate code to push the value of an expression on top of the frame
+ // and then spill the frame fully to memory. This function is used
+ // temporarily while the code generator is being transformed.
+ void LoadAndSpill(Expression* expression,
+ TypeofState typeof_state = NOT_INSIDE_TYPEOF) {
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ Load(expression, typeof_state);
+ frame_->SpillAll();
+ set_in_spilled_code(true);
+ }
+
+ // Call LoadCondition and then spill the virtual frame unless control flow
+ // cannot reach the end of the expression (ie, by emitting only
+ // unconditional jumps to the control targets).
+ void LoadConditionAndSpill(Expression* expression,
+ TypeofState typeof_state,
+ ControlDestination* destination,
+ bool force_control) {
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ LoadCondition(expression, typeof_state, destination, force_control);
+ if (frame_ != NULL) {
+ frame_->SpillAll();
+ }
+ set_in_spilled_code(true);
+ }
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
- void LoadFromGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- Register tmp,
- Label* slow);
+ Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
+ TypeofState typeof_state,
+ JumpTarget* slow);
+
+ // Store the value on top of the expression stack into a slot, leaving the
+ // value in place.
+ void StoreToSlot(Slot* slot, InitState init_state);
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// through the context chain.
void LoadTypeofExpression(Expression* x);
- void ToBoolean(Label* true_target, Label* false_target);
+ // Translate the value on top of the frame into control flow to the
+ // control destination.
+ void ToBoolean(ControlDestination* destination);
void GenericBinaryOperation(Token::Value op,
StaticType* type,
const OverwriteMode overwrite_mode = NO_OVERWRITE);
- void Comparison(Condition cc, bool strict = false);
+ void Comparison(Condition cc,
+ bool strict,
+ ControlDestination* destination);
- // Inline small integer literals. To prevent long attacker-controlled byte
- // sequences, we only inline small Smis.
+ // To prevent long attacker-controlled byte sequences, integer constants
+ // from the JavaScript source are loaded in two parts if they are larger
+ // than 16 bits.
static const int kMaxSmiInlinedBits = 16;
+ bool IsUnsafeSmi(Handle<Object> value);
+ // Load an integer constant x into a register target using
+ // at most 16 bits of user-controlled data per assembly operation.
+ void LoadUnsafeSmi(Register target, Handle<Object> value);
+
bool IsInlineSmi(Literal* literal);
- void SmiComparison(Condition cc, Handle<Object> value, bool strict = false);
void SmiOperation(Token::Value op,
StaticType* type,
Handle<Object> value,
void CallWithArguments(ZoneList<Expression*>* arguments, int position);
- // Control flow
- void Branch(bool if_true, Label* L);
void CheckStack();
void CleanStack(int num_bytes);
// Generate the code for cases for the fast case switch.
// Called by GenerateFastCaseSwitchJumpTable.
void GenerateFastCaseSwitchCases(SwitchStatement* node,
- Vector<Label> case_labels);
+ Vector<Label> case_labels,
+ VirtualFrame* start_frame);
// Fast support for constant-Smi switches.
void GenerateFastCaseSwitchStatement(SwitchStatement* node,
// Methods used to indicate which source code is generated for. Source
// positions are collected by the assembler and emitted with the relocation
// information.
- void CodeForStatement(Node* node);
+ void CodeForFunctionPosition(FunctionLiteral* fun);
+ void CodeForStatementPosition(Node* node);
void CodeForSourcePosition(int pos);
+#ifdef DEBUG
+ // True if the registers are valid for entry to a block. There should be
+ // no frame-external references to eax, ebx, ecx, edx, or edi.
+ bool HasValidEntryRegisters();
+#endif
+
bool is_eval_; // Tells whether code is generated for eval.
+
Handle<Script> script_;
List<DeferredCode*> deferred_;
// Code generation state
Scope* scope_;
VirtualFrame* frame_;
- Condition cc_reg_;
+ RegisterAllocator* allocator_;
CodeGenState* state_;
- bool is_inside_try_;
int break_stack_height_;
int loop_nesting_;
- // Labels
- Label function_return_;
+ // Jump targets.
+ // The target of the return from the function.
+ JumpTarget function_return_;
+
+ // True if the function return is shadowed (ie, jumping to the target
+ // function_return_ does not jump to the true function return, but rather
+ // to some unlinking code).
+ bool function_return_is_shadowed_;
+
+ // True when we are in code that expects the virtual frame to be fully
+ // spilled. Some virtual frame functions are disabled in DEBUG builds when
+ // called from spilled code, because they do not leave the virtual frame
+ // in a spilled state.
+ bool in_spilled_code_;
friend class VirtualFrame;
+ friend class JumpTarget;
friend class Reference;
+ friend class Result;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
+void Reference::GetValueAndSpill(TypeofState typeof_state) {
+ ASSERT(cgen_->in_spilled_code());
+ cgen_->set_in_spilled_code(false);
+ GetValue(typeof_state);
+ cgen_->frame()->SpillAll();
+ cgen_->set_in_spilled_code(true);
+}
+
+
} } // namespace v8::internal
#endif // V8_CODEGEN_IA32_H_
namespace v8 { namespace internal {
DeferredCode::DeferredCode(CodeGenerator* generator)
- : masm_(generator->masm()),
- generator_(generator),
+ : generator_(generator),
+ masm_(generator->masm()),
+ enter_(generator),
+ exit_(generator, JumpTarget::BIDIRECTIONAL),
statement_position_(masm_->current_statement_position()),
position_(masm_->current_position()) {
generator->AddDeferred(this);
if (code->position() != RelocInfo::kNoPosition) {
masm->RecordPosition(code->position());
}
- // Bind labels and generate the code.
- masm->bind(code->enter());
+ // Generate the code.
Comment cmnt(masm, code->comment());
code->Generate();
- if (code->exit()->is_bound()) {
- masm->jmp(code->exit()); // platform independent?
- }
+ ASSERT(code->enter()->is_bound());
+ }
+}
+
+
+void CodeGenerator::SetFrame(VirtualFrame* new_frame,
+ RegisterFile* non_frame_registers) {
+ RegisterFile saved_counts;
+ if (has_valid_frame()) {
+ frame_->DetachFromCodeGenerator();
+ // The remaining register reference counts are the non-frame ones.
+ allocator_->SaveTo(&saved_counts);
+ }
+
+ if (new_frame != NULL) {
+ // Restore the non-frame register references that go with the new frame.
+ allocator_->RestoreFrom(non_frame_registers);
+ new_frame->AttachToCodeGenerator();
+ }
+
+ frame_ = new_frame;
+ saved_counts.CopyTo(non_frame_registers);
+}
+
+
+void CodeGenerator::DeleteFrame() {
+ if (has_valid_frame()) {
+ frame_->DetachFromCodeGenerator();
+ delete frame_;
+ frame_ = NULL;
}
}
return Handle<Code>::null();
}
- // Process any deferred code.
- cgen.ProcessDeferred();
-
// Allocate and install the code.
CodeDesc desc;
cgen.masm()->GetCode(&desc);
ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length();
- // Label pointer per number in range
+ // Label pointer per number in range.
SmartPointer<Label*> case_targets(NewArray<Label*>(range));
- // Label per switch case
+ // Label per switch case.
SmartPointer<Label> case_labels(NewArray<Label>(length));
- Label* fail_label = default_index >= 0 ? &(case_labels[default_index])
- : node->break_target();
+ Label* fail_label =
+ default_index >= 0 ? &(case_labels[default_index]) : NULL;
// Populate array of label pointers for each number in the range.
// Initially put the failure label everywhere.
// Overwrite with label of a case for the number value of that case.
// (In reverse order, so that if the same label occurs twice, the
// first one wins).
- for (int i = length-1; i >= 0 ; i--) {
+ for (int i = length - 1; i >= 0 ; i--) {
CaseClause* clause = cases->at(i);
if (!clause->is_default()) {
Object* label_value = *(clause->label()->AsLiteral()->handle());
void CodeGenerator::GenerateFastCaseSwitchCases(
SwitchStatement* node,
- Vector<Label> case_labels) {
+ Vector<Label> case_labels,
+ VirtualFrame* start_frame) {
ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length();
for (int i = 0; i < length; i++) {
Comment cmnt(masm(), "[ Case clause");
- masm()->bind(&(case_labels[i]));
+
+ // We may not have a virtual frame if control flow did not fall
+ // off the end of the previous case. In that case, use the start
+ // frame. Otherwise, we have to merge the existing one to the
+ // start frame as part of the previous case.
+ if (!has_valid_frame()) {
+ RegisterFile non_frame_registers = RegisterAllocator::Reserved();
+ SetFrame(new VirtualFrame(start_frame), &non_frame_registers);
+ } else {
+ frame_->MergeTo(start_frame);
+ }
+ masm()->bind(&case_labels[i]);
VisitStatements(cases->at(i)->statements());
}
-
- masm()->bind(node->break_target());
}
bool CodeGenerator::TryGenerateFastCaseSwitchStatement(SwitchStatement* node) {
+ // TODO(238): Due to issue 238, fast case switches can crash on ARM
+ // and possibly IA32. They are disabled for now.
+ // See http://code.google.com/p/v8/issues/detail?id=238
+ return false;
+
ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length();
CaseClause* clause = cases->at(i);
if (clause->is_default()) {
if (default_index >= 0) {
- return false; // More than one default label:
- // Defer to normal case for error.
- }
+ // There is more than one default label. Defer to the normal case
+ // for error.
+ return false;
+ }
default_index = i;
} else {
Expression* label = clause->label();
if (!value->IsSmi()) {
return false;
}
- int smi = Smi::cast(value)->value();
- if (smi < min_index) { min_index = smi; }
- if (smi > max_index) { max_index = smi; }
+ int int_value = Smi::cast(value)->value();
+ min_index = Min(int_value, min_index);
+ max_index = Max(int_value, max_index);
}
}
}
-void CodeGenerator::CodeForStatement(Node* node) {
+void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
+ if (FLAG_debug_info) {
+ int pos = fun->start_position();
+ if (pos != RelocInfo::kNoPosition) {
+ masm()->RecordStatementPosition(pos);
+ masm()->RecordPosition(pos);
+ }
+ }
+}
+
+
+void CodeGenerator::CodeForStatementPosition(Node* node) {
if (FLAG_debug_info) {
int pos = node->statement_pos();
if (pos != RelocInfo::kNoPosition) {
// of Visitor and that the following methods are available publicly:
// CodeGenerator::MakeCode
// CodeGenerator::SetFunctionInfo
-// CodeGenerator::AddDeferred
// CodeGenerator::masm
+// CodeGenerator::frame
+// CodeGenerator::has_valid_frame
+// CodeGenerator::SetFrame
+// CodeGenerator::DeleteFrame
+// CodeGenerator::allocator
+// CodeGenerator::AddDeferred
+// CodeGenerator::in_spilled_code
+// CodeGenerator::set_in_spilled_code
//
// These methods are either used privately by the shared code or implemented as
// shared code:
MacroAssembler* masm() const { return masm_; }
CodeGenerator* generator() const { return generator_; }
- Label* enter() { return &enter_; }
- Label* exit() { return &exit_; }
+ JumpTarget* enter() { return &enter_; }
+ void BindExit() { exit_.Bind(0); }
+ void BindExit(Result* result) { exit_.Bind(result, 1); }
+ void BindExit(Result* result0, Result* result1, Result* result2) {
+ exit_.Bind(result0, result1, result2, 3);
+ }
int statement_position() const { return statement_position_; }
int position() const { return position_; }
#endif
protected:
- // The masm_ field is manipulated when compiling stubs with the
- // BEGIN_STUB and END_STUB macros. For that reason, it cannot be
- // constant.
- MacroAssembler* masm_;
+ CodeGenerator* const generator_;
+ MacroAssembler* const masm_;
+ JumpTarget enter_;
+ JumpTarget exit_;
private:
- CodeGenerator* const generator_;
- Label enter_;
- Label exit_;
int statement_position_;
int position_;
#ifdef DEBUG
}
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+ UNREACHABLE(); // ARM does not have the concept of a byte register
+ return "nobytereg";
+}
+
+
const char* NameConverter::NameOfXMMRegister(int reg) const {
UNREACHABLE(); // ARM does not have any XMM registers
return "noxmmreg";
{0x85, "test", REG_OPER_OP_ORDER},
{0x31, "xor", OPER_REG_OP_ORDER},
{0x33, "xor", REG_OPER_OP_ORDER},
+ {0x87, "xchg", REG_OPER_OP_ORDER},
{0x8A, "mov_b", REG_OPER_OP_ORDER},
{0x8B, "mov", REG_OPER_OP_ORDER},
{-1, "", UNSET_OP_ORDER}
};
+static const char* set_conditional_mnem[] = {
+ /*0*/ "seto", "setno", "setc", "setnc",
+ /*4*/ "setz", "setnz", "setna", "seta",
+ /*8*/ "sets", "setns", "setpe", "setpo",
+ /*12*/ "setl", "setnl", "setng", "setg"
+};
+
+
enum InstructionType {
NO_INSTR,
ZERO_OPERANDS_INSTR,
SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
SetTableRange(REGISTER_INSTR, 0x50, 0x57, "push");
SetTableRange(REGISTER_INSTR, 0x58, 0x5F, "pop");
+ SetTableRange(REGISTER_INSTR, 0x91, 0x97, "xchg eax,"); // 0x90 is nop.
SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov");
}
}
+ const char* NameOfByteCPURegister(int reg) const {
+ return converter_.NameOfByteCPURegister(reg);
+ }
+
+
const char* NameOfXMMRegister(int reg) const {
return converter_.NameOfXMMRegister(reg);
}
*base = data & 7;
}
+ typedef const char* (DisassemblerIA32::*RegisterNameMapping)(int reg) const;
+ int PrintRightOperandHelper(byte* modrmp, RegisterNameMapping register_name);
int PrintRightOperand(byte* modrmp);
+ int PrintRightByteOperand(byte* modrmp);
int PrintOperands(const char* mnem, OperandOrder op_order, byte* data);
int PrintImmediateOp(byte* data);
int F7Instruction(byte* data);
int JumpShort(byte* data);
int JumpConditional(byte* data, const char* comment);
int JumpConditionalShort(byte* data, const char* comment);
+ int SetCC(byte* data);
int FPUInstruction(byte* data);
void AppendToBuffer(const char* format, ...);
tmp_buffer_pos_ += result;
}
-
-// Returns number of bytes used including the current *modrmp.
-// Writes instruction's right operand to 'tmp_buffer_'.
-int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
+int DisassemblerIA32::PrintRightOperandHelper(
+ byte* modrmp,
+ RegisterNameMapping register_name) {
int mod, regop, rm;
get_modrm(*modrmp, &mod, &regop, &rm);
switch (mod) {
int scale, index, base;
get_sib(sib, &scale, &index, &base);
if (index == esp && base == esp && scale == 0 /*times_1*/) {
- AppendToBuffer("[%s]", NameOfCPURegister(rm));
+ AppendToBuffer("[%s]", (this->*register_name)(rm));
return 2;
} else if (base == ebp) {
int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
AppendToBuffer("[%s*%d+0x%x]",
- NameOfCPURegister(index),
+ (this->*register_name)(index),
1 << scale,
disp);
return 6;
} else if (index != esp && base != ebp) {
// [base+index*scale]
AppendToBuffer("[%s+%s*%d]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
+ (this->*register_name)(base),
+ (this->*register_name)(index),
1 << scale);
return 2;
} else {
return 1;
}
} else {
- AppendToBuffer("[%s]", NameOfCPURegister(rm));
+ AppendToBuffer("[%s]", (this->*register_name)(rm));
return 1;
}
break;
int disp =
mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2) : *(modrmp + 2);
if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
- AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
+ AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
} else {
AppendToBuffer("[%s+%s*%d+0x%x]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
+ (this->*register_name)(base),
+ (this->*register_name)(index),
1 << scale,
disp);
}
// No sib.
int disp =
mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1) : *(modrmp + 1);
- AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
+ AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
return mod == 2 ? 5 : 2;
}
break;
case 3:
- AppendToBuffer("%s", NameOfCPURegister(rm));
+ AppendToBuffer("%s", (this->*register_name)(rm));
return 1;
default:
UnimplementedInstruction();
}
+int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
+ return PrintRightOperandHelper(modrmp, &DisassemblerIA32::NameOfCPURegister);
+}
+
+
+int DisassemblerIA32::PrintRightByteOperand(byte* modrmp) {
+ return PrintRightOperandHelper(modrmp,
+ &DisassemblerIA32::NameOfByteCPURegister);
+}
+
+
// Returns number of bytes used including the current *data.
// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
int DisassemblerIA32::PrintOperands(const char* mnem,
}
+// Returns number of bytes used, including *data.
+int DisassemblerIA32::SetCC(byte* data) {
+ assert(*data == 0x0F);
+ byte cond = *(data+1) & 0x0F;
+ const char* mnem = set_conditional_mnem[cond];
+ AppendToBuffer("%s ", mnem);
+ PrintRightByteOperand(data+2);
+ return 3; // includes 0x0F
+}
+
+
// Returns number of bytes used, including *data.
int DisassemblerIA32::FPUInstruction(byte* data) {
byte b1 = *data;
f0byte == 0xB7 || f0byte == 0xAF) {
data += 2;
data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
+ } else if ((f0byte & 0xF0) == 0x90) {
+ data += SetCC(data);
} else {
data += 2;
if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
static const char* cpu_regs[8] = {
- "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
+ "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
+};
+
+
+static const char* byte_cpu_regs[8] = {
+ "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"
};
static const char* xmm_regs[8] = {
- "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
};
}
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+ if (0 <= reg && reg < 8) return byte_cpu_regs[reg];
+ return "noreg";
+}
+
+
const char* NameConverter::NameOfXMMRegister(int reg) const {
if (0 <= reg && reg < 8) return xmm_regs[reg];
return "noxmmreg";
public:
virtual ~NameConverter() {}
virtual const char* NameOfCPURegister(int reg) const;
+ virtual const char* NameOfByteCPURegister(int reg) const;
virtual const char* NameOfXMMRegister(int reg) const;
virtual const char* NameOfAddress(byte* addr) const;
virtual const char* NameOfConstant(byte* addr) const;
}
}
-static const int kOutBufferSize = 256 + String::kMaxShortPrintLength;
+static const int kOutBufferSize = 1024 + String::kMaxShortPrintLength;
static const int kRelocInfoPosition = 57;
static int DecodeIt(FILE* f,
class JSArray;
class JSFunction;
class JSObject;
-class LabelCollector;
class LargeObjectSpace;
template <typename T, class P = FreeStoreAllocationPolicy> class List;
class LookupResult;
// The keyed load has a fast inlined case if the IC call instruction
// is immediately followed by a test instruction.
if (*test_instruction_address == kTestEaxByte) {
- // Fetch the offset from the call instruction to the map cmp
+ // Fetch the offset from the test instruction to the map cmp
// instruction. This offset is stored in the last 4 bytes of the
// 5 byte test instruction.
Address offset_address = test_instruction_address + 1;
int offset_value = *(reinterpret_cast<int*>(offset_address));
- // Compute the map address. The operand-immediate compare
- // instruction is two bytes larger than a call instruction so we
- // add 2 to get to the map address.
- Address map_address = address + offset_value + 2;
+ // Compute the map address. The map address is in the last 4
+ // bytes of the 7-byte operand-immediate compare instruction, so
+ // we add 3 to the offset to get the map address.
+ Address map_address = test_instruction_address + offset_value + 3;
// patch the map check.
(*(reinterpret_cast<Object**>(map_address))) = value;
}
--- /dev/null
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "jump-target.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// JumpTarget implementation.
+
+#define __ masm_->
+
+void JumpTarget::Jump() {
+ ASSERT(cgen_ != NULL);
+ ASSERT(cgen_->has_valid_frame());
+ // Live non-frame registers are not allowed at unconditional jumps
+ // because we have no way of invalidating the corresponding results
+ // which are still live in the C++ code.
+ ASSERT(cgen_->HasValidEntryRegisters());
+
+ if (is_bound()) {
+ // Backward jump. There is an expected frame to merge to.
+ ASSERT(direction_ == BIDIRECTIONAL);
+ cgen_->frame()->MergeTo(entry_frame_);
+ cgen_->DeleteFrame();
+ __ jmp(&entry_label_);
+ } else {
+ // Forward jump. The current frame is added to the end of the list
+ // of frames reaching the target block and a jump to the merge code
+ // is emitted.
+ AddReachingFrame(cgen_->frame());
+ RegisterFile empty;
+ cgen_->SetFrame(NULL, &empty);
+ __ jmp(&merge_labels_.last());
+ }
+
+ is_linked_ = !is_bound_;
+}
+
+
+void JumpTarget::Branch(Condition cc, Hint ignored) {
+ ASSERT(cgen_ != NULL);
+ ASSERT(cgen_->has_valid_frame());
+
+ if (is_bound()) {
+ // Backward branch. We have an expected frame to merge to on the
+ // backward edge. We negate the condition and emit the merge code
+ // here.
+ //
+ // TODO(210): we should try to avoid negating the condition in the
+ // case where there is no merge code to emit. Otherwise, we emit
+ // a branch around an unconditional jump.
+ ASSERT(direction_ == BIDIRECTIONAL);
+ Label original_fall_through;
+ __ b(NegateCondition(cc), &original_fall_through);
+ // Swap the current frame for a copy of it, saving non-frame
+ // register reference counts and invalidating all non-frame register
+ // references except the reserved ones on the backward edge.
+ VirtualFrame* original_frame = cgen_->frame();
+ VirtualFrame* working_frame = new VirtualFrame(original_frame);
+ RegisterFile non_frame_registers = RegisterAllocator::Reserved();
+ cgen_->SetFrame(working_frame, &non_frame_registers);
+
+ working_frame->MergeTo(entry_frame_);
+ cgen_->DeleteFrame();
+ __ jmp(&entry_label_);
+
+ // Restore the frame and its associated non-frame registers.
+ cgen_->SetFrame(original_frame, &non_frame_registers);
+ __ bind(&original_fall_through);
+ } else {
+ // Forward branch. A copy of the current frame is added to the end
+ // of the list of frames reaching the target block and a branch to
+ // the merge code is emitted.
+ AddReachingFrame(new VirtualFrame(cgen_->frame()));
+ __ b(cc, &merge_labels_.last());
+ }
+
+ is_linked_ = !is_bound_;
+}
+
+
+void JumpTarget::Call() {
+ // Call is used to push the address of the catch block on the stack as
+ // a return address when compiling try/catch and try/finally. We
+ // fully spill the frame before making the call. The expected frame
+ // at the label (which should be the only one) is the spilled current
+ // frame plus an in-memory return address. The "fall-through" frame
+ // at the return site is the spilled current frame.
+ ASSERT(cgen_ != NULL);
+ ASSERT(cgen_->has_valid_frame());
+ // There are no non-frame references across the call.
+ ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(!is_linked());
+
+ cgen_->frame()->SpillAll();
+ VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
+ target_frame->Adjust(1);
+ AddReachingFrame(target_frame);
+ __ bl(&merge_labels_.last());
+
+ is_linked_ = !is_bound_;
+}
+
+
+void JumpTarget::Bind(int mergable_elements) {
+ ASSERT(cgen_ != NULL);
+ ASSERT(!is_bound());
+
+ // Live non-frame registers are not allowed at the start of a basic
+ // block.
+ ASSERT(!cgen_->has_valid_frame() || cgen_->HasValidEntryRegisters());
+
+ // Compute the frame to use for entry to the block.
+ ComputeEntryFrame(mergable_elements);
+
+ if (is_linked()) {
+ // There were forward jumps. Handle merging the reaching frames
+ // and possible fall through to the entry frame.
+
+ // Some moves required to merge to an expected frame require
+ // purely frame state changes, and do not require any code
+ // generation. Perform those first to increase the possibility of
+ // finding equal frames below.
+ if (cgen_->has_valid_frame()) {
+ cgen_->frame()->PrepareMergeTo(entry_frame_);
+ }
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ reaching_frames_[i]->PrepareMergeTo(entry_frame_);
+ }
+
+ // If there is a fall through to the jump target and it needs
+ // merge code, process it first.
+ if (cgen_->has_valid_frame() && !cgen_->frame()->Equals(entry_frame_)) {
+ // Loop over all the reaching frames, looking for any that can
+ // share merge code with this one.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ if (cgen_->frame()->Equals(reaching_frames_[i])) {
+ // Set the reaching frames element to null to avoid
+ // processing it later, and then bind its entry label.
+ delete reaching_frames_[i];
+ reaching_frames_[i] = NULL;
+ __ bind(&merge_labels_[i]);
+ }
+ }
+
+ // Emit the merge code.
+ cgen_->frame()->MergeTo(entry_frame_);
+ }
+
+ // Loop over the (non-null) reaching frames and process any that
+ // need merge code.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ VirtualFrame* frame = reaching_frames_[i];
+ if (frame != NULL && !frame->Equals(entry_frame_)) {
+ // Set the reaching frames element to null to avoid processing
+ // it later. Do not delete it as it is needed for merging.
+ reaching_frames_[i] = NULL;
+
+ // If the code generator has a current frame (a fall-through
+ // or a previously merged frame), insert a jump around the
+ // merge code we are about to generate.
+ if (cgen_->has_valid_frame()) {
+ cgen_->DeleteFrame();
+ __ jmp(&entry_label_);
+ }
+
+ // Set the frame to merge as the code generator's current
+ // frame and bind its merge label.
+ RegisterFile reserved_registers = RegisterAllocator::Reserved();
+ cgen_->SetFrame(frame, &reserved_registers);
+ __ bind(&merge_labels_[i]);
+
+ // Loop over the remaining (non-null) reaching frames, looking
+ // for any that can share merge code with this one.
+ for (int j = i + 1; j < reaching_frames_.length(); j++) {
+ VirtualFrame* other = reaching_frames_[j];
+ if (other != NULL && frame->Equals(other)) {
+ delete other;
+ reaching_frames_[j] = NULL;
+ __ bind(&merge_labels_[j]);
+ }
+ }
+
+ // Emit the merge code.
+ cgen_->frame()->MergeTo(entry_frame_);
+ }
+ }
+
+ // The code generator may not have a current frame if there was no
+ // fall through and none of the reaching frames needed merging.
+ // In that case, clone the entry frame as the current frame.
+ if (!cgen_->has_valid_frame()) {
+ RegisterFile reserved_registers = RegisterAllocator::Reserved();
+ cgen_->SetFrame(new VirtualFrame(entry_frame_), &reserved_registers);
+ }
+
+ // There is certainly a current frame equal to the entry frame.
+ // Bind the entry frame label.
+ __ bind(&entry_label_);
+
+ // There may be unprocessed reaching frames that did not need
+ // merge code. Bind their merge labels to be the same as the
+ // entry label.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ if (reaching_frames_[i] != NULL) {
+ delete reaching_frames_[i];
+ __ bind(&merge_labels_[i]);
+ }
+ }
+
+ // All the reaching frames except the one that is the current
+ // frame (if it is one of the reaching frames) have been deleted.
+ reaching_frames_.Clear();
+ merge_labels_.Clear();
+
+ } else {
+ // There were no forward jumps. The current frame is merged to
+ // the entry frame.
+ cgen_->frame()->MergeTo(entry_frame_);
+ __ bind(&entry_label_);
+ }
+
+ is_linked_ = false;
+ is_bound_ = true;
+}
+
+#undef __
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "jump-target.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// JumpTarget implementation.
+
+#define __ masm_->
+
+void JumpTarget::Jump() {
+ ASSERT(cgen_ != NULL);
+ ASSERT(cgen_->has_valid_frame());
+ // Live non-frame registers are not allowed at unconditional jumps
+ // because we have no way of invalidating the corresponding results
+ // which are still live in the C++ code.
+ ASSERT(cgen_->HasValidEntryRegisters());
+
+ if (is_bound()) {
+ // Backward jump. There is an expected frame to merge to.
+ ASSERT(direction_ == BIDIRECTIONAL);
+ cgen_->frame()->MergeTo(entry_frame_);
+ cgen_->DeleteFrame();
+ __ jmp(&entry_label_);
+ } else {
+ // Forward jump. The current frame is added to the end of the list
+ // of frames reaching the target block and a jump to the merge code
+ // is emitted.
+ AddReachingFrame(cgen_->frame());
+ RegisterFile empty;
+ cgen_->SetFrame(NULL, &empty);
+ __ jmp(&merge_labels_.last());
+ }
+
+ is_linked_ = !is_bound_;
+}
+
+
+void JumpTarget::Branch(Condition cc, Hint hint) {
+ ASSERT(cgen_ != NULL);
+ ASSERT(cgen_->has_valid_frame());
+
+ if (is_bound()) {
+ // Backward branch. We have an expected frame to merge to on the
+ // backward edge. We negate the condition and emit the merge code
+ // here.
+ //
+ // TODO(210): we should try to avoid negating the condition in the
+ // case where there is no merge code to emit. Otherwise, we emit
+ // a branch around an unconditional jump.
+ ASSERT(direction_ == BIDIRECTIONAL);
+ Label original_fall_through;
+ __ j(NegateCondition(cc), &original_fall_through, NegateHint(hint));
+ // Swap the current frame for a copy of it, saving non-frame
+ // register reference counts and invalidating all non-frame register
+ // references except the reserved ones on the backward edge.
+ VirtualFrame* original_frame = cgen_->frame();
+ VirtualFrame* working_frame = new VirtualFrame(original_frame);
+ RegisterFile non_frame_registers = RegisterAllocator::Reserved();
+ cgen_->SetFrame(working_frame, &non_frame_registers);
+
+ working_frame->MergeTo(entry_frame_);
+ cgen_->DeleteFrame();
+ __ jmp(&entry_label_);
+
+ // Restore the frame and its associated non-frame registers.
+ cgen_->SetFrame(original_frame, &non_frame_registers);
+ __ bind(&original_fall_through);
+ } else {
+ // Forward branch. A copy of the current frame is added to the end
+ // of the list of frames reaching the target block and a branch to
+ // the merge code is emitted.
+ AddReachingFrame(new VirtualFrame(cgen_->frame()));
+ __ j(cc, &merge_labels_.last(), hint);
+ }
+
+ is_linked_ = !is_bound_;
+}
+
+
+void JumpTarget::Call() {
+ // Call is used to push the address of the catch block on the stack as
+ // a return address when compiling try/catch and try/finally. We
+ // fully spill the frame before making the call. The expected frame
+ // at the label (which should be the only one) is the spilled current
+ // frame plus an in-memory return address. The "fall-through" frame
+ // at the return site is the spilled current frame.
+ ASSERT(cgen_ != NULL);
+ ASSERT(cgen_->has_valid_frame());
+ // There are no non-frame references across the call.
+ ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(!is_linked());
+
+ cgen_->frame()->SpillAll();
+ VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
+ target_frame->Adjust(1);
+ AddReachingFrame(target_frame);
+ __ call(&merge_labels_.last());
+
+ is_linked_ = !is_bound_;
+}
+
+
+void JumpTarget::Bind(int mergable_elements) {
+ ASSERT(cgen_ != NULL);
+ ASSERT(!is_bound());
+
+ // Live non-frame registers are not allowed at the start of a basic
+ // block.
+ ASSERT(!cgen_->has_valid_frame() || cgen_->HasValidEntryRegisters());
+
+ // Compute the frame to use for entry to the block.
+ ComputeEntryFrame(mergable_elements);
+
+ if (is_linked()) {
+ // There were forward jumps. Handle merging the reaching frames
+ // and possible fall through to the entry frame.
+
+ // Some moves required to merge to an expected frame require
+ // purely frame state changes, and do not require any code
+ // generation. Perform those first to increase the possibility of
+ // finding equal frames below.
+ if (cgen_->has_valid_frame()) {
+ cgen_->frame()->PrepareMergeTo(entry_frame_);
+ }
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ reaching_frames_[i]->PrepareMergeTo(entry_frame_);
+ }
+
+ // If there is a fall through to the jump target and it needs
+ // merge code, process it first.
+ if (cgen_->has_valid_frame() && !cgen_->frame()->Equals(entry_frame_)) {
+ // Loop over all the reaching frames, looking for any that can
+ // share merge code with this one.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ if (cgen_->frame()->Equals(reaching_frames_[i])) {
+ // Set the reaching frames element to null to avoid
+ // processing it later, and then bind its entry label.
+ delete reaching_frames_[i];
+ reaching_frames_[i] = NULL;
+ __ bind(&merge_labels_[i]);
+ }
+ }
+
+ // Emit the merge code.
+ cgen_->frame()->MergeTo(entry_frame_);
+ }
+
+ // Loop over the (non-null) reaching frames and process any that
+ // need merge code.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ VirtualFrame* frame = reaching_frames_[i];
+ if (frame != NULL && !frame->Equals(entry_frame_)) {
+ // Set the reaching frames element to null to avoid processing
+ // it later. Do not delete it as it is needed for merging.
+ reaching_frames_[i] = NULL;
+
+ // If the code generator has a current frame (a fall-through
+ // or a previously merged frame), insert a jump around the
+ // merge code we are about to generate.
+ if (cgen_->has_valid_frame()) {
+ cgen_->DeleteFrame();
+ __ jmp(&entry_label_);
+ }
+
+ // Set the frame to merge as the code generator's current
+ // frame and bind its merge label.
+ RegisterFile reserved_registers = RegisterAllocator::Reserved();
+ cgen_->SetFrame(frame, &reserved_registers);
+ __ bind(&merge_labels_[i]);
+
+ // Loop over the remaining (non-null) reaching frames, looking
+ // for any that can share merge code with this one.
+ for (int j = i + 1; j < reaching_frames_.length(); j++) {
+ VirtualFrame* other = reaching_frames_[j];
+ if (other != NULL && frame->Equals(other)) {
+ delete other;
+ reaching_frames_[j] = NULL;
+ __ bind(&merge_labels_[j]);
+ }
+ }
+
+ // Emit the merge code.
+ cgen_->frame()->MergeTo(entry_frame_);
+ }
+ }
+
+ // The code generator may not have a current frame if there was no
+ // fall through and none of the reaching frames needed merging.
+ // In that case, clone the entry frame as the current frame.
+ if (!cgen_->has_valid_frame()) {
+ RegisterFile reserved_registers = RegisterAllocator::Reserved();
+ cgen_->SetFrame(new VirtualFrame(entry_frame_), &reserved_registers);
+ }
+
+ // There is certainly a current frame equal to the entry frame.
+ // Bind the entry frame label.
+ __ bind(&entry_label_);
+
+ // There may be unprocessed reaching frames that did not need
+ // merge code. Bind their merge labels to be the same as the
+ // entry label.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ if (reaching_frames_[i] != NULL) {
+ delete reaching_frames_[i];
+ __ bind(&merge_labels_[i]);
+ }
+ }
+
+ // All the reaching frames except the one that is the current
+ // frame (if it is one of the reaching frames) have been deleted.
+ reaching_frames_.Clear();
+ merge_labels_.Clear();
+
+ } else {
+ // There were no forward jumps. The current frame is merged to
+ // the entry frame.
+ cgen_->frame()->MergeTo(entry_frame_);
+ __ bind(&entry_label_);
+ }
+
+ is_linked_ = false;
+ is_bound_ = true;
+}
+
+#undef __
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "jump-target.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// JumpTarget implementation.
+
+JumpTarget::JumpTarget(CodeGenerator* cgen, Directionality direction)
+ : cgen_(cgen),
+ direction_(direction),
+ reaching_frames_(0),
+ merge_labels_(0),
+ entry_frame_(NULL),
+ is_bound_(false),
+ is_linked_(false) {
+ ASSERT(cgen_ != NULL);
+ masm_ = cgen_->masm();
+}
+
+
+JumpTarget::JumpTarget()
+ : cgen_(NULL),
+ masm_(NULL),
+ direction_(FORWARD_ONLY),
+ reaching_frames_(0),
+ merge_labels_(0),
+ entry_frame_(NULL),
+ is_bound_(false),
+ is_linked_(false) {
+}
+
+
+void JumpTarget::Initialize(CodeGenerator* cgen, Directionality direction) {
+ ASSERT(cgen != NULL);
+ ASSERT(cgen_ == NULL);
+ cgen_ = cgen;
+ masm_ = cgen->masm();
+ direction_ = direction;
+}
+
+
+void JumpTarget::Unuse() {
+ ASSERT(!is_linked());
+ entry_label_.Unuse();
+ delete entry_frame_;
+ entry_frame_ = NULL;
+ is_bound_ = false;
+ is_linked_ = false;
+}
+
+
+void JumpTarget::Reset() {
+ reaching_frames_.Clear();
+ merge_labels_.Clear();
+ entry_frame_ = NULL;
+ entry_label_.Unuse();
+ is_bound_ = false;
+ is_linked_ = false;
+}
+
+
+FrameElement* JumpTarget::Combine(FrameElement* left, FrameElement* right) {
+ // Given a pair of non-null frame element pointers, return one of
+ // them as an entry frame candidate or null if they are
+ // incompatible.
+
+  // If either is invalid, so is the result.
+ if (!left->is_valid()) return left;
+ if (!right->is_valid()) return right;
+
+ // If they have the same value, the result is the same. (Exception:
+ // bidirectional frames cannot have constants or copies.) If either
+  // is unsynced, so is the result.
+ if (left->is_memory() && right->is_memory()) return left;
+
+ if (left->is_register() && right->is_register() &&
+ left->reg().is(right->reg())) {
+ if (!left->is_synced()) {
+ return left;
+ } else {
+ return right;
+ }
+ }
+
+ if (direction_ == FORWARD_ONLY &&
+ left->is_constant() &&
+ right->is_constant() &&
+ left->handle().is_identical_to(right->handle())) {
+ if (!left->is_synced()) {
+ return left;
+ } else {
+ return right;
+ }
+ }
+
+ if (direction_ == FORWARD_ONLY &&
+ left->is_copy() &&
+ right->is_copy() &&
+ left->index() == right->index()) {
+ if (!left->is_synced()) {
+ return left;
+ } else {
+ return right;
+ }
+ }
+
+ // Otherwise they are incompatible and we will reallocate them.
+ return NULL;
+}
+
+
+void JumpTarget::ComputeEntryFrame(int mergable_elements) {
+  // Given: a collection of frames reaching the block by forward CFG edges
+ // (including the code generator's current frame) and the
+ // directionality of the block. Compute: an entry frame for the
+ // block.
+
+ // Choose an initial frame, either the code generator's current
+ // frame if there is one, or the first reaching frame if not.
+ VirtualFrame* initial_frame = cgen_->frame();
+ int start_index = 0; // Begin iteration with the 1st reaching frame.
+ if (initial_frame == NULL) {
+ initial_frame = reaching_frames_[0];
+ start_index = 1; // Begin iteration with the 2nd reaching frame.
+ }
+
+ // A list of pointers to frame elements in the entry frame. NULL
+ // indicates that the element has not yet been determined.
+ int length = initial_frame->elements_.length();
+ List<FrameElement*> elements(length);
+
+ // Convert the number of mergable elements (counted from the top
+ // down) to a frame high-water mark (counted from the bottom up).
+ // Elements strictly above the high-water index will be mergable in
+ // entry frames for bidirectional jump targets.
+ int high_water_mark = (mergable_elements == kAllElements)
+ ? VirtualFrame::kIllegalIndex // All frame indices are above this.
+      : length - mergable_elements - 1;  // Top index if mergable_elements == 0.
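+  // For example, with length == 5 and mergable_elements == 2 the mark is 2,
+  // so only the two topmost elements (indices 3 and 4) lie above it.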
+
+ // Initially populate the list of elements based on the initial
+ // frame.
+ for (int i = 0; i < length; i++) {
+ FrameElement element = initial_frame->elements_[i];
+ // We do not allow copies or constants in bidirectional frames.
+ if (direction_ == BIDIRECTIONAL &&
+ i > high_water_mark &&
+ (element.is_constant() || element.is_copy())) {
+ elements.Add(NULL);
+ } else {
+ elements.Add(&initial_frame->elements_[i]);
+ }
+ }
+
+ // Compute elements based on the other reaching frames.
+ if (start_index < reaching_frames_.length()) {
+ for (int i = 0; i < length; i++) {
+ for (int j = start_index; j < reaching_frames_.length(); j++) {
+ FrameElement* element = elements[i];
+
+ // Element computation is monotonic: new information will not
+ // change our decision about undetermined or invalid elements.
+ if (element == NULL || !element->is_valid()) break;
+
+ elements[i] = Combine(element, &reaching_frames_[j]->elements_[i]);
+ }
+ }
+ }
+
+ // Compute the registers already reserved by values in the frame.
+ // Count the reserved registers to avoid using them.
+ RegisterFile frame_registers = RegisterAllocator::Reserved();
+ for (int i = 0; i < length; i++) {
+ FrameElement* element = elements[i];
+ if (element != NULL && element->is_register()) {
+ frame_registers.Use(element->reg());
+ }
+ }
+
+ // Build the new frame. The frame already has memory elements for
+ // the parameters (including the receiver) and the return address.
+ // We will fill it up with memory elements.
+ entry_frame_ = new VirtualFrame(cgen_);
+ while (entry_frame_->elements_.length() < length) {
+ entry_frame_->elements_.Add(FrameElement::MemoryElement());
+ }
+
+ // Copy the already-determined frame elements to the entry frame,
+ // and allocate any still-undetermined frame elements to registers
+ // or memory, from the top down.
+ for (int i = length - 1; i >= 0; i--) {
+ if (elements[i] == NULL) {
+ // If the value is synced on all frames, put it in memory. This
+ // costs nothing at the merge code but will incur a
+ // memory-to-register move when the value is needed later.
+ bool is_synced = initial_frame->elements_[i].is_synced();
+ int j = start_index;
+ while (is_synced && j < reaching_frames_.length()) {
+ is_synced = reaching_frames_[j]->elements_[i].is_synced();
+ j++;
+ }
+ // There is nothing to be done if the elements are all synced.
+ // It is already recorded as a memory element.
+ if (is_synced) continue;
+
+ // Choose an available register. Prefer ones that the element
+ // is already occupying on some reaching frame.
+ RegisterFile candidate_registers;
+ int max_count = kMinInt;
+ int best_reg_code = no_reg.code_;
+
+ // Consider the initial frame.
+ FrameElement element = initial_frame->elements_[i];
+ if (element.is_register() &&
+ !frame_registers.is_used(element.reg())) {
+ candidate_registers.Use(element.reg());
+ max_count = 1;
+ best_reg_code = element.reg().code();
+ }
+ // Consider the other frames.
+ for (int j = start_index; j < reaching_frames_.length(); j++) {
+ element = reaching_frames_[j]->elements_[i];
+ if (element.is_register() &&
+ !frame_registers.is_used(element.reg())) {
+ candidate_registers.Use(element.reg());
+ if (candidate_registers.count(element.reg()) > max_count) {
+ max_count = candidate_registers.count(element.reg());
+ best_reg_code = element.reg().code();
+ }
+ }
+ }
+ // If there was no preferred choice consider any free register.
+ if (best_reg_code == no_reg.code_) {
+ for (int j = 0; j < kNumRegisters; j++) {
+ if (!frame_registers.is_used(j)) {
+ best_reg_code = j;
+ break;
+ }
+ }
+ }
+
+      // If there was a register choice, use it.  If not, do nothing
+      // (the element is already recorded as in memory).
+ if (best_reg_code != no_reg.code_) {
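+        // Register is a plain struct here, so it can be constructed
+        // directly from the chosen register code.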
+ Register reg = { best_reg_code };
+ frame_registers.Use(reg);
+ entry_frame_->elements_[i] =
+ FrameElement::RegisterElement(reg,
+ FrameElement::NOT_SYNCED);
+ }
+ } else {
+ // The element is already determined.
+ entry_frame_->elements_[i] = *elements[i];
+ }
+ }
+
+ // Fill in the other fields of the entry frame.
+ entry_frame_->local_count_ = initial_frame->local_count_;
+ entry_frame_->frame_pointer_ = initial_frame->frame_pointer_;
+
+ // The stack pointer is at the highest synced element or the base of
+ // the expression stack.
+ int stack_pointer = length - 1;
+ while (stack_pointer >= entry_frame_->expression_base_index() &&
+ !entry_frame_->elements_[stack_pointer].is_synced()) {
+ stack_pointer--;
+ }
+ entry_frame_->stack_pointer_ = stack_pointer;
+
+ // Unuse the reserved registers---they do not actually appear in
+ // the entry frame.
+ RegisterAllocator::UnuseReserved(&frame_registers);
+ entry_frame_->frame_registers_ = frame_registers;
+}
+
+
+void JumpTarget::Jump(Result* arg) {
+ ASSERT(cgen_ != NULL);
+ ASSERT(cgen_->has_valid_frame());
+
+ cgen_->frame()->Push(arg);
+ Jump();
+}
+
+
+void JumpTarget::Jump(Result* arg0, Result* arg1) {
+ ASSERT(cgen_ != NULL);
+ ASSERT(cgen_->has_valid_frame());
+
+ cgen_->frame()->Push(arg0);
+ cgen_->frame()->Push(arg1);
+ Jump();
+}
+
+
+void JumpTarget::Jump(Result* arg0, Result* arg1, Result* arg2) {
+ ASSERT(cgen_ != NULL);
+ ASSERT(cgen_->has_valid_frame());
+
+ cgen_->frame()->Push(arg0);
+ cgen_->frame()->Push(arg1);
+ cgen_->frame()->Push(arg2);
+ Jump();
+}
+
+
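+// In debug builds these macros record an argument's type and register before
+// a branch and assert that neither has changed on the fall-through path.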
+#ifdef DEBUG
+#define DECLARE_ARGCHECK_VARS(name) \
+ Result::Type name##_type = name->type(); \
+ Register name##_reg = name->is_register() ? name->reg() : no_reg
+
+#define ASSERT_ARGCHECK(name) \
+ ASSERT(name->type() == name##_type); \
+ ASSERT(!name->is_register() || name->reg().is(name##_reg))
+
+#else
+#define DECLARE_ARGCHECK_VARS(name) do {} while (false)
+
+#define ASSERT_ARGCHECK(name) do {} while (false)
+#endif
+
+void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
+ ASSERT(cgen_ != NULL);
+ ASSERT(cgen_->has_valid_frame());
+
+ // We want to check that non-frame registers at the call site stay in
+ // the same registers on the fall-through branch.
+ DECLARE_ARGCHECK_VARS(arg);
+
+ cgen_->frame()->Push(arg);
+ Branch(cc, hint);
+ *arg = cgen_->frame()->Pop();
+
+ ASSERT_ARGCHECK(arg);
+}
+
+
+void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
+ ASSERT(cgen_ != NULL);
+ ASSERT(cgen_->frame() != NULL);
+
+ // We want to check that non-frame registers at the call site stay in
+ // the same registers on the fall-through branch.
+ DECLARE_ARGCHECK_VARS(arg0);
+ DECLARE_ARGCHECK_VARS(arg1);
+
+ cgen_->frame()->Push(arg0);
+ cgen_->frame()->Push(arg1);
+ Branch(cc, hint);
+ *arg1 = cgen_->frame()->Pop();
+ *arg0 = cgen_->frame()->Pop();
+
+ ASSERT_ARGCHECK(arg0);
+ ASSERT_ARGCHECK(arg1);
+}
+
+
+void JumpTarget::Branch(Condition cc,
+ Result* arg0,
+ Result* arg1,
+ Result* arg2,
+ Hint hint) {
+ ASSERT(cgen_ != NULL);
+ ASSERT(cgen_->frame() != NULL);
+
+ // We want to check that non-frame registers at the call site stay in
+ // the same registers on the fall-through branch.
+ DECLARE_ARGCHECK_VARS(arg0);
+ DECLARE_ARGCHECK_VARS(arg1);
+ DECLARE_ARGCHECK_VARS(arg2);
+
+ cgen_->frame()->Push(arg0);
+ cgen_->frame()->Push(arg1);
+ cgen_->frame()->Push(arg2);
+ Branch(cc, hint);
+ *arg2 = cgen_->frame()->Pop();
+ *arg1 = cgen_->frame()->Pop();
+ *arg0 = cgen_->frame()->Pop();
+
+ ASSERT_ARGCHECK(arg0);
+ ASSERT_ARGCHECK(arg1);
+ ASSERT_ARGCHECK(arg2);
+}
+
+
+void JumpTarget::Branch(Condition cc,
+ Result* arg0,
+ Result* arg1,
+ Result* arg2,
+ Result* arg3,
+ Hint hint) {
+ ASSERT(cgen_ != NULL);
+ ASSERT(cgen_->frame() != NULL);
+
+ // We want to check that non-frame registers at the call site stay in
+ // the same registers on the fall-through branch.
+ DECLARE_ARGCHECK_VARS(arg0);
+ DECLARE_ARGCHECK_VARS(arg1);
+ DECLARE_ARGCHECK_VARS(arg2);
+ DECLARE_ARGCHECK_VARS(arg3);
+
+ cgen_->frame()->Push(arg0);
+ cgen_->frame()->Push(arg1);
+ cgen_->frame()->Push(arg2);
+ cgen_->frame()->Push(arg3);
+ Branch(cc, hint);
+ *arg3 = cgen_->frame()->Pop();
+ *arg2 = cgen_->frame()->Pop();
+ *arg1 = cgen_->frame()->Pop();
+ *arg0 = cgen_->frame()->Pop();
+
+ ASSERT_ARGCHECK(arg0);
+ ASSERT_ARGCHECK(arg1);
+ ASSERT_ARGCHECK(arg2);
+ ASSERT_ARGCHECK(arg3);
+}
+
+#undef DECLARE_ARGCHECK_VARS
+#undef ASSERT_ARGCHECK
+
+
+void JumpTarget::Bind(Result* arg, int mergable_elements) {
+ ASSERT(cgen_ != NULL);
+
+ if (cgen_->has_valid_frame()) {
+ cgen_->frame()->Push(arg);
+ }
+ Bind(mergable_elements);
+ *arg = cgen_->frame()->Pop();
+}
+
+
+void JumpTarget::Bind(Result* arg0, Result* arg1, int mergable_elements) {
+ ASSERT(cgen_ != NULL);
+
+ if (cgen_->has_valid_frame()) {
+ cgen_->frame()->Push(arg0);
+ cgen_->frame()->Push(arg1);
+ }
+ Bind(mergable_elements);
+ *arg1 = cgen_->frame()->Pop();
+ *arg0 = cgen_->frame()->Pop();
+}
+
+
+void JumpTarget::Bind(Result* arg0,
+ Result* arg1,
+ Result* arg2,
+ int mergable_elements) {
+ ASSERT(cgen_ != NULL);
+
+ if (cgen_->has_valid_frame()) {
+ cgen_->frame()->Push(arg0);
+ cgen_->frame()->Push(arg1);
+ cgen_->frame()->Push(arg2);
+ }
+ Bind(mergable_elements);
+ *arg2 = cgen_->frame()->Pop();
+ *arg1 = cgen_->frame()->Pop();
+ *arg0 = cgen_->frame()->Pop();
+}
+
+
+void JumpTarget::Bind(Result* arg0,
+ Result* arg1,
+ Result* arg2,
+ Result* arg3,
+ int mergable_elements) {
+ ASSERT(cgen_ != NULL);
+
+ if (cgen_->has_valid_frame()) {
+ cgen_->frame()->Push(arg0);
+ cgen_->frame()->Push(arg1);
+ cgen_->frame()->Push(arg2);
+ cgen_->frame()->Push(arg3);
+ }
+ Bind(mergable_elements);
+ *arg3 = cgen_->frame()->Pop();
+ *arg2 = cgen_->frame()->Pop();
+ *arg1 = cgen_->frame()->Pop();
+ *arg0 = cgen_->frame()->Pop();
+}
+
+
+void JumpTarget::CopyTo(JumpTarget* destination) {
+ ASSERT(destination != NULL);
+ destination->cgen_ = cgen_;
+ destination->masm_ = masm_;
+ destination->direction_ = direction_;
+ destination->reaching_frames_.Clear();
+ destination->merge_labels_.Clear();
+ ASSERT(reaching_frames_.length() == merge_labels_.length());
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ destination->reaching_frames_.Add(reaching_frames_[i]);
+ destination->merge_labels_.Add(merge_labels_[i]);
+ }
+ destination->entry_frame_ = entry_frame_;
+ destination->entry_label_ = entry_label_;
+ destination->is_bound_ = is_bound_;
+ destination->is_linked_ = is_linked_;
+}
+
+
+void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
+ ASSERT(reaching_frames_.length() == merge_labels_.length());
+ Label fresh;
+ merge_labels_.Add(fresh);
+ reaching_frames_.Add(frame);
+}
+
+
+// -------------------------------------------------------------------------
+// ShadowTarget implementation.
+
+ShadowTarget::ShadowTarget(JumpTarget* shadowed) {
+ ASSERT(shadowed != NULL);
+ other_target_ = shadowed;
+
+#ifdef DEBUG
+ is_shadowing_ = true;
+#endif
+  // While shadowing, this shadow target saves the state of the original.
+ shadowed->CopyTo(this);
+
+ // Setting the code generator to null prevents the shadow target from
+ // being used until shadowing stops.
+ cgen_ = NULL;
+ masm_ = NULL;
+
+ // The original's state is reset. We do not Unuse it because that
+ // would delete the expected frame and assert that the target is not
+ // linked.
+ shadowed->Reset();
+}
+
+
+void ShadowTarget::StopShadowing() {
+ ASSERT(is_shadowing_);
+
+ // This target does not have a valid code generator yet.
+ cgen_ = other_target_->code_generator();
+ ASSERT(cgen_ != NULL);
+ masm_ = cgen_->masm();
+
+ // The states of this target, which was shadowed, and the original
+ // target, which was shadowing, are swapped.
+ JumpTarget temp;
+ other_target_->CopyTo(&temp);
+ CopyTo(other_target_);
+ temp.CopyTo(this);
+ temp.Reset(); // So the destructor does not deallocate virtual frames.
+
+#ifdef DEBUG
+ is_shadowing_ = false;
+#endif
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_JUMP_TARGET_H_
+#define V8_JUMP_TARGET_H_
+
+#include "virtual-frame.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// Jump targets
+//
+// A jump target is an abstraction of a basic-block entry in generated
+// code. It collects all the virtual frames reaching the block by
+// forward jumps and pairs them with labels for the merge code along
+// all forward-reaching paths. When bound, an expected frame for the
+// block is determined and code is generated to merge to the expected
+// frame. For backward jumps, the merge code is generated at the edge
+// leaving the predecessor block.
+//
+// A jump target must have been reached via control flow (either by
+// jumping, branching, or falling through) at the time it is bound.
+// In particular, this means that at least one of the control-flow
+// graph edges reaching the target must be a forward edge.
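+//
+// A typical pattern, sketched here for illustration only ('cgen' and 'cc'
+// stand for a CodeGenerator* and a Condition available at the use site):
+//
+//   JumpTarget exit(cgen);      // forward-only target
+//   exit.Branch(cc);            // conditional forward branch; frame copied
+//   ...                         // code on the fall-through path
+//   exit.Bind();                // entry frame computed, merge code emitted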
+
+class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
+ public:
+ // Forward-only jump targets can only be reached by forward CFG edges.
+ enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
+
+ // Construct a jump target with a given code generator used to generate
+ // code and to provide access to a current frame.
+ explicit JumpTarget(CodeGenerator* cgen,
+ Directionality direction = FORWARD_ONLY);
+
+ // Construct a jump target without a code generator. A code generator
+ // must be supplied before using the jump target as a label. This is
+  // useful, e.g., when jump targets are embedded in AST nodes.
+ JumpTarget();
+
+ virtual ~JumpTarget() { Unuse(); }
+
+ // Supply a code generator and directionality to an already
+ // constructed jump target. This function expects to be given a
+ // non-null code generator, and to be called only when the code
+ // generator is not yet set.
+ void Initialize(CodeGenerator* cgen,
+ Directionality direction = FORWARD_ONLY);
+
+ // Accessors.
+ CodeGenerator* code_generator() const { return cgen_; }
+
+ Label* entry_label() { return &entry_label_; }
+
+ VirtualFrame* entry_frame() const { return entry_frame_; }
+ void set_entry_frame(VirtualFrame* frame) {
+ entry_frame_ = frame;
+ }
+
+ // Predicates testing the state of the encapsulated label.
+ bool is_bound() const { return is_bound_; }
+ bool is_linked() const { return is_linked_; }
+ bool is_unused() const { return !is_bound() && !is_linked(); }
+
+  // Treat the jump target as a fresh one.  The expected frame, if any,
+ // will be deallocated and there should be no dangling jumps to the
+ // target (thus no reaching frames).
+ void Unuse();
+
+ // Reset the internal state of this jump target. Pointed-to virtual
+ // frames are not deallocated and dangling jumps to the target are
+ // left dangling.
+ void Reset();
+
+ // Copy the state of this jump target to the destination. The lists
+ // of forward-reaching frames and merge-point labels are copied.
+ // All virtual frame pointers are copied, not the pointed-to frames.
+ // The previous state of the destination is overwritten, without
+ // deallocating pointed-to virtual frames.
+ void CopyTo(JumpTarget* destination);
+
+ // Emit a jump to the target. There must be a current frame at the
+ // jump and there will be no current frame after the jump.
+ void Jump();
+ void Jump(Result* arg);
+ void Jump(Result* arg0, Result* arg1);
+ void Jump(Result* arg0, Result* arg1, Result* arg2);
+
+ // Emit a conditional branch to the target. There must be a current
+ // frame at the branch. The current frame will fall through to the
+ // code after the branch.
+ void Branch(Condition cc, Hint hint = no_hint);
+ void Branch(Condition cc, Result* arg, Hint hint = no_hint);
+ void Branch(Condition cc, Result* arg0, Result* arg1, Hint hint = no_hint);
+ void Branch(Condition cc,
+ Result* arg0,
+ Result* arg1,
+ Result* arg2,
+ Hint hint = no_hint);
+ void Branch(Condition cc,
+ Result* arg0,
+ Result* arg1,
+ Result* arg2,
+ Result* arg3,
+ Hint hint = no_hint);
+
+ // Bind a jump target. If there is no current frame at the binding
+ // site, there must be at least one frame reaching via a forward
+ // jump.
+ //
+  // The number of mergable elements is the number of frame elements,
+  // counted from the top down, that must be "mergable" (not
+  // constants or copies) in the entry frame at the jump target.
+ // Backward jumps to the target must contain the same constants and
+ // sharing as the entry frame, except for the mergable elements.
+ //
+ // A mergable elements argument of kAllElements indicates that all
+ // frame elements must be mergable. Mergable elements are ignored
+ // completely for forward-only jump targets.
+ void Bind(int mergable_elements = kAllElements);
+ void Bind(Result* arg, int mergable_elements = kAllElements);
+ void Bind(Result* arg0, Result* arg1, int mergable_elements = kAllElements);
+ void Bind(Result* arg0,
+ Result* arg1,
+ Result* arg2,
+ int mergable_elements = kAllElements);
+ void Bind(Result* arg0,
+ Result* arg1,
+ Result* arg2,
+ Result* arg3,
+ int mergable_elements = kAllElements);
+
+ // Emit a call to a jump target. There must be a current frame at
+ // the call. The frame at the target is the same as the current
+ // frame except for an extra return address on top of it. The frame
+ // after the call is the same as the frame before the call.
+ void Call();
+
+ static const int kAllElements = -1; // Not a valid number of elements.
+
+ protected:
+ // The code generator gives access to its current frame.
+ CodeGenerator* cgen_;
+
+ // Used to emit code.
+ MacroAssembler* masm_;
+
+ private:
+ // Directionality flag set at initialization time.
+ Directionality direction_;
+
+ // A list of frames reaching this block via forward jumps.
+ List<VirtualFrame*> reaching_frames_;
+
+ // A parallel list of labels for merge code.
+ List<Label> merge_labels_;
+
+ // The frame used on entry to the block and expected at backward
+ // jumps to the block. Set when the jump target is bound, but may
+ // or may not be set for forward-only blocks.
+ VirtualFrame* entry_frame_;
+
+ // The actual entry label of the block.
+ Label entry_label_;
+
+ // A target is bound if its Bind member function has been called.
+ // It is linked if it is not bound but its Jump, Branch, or Call
+ // member functions have been called.
+ bool is_bound_;
+ bool is_linked_;
+
+ // Add a virtual frame reaching this labeled block via a forward
+ // jump, and a fresh label for its merge code.
+ void AddReachingFrame(VirtualFrame* frame);
+
+ // Choose an element from a pair of frame elements to be in the
+ // expected frame. Return null if they are incompatible.
+ FrameElement* Combine(FrameElement* left, FrameElement* right);
+
+ // Compute a frame to use for entry to this block. Mergable
+ // elements is as described for the Bind function.
+ void ComputeEntryFrame(int mergable_elements);
+
+ DISALLOW_COPY_AND_ASSIGN(JumpTarget);
+};
+
+
+// -------------------------------------------------------------------------
+// Shadow jump targets
+//
+// Shadow jump targets represent a jump target that is temporarily shadowed
+// by another one (represented by the original during shadowing). They are
+// used to catch jumps to labels in certain contexts, e.g., try blocks.
+// After shadowing ends, the formerly shadowed target is again represented
+// by the original and the ShadowTarget can be used as a jump target in its
+// own right, representing the formerly shadowing target.
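+//
+// Sketch of the intended use (illustrative only; 'target' is whatever jump
+// target is being shadowed):
+//
+//   ShadowTarget shadow(&target);  // jumps to 'target' are now intercepted
+//   ...                            // compile the shadowed region
+//   shadow.StopShadowing();        // 'target' is restored; 'shadow' now
+//                                  // represents the intercepted jumps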
+
+class ShadowTarget : public JumpTarget {
+ public:
+ // Construct a shadow jump target. After construction the shadow
+ // target object holds the state of the original jump target, and
+ // the original target is actually a fresh one that intercepts jumps
+ // intended for the shadowed one.
+ explicit ShadowTarget(JumpTarget* shadowed);
+
+ virtual ~ShadowTarget() {
+ ASSERT(!is_shadowing_);
+ }
+
+ // End shadowing. After shadowing ends, the original jump target
+ // again gives access to the formerly shadowed target and the shadow
+ // target object gives access to the formerly shadowing target.
+ void StopShadowing();
+
+ // During shadowing, the currently shadowing target. After
+ // shadowing, the target that was shadowed.
+ JumpTarget* other_target() const { return other_target_; }
+
+ private:
+ // During shadowing, the currently shadowing target. After
+ // shadowing, the target that was shadowed.
+ JumpTarget* other_target_;
+
+#ifdef DEBUG
+ bool is_shadowing_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(ShadowTarget);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_JUMP_TARGET_H_
ASSERT(0 <= i && i < length_);
return data_[i];
}
- inline T& at(int i) const { return this->operator[](i); }
- INLINE(const T& last() const) {
+ inline T& at(int i) const { return operator[](i); }
+ inline T& last() const {
ASSERT(!is_empty());
- return this->at(length_ - 1);
+ return at(length_ - 1);
}
INLINE(bool is_empty() const) { return length_ == 0; }
namespace v8 { namespace internal {
+// -------------------------------------------------------------------------
+// MacroAssembler implementation.
+
MacroAssembler::MacroAssembler(void* buffer, int size)
: Assembler(buffer, size),
unresolved_(0),
// scratch) OOOOAAAASSSS.
class ScratchBits: public BitField<uint32_t, 0, 4> {};
class AddressBits: public BitField<uint32_t, 4, 4> {};
- class ObjectBits: public BitField<uint32_t, 8, 4> {
-};
+ class ObjectBits: public BitField<uint32_t, 8, 4> {};
Major MajorKey() { return RecordWrite; }
}
+void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
+ Register result,
+ Register op,
+ JumpTarget* then_target) {
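+  // Branch to then_target when the result is zero and the operand is
+  // negative, i.e. when the zero may actually stand for -0.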
+ JumpTarget ok(cgen);
+ test(result, Operand(result));
+ ok.Branch(not_zero, taken);
+ test(op, Operand(op));
+ then_target->Branch(sign, not_taken);
+ ok.Bind();
+}
+
+
void MacroAssembler::NegativeZeroTest(Register result,
Register op,
Label* then_label) {
namespace v8 { namespace internal {
+// Forward declaration.
+class JumpTarget;
-// Helper type to make boolean flag easier to read at call-site.
+
+// Helper types to make flags easier to read at call sites.
enum InvokeFlag {
CALL_FUNCTION,
JUMP_FUNCTION
// Check if result is zero and op is negative.
void NegativeZeroTest(Register result, Register op, Label* then_label);
+ // Check if result is zero and op is negative in code using jump targets.
+ void NegativeZeroTest(CodeGenerator* cgen,
+ Register result,
+ Register op,
+ JumpTarget* then_target);
+
// Check if result is zero and any of op1 and op2 are negative.
// Register scratch is destroyed, and it must be different from op2.
void NegativeZeroTest(Register result, Register op1, Register op2,
return Operand(object, index, scale, offset - kHeapObjectTag);
}
-
} } // namespace v8::internal
#endif // V8_MACRO_ASSEMBLER_IA32_H_
BreakableStatement* LookupBreakTarget(Handle<String> label, bool* ok);
IterationStatement* LookupContinueTarget(Handle<String> label, bool* ok);
- void RegisterLabelUse(Label* label, int index);
+ void RegisterTargetUse(JumpTarget* target, int index);
// Create a number literal.
Literal* NewNumberLiteral(double value);
bool is_catch_block,
bool* ok) {
// Parse the statement and collect escaping labels.
- ZoneList<Label*>* label_list = NEW(ZoneList<Label*>(0));
- LabelCollector collector(label_list);
+ ZoneList<JumpTarget*>* target_list = NEW(ZoneList<JumpTarget*>(0));
+ TargetCollector collector(target_list);
Statement* stat;
{ Target target(this, &collector);
with_nesting_level_++;
// 2: The try-finally block evaluating the body.
Block* result = NEW(Block(NULL, 2, false));
- if (result) {
+ if (result != NULL) {
result->AddStatement(NEW(WithEnterStatement(obj, is_catch_block)));
// Create body block.
// Return a try-finally statement.
TryFinally* wrapper = NEW(TryFinally(body, exit));
- wrapper->set_escaping_labels(collector.labels());
+ wrapper->set_escaping_targets(collector.targets());
result->AddStatement(wrapper);
- return result;
- } else {
- return NULL;
}
+ return result;
}
Expect(Token::TRY, CHECK_OK);
- ZoneList<Label*>* label_list = NEW(ZoneList<Label*>(0));
- LabelCollector collector(label_list);
+ ZoneList<JumpTarget*>* target_list = NEW(ZoneList<JumpTarget*>(0));
+ TargetCollector collector(target_list);
Block* try_block;
{ Target target(this, &collector);
}
// If we can break out from the catch block and there is a finally block,
- // then we will need to collect labels from the catch block. Since we don't
- // know yet if there will be a finally block, we always collect the labels.
- ZoneList<Label*>* catch_label_list = NEW(ZoneList<Label*>(0));
- LabelCollector catch_collector(catch_label_list);
+ // then we will need to collect jump targets from the catch block. Since
+ // we don't know yet if there will be a finally block, we always collect
+ // the jump targets.
+ ZoneList<JumpTarget*>* catch_target_list = NEW(ZoneList<JumpTarget*>(0));
+ TargetCollector catch_collector(catch_target_list);
bool has_catch = false;
if (tok == Token::CATCH) {
has_catch = true;
if (!is_pre_parsing_ && catch_block != NULL && finally_block != NULL) {
TryCatch* statement = NEW(TryCatch(try_block, catch_var, catch_block));
- statement->set_escaping_labels(collector.labels());
+ statement->set_escaping_targets(collector.targets());
try_block = NEW(Block(NULL, 1, false));
try_block->AddStatement(statement);
catch_block = NULL;
if (catch_block != NULL) {
ASSERT(finally_block == NULL);
result = NEW(TryCatch(try_block, catch_var, catch_block));
- result->set_escaping_labels(collector.labels());
+ result->set_escaping_targets(collector.targets());
} else {
ASSERT(finally_block != NULL);
result = NEW(TryFinally(try_block, finally_block));
- // Add the labels of the try block and the catch block.
- for (int i = 0; i < collector.labels()->length(); i++) {
- catch_collector.labels()->Add(collector.labels()->at(i));
+ // Add the jump targets of the try block and the catch block.
+ for (int i = 0; i < collector.targets()->length(); i++) {
+ catch_collector.targets()->Add(collector.targets()->at(i));
}
- result->set_escaping_labels(catch_collector.labels());
+ result->set_escaping_targets(catch_collector.targets());
}
}
if ((anonymous && stat->is_target_for_anonymous()) ||
(!anonymous && ContainsLabel(stat->labels(), label))) {
- RegisterLabelUse(stat->break_target(), i);
+ RegisterTargetUse(stat->break_target(), i);
return stat;
}
}
ASSERT(stat->is_target_for_anonymous());
if (anonymous || ContainsLabel(stat->labels(), label)) {
- RegisterLabelUse(stat->continue_target(), i);
+ RegisterTargetUse(stat->continue_target(), i);
return stat;
}
}
}
-void Parser::RegisterLabelUse(Label* label, int index) {
- // Register that a label found at the given index in the target
- // stack has been used from the top of the target stack. Add the
- // label to any LabelCollectors passed on the stack.
+void Parser::RegisterTargetUse(JumpTarget* target, int index) {
+ // Register that a jump target found at the given index in the target
+ // stack has been used from the top of the target stack. Add the jump
+ // target to any TargetCollectors passed on the stack.
for (int i = target_stack_->length(); i-- > index;) {
- LabelCollector* collector = target_stack_->at(i)->AsLabelCollector();
- if (collector != NULL) collector->AddLabel(label);
+ TargetCollector* collector = target_stack_->at(i)->AsTargetCollector();
+ if (collector != NULL) collector->AddTarget(target);
}
}
--- /dev/null
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "register-allocator.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// Result implementation.
+
+void Result::ToRegister() {
+ UNIMPLEMENTED();
+}
+
+
+void Result::ToRegister(Register target) {
+ UNIMPLEMENTED();
+}
+
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+RegisterFile RegisterAllocator::Reserved() {
+ RegisterFile reserved;
+ reserved.Use(sp);
+ reserved.Use(fp);
+ reserved.Use(cp);
+ reserved.Use(pc);
+ return reserved;
+}
+
+
+void RegisterAllocator::UnuseReserved(RegisterFile* register_file) {
+ register_file->ref_counts_[sp.code()] = 0;
+ register_file->ref_counts_[fp.code()] = 0;
+ register_file->ref_counts_[cp.code()] = 0;
+ register_file->ref_counts_[pc.code()] = 0;
+}
+
+
+void RegisterAllocator::Initialize() {
+ Reset();
+ // The following registers are live on function entry, saved in the
+ // frame, and available for allocation during execution.
+ Use(r1); // JS function.
+ Use(lr); // Return address.
+}
+
+
+void RegisterAllocator::Reset() {
+ registers_.Reset();
+ // The following registers are live on function entry and reserved
+ // during execution.
+ Use(sp); // Stack pointer.
+ Use(fp); // Frame pointer (caller's frame pointer on entry).
+ Use(cp); // Context context (callee's context on entry).
+ Use(pc); // Program counter.
+}
+
+
+Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
+ UNIMPLEMENTED();
+ Result invalid(cgen_);
+ return invalid;
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "register-allocator.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// Result implementation.
+
+void Result::ToRegister() {
+ ASSERT(is_valid());
+ if (is_constant()) {
+ Result fresh = cgen_->allocator()->Allocate();
+ ASSERT(fresh.is_valid());
+ if (cgen_->IsUnsafeSmi(handle())) {
+ cgen_->LoadUnsafeSmi(fresh.reg(), handle());
+ } else {
+ cgen_->masm()->Set(fresh.reg(), Immediate(handle()));
+ }
+ // This result becomes a copy of the fresh one.
+ *this = fresh;
+ }
+ ASSERT(is_register());
+}
+
+
+void Result::ToRegister(Register target) {
+ ASSERT(is_valid());
+ if (!is_register() || !reg().is(target)) {
+ Result fresh = cgen_->allocator()->Allocate(target);
+ ASSERT(fresh.is_valid());
+ if (is_register()) {
+ cgen_->masm()->mov(fresh.reg(), reg());
+ } else {
+ ASSERT(is_constant());
+ if (cgen_->IsUnsafeSmi(handle())) {
+ cgen_->LoadUnsafeSmi(fresh.reg(), handle());
+ } else {
+ cgen_->masm()->Set(fresh.reg(), Immediate(handle()));
+ }
+ }
+ *this = fresh;
+ } else if (is_register() && reg().is(target)) {
+ ASSERT(cgen_->has_valid_frame());
+ cgen_->frame()->Spill(target);
+ ASSERT(cgen_->allocator()->count(target) == 1);
+ }
+ ASSERT(is_register());
+ ASSERT(reg().is(target));
+}
+
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+RegisterFile RegisterAllocator::Reserved() {
+ RegisterFile reserved;
+ reserved.Use(esp);
+ reserved.Use(ebp);
+ reserved.Use(esi);
+ return reserved;
+}
+
+
+void RegisterAllocator::UnuseReserved(RegisterFile* register_file) {
+ register_file->ref_counts_[esp.code()] = 0;
+ register_file->ref_counts_[ebp.code()] = 0;
+ register_file->ref_counts_[esi.code()] = 0;
+}
+
+
+void RegisterAllocator::Initialize() {
+ Reset();
+ // The following register is live on function entry, saved in the
+ // frame, and available for allocation during execution.
+ Use(edi); // JS function.
+}
+
+
+void RegisterAllocator::Reset() {
+ registers_.Reset();
+ // The following registers are live on function entry and reserved
+ // during execution.
+ Use(esp); // Stack pointer.
+ Use(ebp); // Frame pointer (caller's frame pointer on entry).
+ Use(esi); // Context (callee's context on entry).
+}
+
+
+Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
+ Result result = AllocateWithoutSpilling();
+ // Check that the register is a byte register. If not, unuse the
+ // register if valid and return an invalid result.
+ if (result.is_valid() && !result.reg().is_byte_register()) {
+ result.Unuse();
+ return Result(cgen_);
+ }
+ return result;
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "register-allocator.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// Result implementation.
+
+Result::Result(Register reg, CodeGenerator* cgen)
+ : type_(REGISTER),
+ cgen_(cgen) {
+ data_.reg_ = reg;
+ ASSERT(reg.is_valid());
+ cgen_->allocator()->Use(reg);
+}
+
+
+void Result::CopyTo(Result* destination) const {
+ destination->type_ = type();
+ destination->cgen_ = cgen_;
+
+ if (is_register()) {
+ destination->data_.reg_ = reg();
+ cgen_->allocator()->Use(reg());
+ } else if (is_constant()) {
+ destination->data_.handle_ = data_.handle_;
+ } else {
+ ASSERT(!is_valid());
+ }
+}
+
+
+void Result::Unuse() {
+ if (is_register()) {
+ cgen_->allocator()->Unuse(reg());
+ }
+ type_ = INVALID;
+}
+
+
+// -------------------------------------------------------------------------
+// RegisterFile implementation.
+
+void RegisterFile::CopyTo(RegisterFile* other) {
+ for (int i = 0; i < kNumRegisters; i++) {
+ other->ref_counts_[i] = ref_counts_[i];
+ }
+}
+
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+
+Result RegisterAllocator::AllocateWithoutSpilling() {
+ // Return the first free register, if any.
+ for (int i = 0; i < kNumRegisters; i++) {
+ if (!is_used(i)) {
+ Register free_reg = { i };
+ return Result(free_reg, cgen_);
+ }
+ }
+ return Result(cgen_);
+}
+
+
+Result RegisterAllocator::Allocate() {
+ Result result = AllocateWithoutSpilling();
+ if (!result.is_valid()) {
+ // Ask the current frame to spill a register.
+ ASSERT(cgen_->has_valid_frame());
+ Register free_reg = cgen_->frame()->SpillAnyRegister();
+ if (free_reg.is_valid()) {
+ ASSERT(!is_used(free_reg));
+ return Result(free_reg, cgen_);
+ }
+ }
+ return result;
+}
+
+
+Result RegisterAllocator::Allocate(Register target) {
+ // If the target is not referenced, it can simply be allocated.
+ if (!is_used(target)) {
+ return Result(target, cgen_);
+ }
+ // If the target is only referenced in the frame, it can be spilled and
+ // then allocated.
+ ASSERT(cgen_->has_valid_frame());
+ if (count(target) == cgen_->frame()->register_count(target)) {
+ cgen_->frame()->Spill(target);
+ ASSERT(!is_used(target));
+ return Result(target, cgen_);
+ }
+ // Otherwise (if it's referenced outside the frame) we cannot allocate it.
+ return Result(cgen_);
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_REGISTER_ALLOCATOR_H_
+#define V8_REGISTER_ALLOCATOR_H_
+
+#include "macro-assembler.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// Results
+//
+// Results encapsulate the compile-time values manipulated by the code
+// generator. They can represent registers or constants.
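+//
+// A minimal usage sketch (illustrative only, assuming a CodeGenerator* cgen
+// whose allocator is active): a register-backed Result owns a reference to
+// its register until it is explicitly Unuse()d or destroyed.
+//
+//   Result value = cgen->allocator()->Allocate();
+//   if (value.is_valid()) {
+//     // ... emit code that reads or writes value.reg() ...
+//     value.Unuse();  // release the register reference
+//   }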
+
+class Result BASE_EMBEDDED {
+ public:
+ enum Type {
+ INVALID,
+ REGISTER,
+ CONSTANT
+ };
+
+ // Construct an invalid result.
+ explicit Result(CodeGenerator* cgen) : type_(INVALID), cgen_(cgen) {}
+
+ // Construct a register Result.
+ Result(Register reg, CodeGenerator* cgen);
+
+ // Construct a Result whose value is a compile-time constant.
+  Result(Handle<Object> value, CodeGenerator* cgen)
+ : type_(CONSTANT),
+ cgen_(cgen) {
+ data_.handle_ = value.location();
+ }
+
+ // The copy constructor and assignment operators could each create a new
+ // register reference.
+ Result(const Result& other) {
+ other.CopyTo(this);
+ }
+
+ Result& operator=(const Result& other) {
+ if (this != &other) {
+ Unuse();
+ other.CopyTo(this);
+ }
+ return *this;
+ }
+
+ ~Result() { Unuse(); }
+
+ void Unuse();
+
+ Type type() const { return type_; }
+
+ bool is_valid() const { return type() != INVALID; }
+ bool is_register() const { return type() == REGISTER; }
+ bool is_constant() const { return type() == CONSTANT; }
+
+ Register reg() const {
+ ASSERT(type() == REGISTER);
+ return data_.reg_;
+ }
+
+ Handle<Object> handle() const {
+ ASSERT(type() == CONSTANT);
+ return Handle<Object>(data_.handle_);
+ }
+
+ // Move this result to an arbitrary register. The register is not
+ // necessarily spilled from the frame or even singly-referenced outside
+ // it.
+ void ToRegister();
+
+ // Move this result to a specified register. The register is spilled from
+ // the frame, and the register is singly-referenced (by this result)
+ // outside the frame.
+ void ToRegister(Register reg);
+
+ private:
+ Type type_;
+
+ union {
+ Register reg_;
+ Object** handle_;
+ } data_;
+
+ CodeGenerator* cgen_;
+
+ void CopyTo(Result* destination) const;
+};
+
+
+// -------------------------------------------------------------------------
+// Register file
+//
+// The register file tracks reference counts for the processor registers.
+// It is used by both the register allocator and the virtual frame.
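+//
+// Reference counting sketch (illustrative only; reg stands for any valid
+// Register on the target architecture): every Use must eventually be
+// balanced by an Unuse before the register is considered free again.
+//
+//   RegisterFile file;
+//   file.Use(reg);    // count(reg) == 1, is_used(reg) is true
+//   file.Use(reg);    // count(reg) == 2
+//   file.Unuse(reg);  // count(reg) == 1
+//   file.Unuse(reg);  // count(reg) == 0, reg is free again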
+
+class RegisterFile BASE_EMBEDDED {
+ public:
+ RegisterFile() { Reset(); }
+
+ void Reset() {
+ for (int i = 0; i < kNumRegisters; i++) {
+ ref_counts_[i] = 0;
+ }
+ }
+
+ // Predicates and accessors for the reference counts. The versions
+ // that take a register code rather than a register are for
+ // convenience in loops over the register codes.
+ bool is_used(int reg_code) const { return ref_counts_[reg_code] > 0; }
+ bool is_used(Register reg) const { return is_used(reg.code()); }
+ int count(int reg_code) const { return ref_counts_[reg_code]; }
+ int count(Register reg) const { return count(reg.code()); }
+
+ // Record a use of a register by incrementing its reference count.
+ void Use(Register reg) {
+ ref_counts_[reg.code()]++;
+ }
+
+ // Record that a register will no longer be used by decrementing its
+ // reference count.
+ void Unuse(Register reg) {
+ ASSERT(is_used(reg.code()));
+ if (is_used(reg.code())) {
+ ref_counts_[reg.code()]--;
+ }
+ }
+
+ // Copy the reference counts from this register file to the other.
+ void CopyTo(RegisterFile* other);
+
+ private:
+ int ref_counts_[kNumRegisters];
+
+ friend class RegisterAllocator;
+};
+
+
+// -------------------------------------------------------------------------
+// Register allocator
+//
+
+class RegisterAllocator BASE_EMBEDDED {
+ public:
+ explicit RegisterAllocator(CodeGenerator* cgen) : cgen_(cgen) {}
+
+ // A register file with each of the reserved registers counted once.
+ static RegisterFile Reserved();
+
+ // Unuse all the reserved registers in a register file.
+ static void UnuseReserved(RegisterFile* register_file);
+
+ // Predicates and accessors for the registers' reference counts.
+ bool is_used(int reg_code) const { return registers_.is_used(reg_code); }
+ bool is_used(Register reg) const { return registers_.is_used(reg.code()); }
+ int count(int reg_code) const { return registers_.count(reg_code); }
+ int count(Register reg) const { return registers_.count(reg.code()); }
+
+ // Explicitly record a reference to a register.
+ void Use(Register reg) { registers_.Use(reg); }
+
+ // Explicitly record that a register will no longer be used.
+ void Unuse(Register reg) { registers_.Unuse(reg); }
+
+ // Initialize the register allocator for entry to a JS function. On
+ // entry, the registers used by the JS calling convention are
+  // externally referenced (ie, outside the virtual frame) and the
+ // other registers are free.
+ void Initialize();
+
+ // Reset the register reference counts to free all non-reserved registers.
+ // A frame-external reference is kept to each of the reserved registers.
+ void Reset();
+
+ // Allocate a free register and return a register result if possible or
+ // fail and return an invalid result.
+ Result Allocate();
+
+ // Allocate a specific register if possible, spilling it from the frame if
+ // necessary, or else fail and return an invalid result.
+ Result Allocate(Register target);
+
+ // Allocate a free register without spilling any from the current frame or
+ // fail and return an invalid result.
+ Result AllocateWithoutSpilling();
+
+ // Allocate a free byte register without spilling any from the
+ // current frame or fail and return an invalid result.
+ Result AllocateByteRegisterWithoutSpilling();
+
+ // Copy the internal state to a register file, to be restored later by
+ // RestoreFrom.
+ void SaveTo(RegisterFile* register_file) {
+ registers_.CopyTo(register_file);
+ }
+
+ void RestoreFrom(RegisterFile* register_file) {
+    register_file->CopyTo(&registers_);
+ }
+
+ private:
+ CodeGenerator* cgen_;
+ RegisterFile registers_;
+};
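+
+// Save/restore sketch (illustrative only, assuming a RegisterAllocator*
+// allocator): SaveTo and RestoreFrom snapshot the reference counts so that
+// temporary changes to register usage do not leak into the surrounding state.
+//
+//   RegisterFile snapshot;
+//   allocator->SaveTo(&snapshot);       // remember the current counts
+//   // ... code that temporarily changes register reference counts ...
+//   allocator->RestoreFrom(&snapshot);  // counts revert to the snapshot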
+
+} } // namespace v8::internal
+
+#endif // V8_REGISTER_ALLOCATOR_H_
Object* Simulator::Call(int32_t entry, int32_t p0, int32_t p1, int32_t p2,
- int32_t p3, int32_t p4) {
+ int32_t p3, int32_t p4) {
// Setup parameters
set_register(r0, p0);
set_register(r1, p1);
--- /dev/null
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "codegen-inl.h"
+#include "virtual-frame.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// VirtualFrame implementation.
+
+#define __ masm_->
+
+// On entry to a function, the virtual frame already contains the
+// receiver and the parameters. All initial frame elements are in
+// memory.
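+//
+// For example (illustrative only), a function with two parameters starts
+// with three memory elements: the receiver at index 0 and the parameters at
+// indices 1 and 2, with stack_pointer_ == 2 (the top of stack).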
+VirtualFrame::VirtualFrame(CodeGenerator* cgen)
+ : cgen_(cgen),
+ masm_(cgen->masm()),
+ elements_(0),
+ parameter_count_(cgen->scope()->num_parameters()),
+ local_count_(0),
+ stack_pointer_(parameter_count_), // 0-based index of TOS.
+ frame_pointer_(kIllegalIndex) {
+ for (int i = 0; i < parameter_count_ + 1; i++) {
+ elements_.Add(FrameElement::MemoryElement());
+ }
+}
+
+
+// Clear the dirty bit for the element at a given index if it is a
+// valid element. The stack address corresponding to the element must
+// be allocated on the physical stack, or the first element above the
+// stack pointer so it can be allocated by a single push instruction.
+void VirtualFrame::RawSyncElementAt(int index) {
+ FrameElement element = elements_[index];
+
+ if (!element.is_valid() || element.is_synced()) return;
+
+ if (index <= stack_pointer_) {
+ // Emit code to write elements below the stack pointer to their
+ // (already allocated) stack address.
+ switch (element.type()) {
+ case FrameElement::INVALID: // Fall through.
+ case FrameElement::MEMORY:
+ // There was an early bailout for invalid and synced elements
+ // (memory elements are always synced).
+ UNREACHABLE();
+ break;
+
+ case FrameElement::REGISTER:
+ __ str(element.reg(), MemOperand(fp, fp_relative(index)));
+ break;
+
+ case FrameElement::CONSTANT: {
+ Result temp = cgen_->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(), Operand(element.handle()));
+ __ str(temp.reg(), MemOperand(fp, fp_relative(index)));
+ break;
+ }
+
+ case FrameElement::COPY: {
+ int backing_index = element.index();
+ FrameElement backing_element = elements_[backing_index];
+ if (backing_element.is_memory()) {
+ Result temp = cgen_->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ ldr(temp.reg(), MemOperand(fp, fp_relative(backing_index)));
+ __ str(temp.reg(), MemOperand(fp, fp_relative(index)));
+ } else {
+ ASSERT(backing_element.is_register());
+ __ str(backing_element.reg(), MemOperand(fp, fp_relative(index)));
+ }
+ break;
+ }
+ }
+
+ } else {
+ // Push elements above the stack pointer to allocate space and
+ // sync them. Space should have already been allocated in the
+ // actual frame for all the elements below this one.
+ ASSERT(index == stack_pointer_ + 1);
+ stack_pointer_++;
+ switch (element.type()) {
+ case FrameElement::INVALID: // Fall through.
+ case FrameElement::MEMORY:
+ // There was an early bailout for invalid and synced elements
+ // (memory elements are always synced).
+ UNREACHABLE();
+ break;
+
+ case FrameElement::REGISTER:
+ __ push(element.reg());
+ break;
+
+ case FrameElement::CONSTANT: {
+ Result temp = cgen_->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(), Operand(element.handle()));
+ __ push(temp.reg());
+ break;
+ }
+
+ case FrameElement::COPY: {
+ int backing_index = element.index();
+ FrameElement backing = elements_[backing_index];
+ ASSERT(backing.is_memory() || backing.is_register());
+ if (backing.is_memory()) {
+ Result temp = cgen_->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ ldr(temp.reg(), MemOperand(fp, fp_relative(backing_index)));
+ __ push(temp.reg());
+ } else {
+ __ push(backing.reg());
+ }
+ break;
+ }
+ }
+ }
+
+ elements_[index].set_sync();
+}
+
+
+void VirtualFrame::MergeTo(VirtualFrame* expected) {
+ Comment cmnt(masm_, "[ Merge frame");
+ // We should always be merging the code generator's current frame to an
+ // expected frame.
+ ASSERT(cgen_->frame() == this);
+
+ // Adjust the stack pointer upward (toward the top of the virtual
+ // frame) if necessary.
+ if (stack_pointer_ < expected->stack_pointer_) {
+ int difference = expected->stack_pointer_ - stack_pointer_;
+ stack_pointer_ = expected->stack_pointer_;
+ __ sub(sp, sp, Operand(difference * kPointerSize));
+ }
+
+ MergeMoveRegistersToMemory(expected);
+ MergeMoveRegistersToRegisters(expected);
+ MergeMoveMemoryToRegisters(expected);
+
+ // Fix any sync bit problems.
+ for (int i = 0; i <= stack_pointer_; i++) {
+ FrameElement source = elements_[i];
+ FrameElement target = expected->elements_[i];
+ if (source.is_synced() && !target.is_synced()) {
+ elements_[i].clear_sync();
+ } else if (!source.is_synced() && target.is_synced()) {
+ SyncElementAt(i);
+ }
+ }
+
+  // Adjust the stack pointer downward if necessary.
+ if (stack_pointer_ > expected->stack_pointer_) {
+ int difference = stack_pointer_ - expected->stack_pointer_;
+ stack_pointer_ = expected->stack_pointer_;
+ __ add(sp, sp, Operand(difference * kPointerSize));
+ }
+
+ // At this point, the frames should be identical.
+ ASSERT(Equals(expected));
+}
+
+
+void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
+ ASSERT(stack_pointer_ >= expected->stack_pointer_);
+
+  // Move registers, constants, and copies to memory. Perform moves
+  // from the top downward in the frame in order to leave the backing
+  // stores of copies in registers.
+  //
+  // In the current ARM implementation all frame elements are expected
+  // to be in memory at a merge point, so there is nothing to move; the
+  // loop below only asserts that this frame and the expected frame agree.
+
+ for (int i = 0; i < elements_.length(); i++) {
+ ASSERT(elements_[i].is_memory());
+ ASSERT(expected->elements_[i].is_memory());
+ }
+}
+
+
+void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
+}
+
+
+void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
+}
+
+
+void VirtualFrame::Enter() {
+ Comment cmnt(masm_, "[ Enter JS frame");
+
+#ifdef DEBUG
+ // Verify that r1 contains a JS function. The following code relies
+ // on r2 being available for use.
+ { Label map_check, done;
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(ne, &map_check);
+ __ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
+ __ bind(&map_check);
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+ __ b(eq, &done);
+ __ stop("VirtualFrame::Enter - r1 is not a function (map check).");
+ __ bind(&done);
+ }
+#endif // DEBUG
+
+ // We are about to push four values to the frame.
+ Adjust(4);
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ // Adjust FP to point to saved FP.
+ frame_pointer_ = elements_.length() - 2;
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ cgen_->allocator()->Unuse(r1);
+ cgen_->allocator()->Unuse(lr);
+}
+
+
+void VirtualFrame::Exit() {
+ Comment cmnt(masm_, "[ Exit JS frame");
+ // Drop the execution stack down to the frame pointer and restore the caller
+ // frame pointer and return address.
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+}
+
+
+void VirtualFrame::AllocateStackSlots(int count) {
+ ASSERT(height() == 0);
+ local_count_ = count;
+ Adjust(count);
+ if (count > 0) {
+ Comment cmnt(masm_, "[ Allocate space for locals");
+ // Initialize stack slots with 'undefined' value.
+ __ mov(ip, Operand(Factory::undefined_value()));
+ for (int i = 0; i < count; i++) {
+ __ push(ip);
+ }
+ }
+}
+
+
+void VirtualFrame::SaveContextRegister() {
+ UNIMPLEMENTED();
+}
+
+
+void VirtualFrame::RestoreContextRegister() {
+ UNIMPLEMENTED();
+}
+
+
+void VirtualFrame::PushReceiverSlotAddress() {
+ UNIMPLEMENTED();
+}
+
+
+// Before changing an element which is copied, adjust so that the
+// first copy becomes the new backing store and all the other copies
+// are updated. If the original was in memory, the new backing store
+// is allocated to a register. Return a copy of the new backing store
+// or an invalid element if the original was not a copy.
+FrameElement VirtualFrame::AdjustCopies(int index) {
+ UNIMPLEMENTED();
+ return FrameElement::InvalidElement();
+}
+
+
+void VirtualFrame::TakeFrameSlotAt(int index) {
+ UNIMPLEMENTED();
+}
+
+
+void VirtualFrame::StoreToFrameSlotAt(int index) {
+ UNIMPLEMENTED();
+}
+
+
+void VirtualFrame::PushTryHandler(HandlerType type) {
+ // Grow the expression stack by handler size less one (the return address
+ // is already pushed by a call instruction).
+ Adjust(kHandlerSize - 1);
+ __ PushTryHandler(IN_JAVASCRIPT, type);
+}
+
+
+Result VirtualFrame::RawCallStub(CodeStub* stub, int frame_arg_count) {
+ ASSERT(cgen_->HasValidEntryRegisters());
+ __ CallStub(stub);
+ Result result = cgen_->allocator()->Allocate(r0);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::Function* f,
+ int frame_arg_count) {
+ PrepareForCall(frame_arg_count, frame_arg_count);
+ ASSERT(cgen_->HasValidEntryRegisters());
+ __ CallRuntime(f, frame_arg_count);
+ Result result = cgen_->allocator()->Allocate(r0);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::FunctionId id,
+ int frame_arg_count) {
+ PrepareForCall(frame_arg_count, frame_arg_count);
+ ASSERT(cgen_->HasValidEntryRegisters());
+ __ CallRuntime(id, frame_arg_count);
+ Result result = cgen_->allocator()->Allocate(r0);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeJSFlags flags,
+ Result* arg_count_register,
+ int frame_arg_count) {
+ ASSERT(arg_count_register->reg().is(r0));
+ PrepareForCall(frame_arg_count, frame_arg_count);
+ arg_count_register->Unuse();
+ __ InvokeBuiltin(id, flags);
+ Result result = cgen_->allocator()->Allocate(r0);
+ return result;
+}
+
+
+Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode) {
+ ASSERT(cgen_->HasValidEntryRegisters());
+ __ Call(code, rmode);
+ Result result = cgen_->allocator()->Allocate(r0);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::CallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ Result* arg,
+ int dropped_args) {
+ int spilled_args = 0;
+ switch (code->kind()) {
+ case Code::LOAD_IC:
+ ASSERT(arg->reg().is(r2));
+ ASSERT(dropped_args == 0);
+ spilled_args = 1;
+ break;
+ case Code::KEYED_STORE_IC:
+ ASSERT(arg->reg().is(r0));
+ ASSERT(dropped_args == 0);
+ spilled_args = 2;
+ break;
+ default:
+ // No other types of code objects are called with values
+ // in exactly one register.
+ UNREACHABLE();
+ break;
+ }
+ PrepareForCall(spilled_args, dropped_args);
+ arg->Unuse();
+ return RawCallCodeObject(code, rmode);
+}
+
+
+Result VirtualFrame::CallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ Result* arg0,
+ Result* arg1,
+ int dropped_args) {
+ int spilled_args = 1;
+ switch (code->kind()) {
+ case Code::STORE_IC:
+ ASSERT(arg0->reg().is(r0));
+ ASSERT(arg1->reg().is(r2));
+ ASSERT(dropped_args == 0);
+ spilled_args = 1;
+ break;
+ case Code::BUILTIN:
+ ASSERT(*code == Builtins::builtin(Builtins::JSConstructCall));
+ ASSERT(arg0->reg().is(r0));
+ ASSERT(arg1->reg().is(r1));
+ spilled_args = dropped_args + 1;
+ break;
+ default:
+ // No other types of code objects are called with values
+ // in exactly two registers.
+ UNREACHABLE();
+ break;
+ }
+ PrepareForCall(spilled_args, dropped_args);
+ arg0->Unuse();
+ arg1->Unuse();
+ return RawCallCodeObject(code, rmode);
+}
+
+
+void VirtualFrame::Drop(int count) {
+ ASSERT(height() >= count);
+ int num_virtual_elements = (elements_.length() - 1) - stack_pointer_;
+
+ // Emit code to lower the stack pointer if necessary.
+ if (num_virtual_elements < count) {
+ int num_dropped = count - num_virtual_elements;
+ stack_pointer_ -= num_dropped;
+ __ add(sp, sp, Operand(num_dropped * kPointerSize));
+ }
+
+ // Discard elements from the virtual frame and free any registers.
+ for (int i = 0; i < count; i++) {
+ FrameElement dropped = elements_.RemoveLast();
+ if (dropped.is_register()) {
+ Unuse(dropped.reg());
+ }
+ }
+}
+
+
+Result VirtualFrame::Pop() {
+ UNIMPLEMENTED();
+ Result invalid(cgen_);
+ return invalid;
+}
+
+
+void VirtualFrame::EmitPop(Register reg) {
+ ASSERT(stack_pointer_ == elements_.length() - 1);
+ stack_pointer_--;
+ elements_.RemoveLast();
+ __ pop(reg);
+}
+
+
+void VirtualFrame::EmitPush(Register reg) {
+ ASSERT(stack_pointer_ == elements_.length() - 1);
+ elements_.Add(FrameElement::MemoryElement());
+ stack_pointer_++;
+ __ push(reg);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_VIRTUAL_FRAME_ARM_H_
+#define V8_VIRTUAL_FRAME_ARM_H_
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// Virtual frames
+//
+// The virtual frame is an abstraction of the physical stack frame. It
+// encapsulates the parameters, frame-allocated locals, and the expression
+// stack. It supports push/pop operations on the expression stack, as well
+// as random access to the expression stack elements, locals, and
+// parameters.
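+//
+// A usage sketch (illustrative only, assuming a VirtualFrame* frame that is
+// attached to the current code generator and fully spilled):
+//
+//   frame->EmitPush(r0);   // push r0 and record a new memory element
+//   frame->EmitPush(r1);
+//   frame->Drop(1);        // discard the top element, adjusting sp
+//   frame->EmitPop(r2);    // pop the remaining element into r2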
+
+class VirtualFrame : public Malloced {
+ public:
+ // A utility class to introduce a scope where the virtual frame is
+ // expected to remain spilled. The constructor spills the code
+ // generator's current frame, but no attempt is made to require it
+ // to stay spilled. It is intended as documentation while the code
+ // generator is being transformed.
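+  //
+  // Usage sketch (illustrative only, assuming a CodeGenerator* cgen):
+  //
+  //   { VirtualFrame::SpilledScope spilled_scope(cgen);
+  //     // code in here is written assuming a spilled frame
+  //   }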
+ class SpilledScope BASE_EMBEDDED {
+ public:
+ explicit SpilledScope(CodeGenerator* cgen);
+
+ ~SpilledScope();
+
+ private:
+ CodeGenerator* cgen_;
+ bool previous_state_;
+ };
+
+ // Construct an initial virtual frame on entry to a JS function.
+ explicit VirtualFrame(CodeGenerator* cgen);
+
+ // Construct a virtual frame as a clone of an existing one.
+ explicit VirtualFrame(VirtualFrame* original);
+
+ // Create a duplicate of an existing valid frame element.
+ FrameElement CopyElementAt(int index);
+
+ // The height of the virtual expression stack.
+ int height() const {
+ return elements_.length() - expression_base_index();
+ }
+
+ int register_count(Register reg) {
+ return frame_registers_.count(reg);
+ }
+
+ // Add extra in-memory elements to the top of the frame to match an actual
+ // frame (eg, the frame after an exception handler is pushed). No code is
+ // emitted.
+ void Adjust(int count);
+
+ // Forget elements from the top of the frame to match an actual frame (eg,
+ // the frame after a runtime call). No code is emitted.
+ void Forget(int count);
+
+ // Spill all values from the frame to memory.
+ void SpillAll();
+
+ // Spill all occurrences of a specific register from the frame.
+ void Spill(Register reg);
+
+ // Spill all occurrences of an arbitrary register if possible. Return the
+ // register spilled or no_reg if it was not possible to free any register
+ // (ie, they all have frame-external references).
+ Register SpillAnyRegister();
+
+ // Prepare this virtual frame for merging to an expected frame by
+ // performing some state changes that do not require generating
+ // code. It is guaranteed that no code will be generated.
+ void PrepareMergeTo(VirtualFrame* expected);
+
+ // Make this virtual frame have a state identical to an expected virtual
+ // frame. As a side effect, code may be emitted to make this frame match
+ // the expected one.
+ void MergeTo(VirtualFrame* expected);
+
+ // Detach a frame from its code generator, perhaps temporarily. This
+ // tells the register allocator that it is free to use frame-internal
+ // registers. Used when the code generator's frame is switched from this
+ // one to NULL by an unconditional jump.
+ void DetachFromCodeGenerator();
+
+ // (Re)attach a frame to its code generator. This informs the register
+ // allocator that the frame-internal register references are active again.
+ // Used when a code generator's frame is switched from NULL to this one by
+ // binding a label.
+ void AttachToCodeGenerator();
+
+ // Emit code for the physical JS entry and exit frame sequences. After
+ // calling Enter, the virtual frame is ready for use; and after calling
+ // Exit it should not be used. Note that Enter does not allocate space in
+ // the physical frame for storing frame-allocated locals.
+ void Enter();
+ void Exit();
+
+ // Prepare for returning from the frame by spilling locals and
+ // dropping all non-locals elements in the virtual frame. This
+ // avoids generating unnecessary merge code when jumping to the
+ // shared return site. Emits code for spills.
+ void PrepareForReturn();
+
+ // Allocate and initialize the frame-allocated locals.
+ void AllocateStackSlots(int count);
+
+ // The current top of the expression stack as an assembly operand.
+ MemOperand Top() const { return MemOperand(sp, 0); }
+
+ // An element of the expression stack as an assembly operand.
+ MemOperand ElementAt(int index) const {
+ return MemOperand(sp, index * kPointerSize);
+ }
+
+ // Random-access store to a frame-top relative frame element. The result
+ // becomes owned by the frame and is invalidated.
+ void SetElementAt(int index, Result* value);
+
+ // Set a frame element to a constant. The index is frame-top relative.
+ void SetElementAt(int index, Handle<Object> value) {
+ Result temp(value, cgen_);
+ SetElementAt(index, &temp);
+ }
+
+ void PushElementAt(int index) {
+ PushFrameSlotAt(elements_.length() - index - 1);
+ }
+
+ // A frame-allocated local as an assembly operand.
+ MemOperand LocalAt(int index) const {
+ ASSERT(0 <= index);
+ ASSERT(index < local_count_);
+ return MemOperand(fp, kLocal0Offset - index * kPointerSize);
+ }
+
+ // Push a copy of the value of a local frame slot on top of the frame.
+ void PushLocalAt(int index) {
+ PushFrameSlotAt(local0_index() + index);
+ }
+
+ // Push the value of a local frame slot on top of the frame and invalidate
+ // the local slot. The slot should be written to before trying to read
+ // from it again.
+ void TakeLocalAt(int index) {
+ TakeFrameSlotAt(local0_index() + index);
+ }
+
+ // Store the top value on the virtual frame into a local frame slot. The
+ // value is left in place on top of the frame.
+ void StoreToLocalAt(int index) {
+ StoreToFrameSlotAt(local0_index() + index);
+ }
+
+ // Push the address of the receiver slot on the frame.
+ void PushReceiverSlotAddress();
+
+ // The function frame slot.
+ MemOperand Function() const { return MemOperand(fp, kFunctionOffset); }
+
+ // Push the function on top of the frame.
+ void PushFunction() { PushFrameSlotAt(function_index()); }
+
+ // The context frame slot.
+ MemOperand Context() const { return MemOperand(fp, kContextOffset); }
+
+  // Save the value of the cp (context) register to the context frame slot.
+  void SaveContextRegister();
+
+  // Restore the cp (context) register from the value of the context frame
+  // slot.
+  void RestoreContextRegister();
+
+ // A parameter as an assembly operand.
+ MemOperand ParameterAt(int index) const {
+ // Index -1 corresponds to the receiver.
+ ASSERT(-1 <= index && index <= parameter_count_);
+ return MemOperand(fp, (1 + parameter_count_ - index) * kPointerSize);
+ }
+
+ // Push a copy of the value of a parameter frame slot on top of the frame.
+ void PushParameterAt(int index) {
+ PushFrameSlotAt(param0_index() + index);
+ }
+
+  // Push the value of a parameter frame slot on top of the frame and
+ // invalidate the parameter slot. The slot should be written to before
+ // trying to read from it again.
+ void TakeParameterAt(int index) {
+ TakeFrameSlotAt(param0_index() + index);
+ }
+
+ // Store the top value on the virtual frame into a parameter frame slot.
+ // The value is left in place on top of the frame.
+ void StoreToParameterAt(int index) {
+ StoreToFrameSlotAt(param0_index() + index);
+ }
+
+ // The receiver frame slot.
+ MemOperand Receiver() const { return ParameterAt(-1); }
+
+ // Push a try-catch or try-finally handler on top of the virtual frame.
+ void PushTryHandler(HandlerType type);
+
+ // Call a code stub, given the number of arguments it expects on (and
+ // removes from) the top of the physical frame.
+ Result CallStub(CodeStub* stub, int frame_arg_count);
+ Result CallStub(CodeStub* stub, Result* arg, int frame_arg_count);
+ Result CallStub(CodeStub* stub,
+ Result* arg0,
+ Result* arg1,
+ int frame_arg_count);
+
+ // Call the runtime, given the number of arguments expected on (and
+ // removed from) the top of the physical frame.
+ Result CallRuntime(Runtime::Function* f, int frame_arg_count);
+ Result CallRuntime(Runtime::FunctionId id, int frame_arg_count);
+
+ // Invoke a builtin, given the number of arguments it expects on (and
+ // removes from) the top of the physical frame.
+ Result InvokeBuiltin(Builtins::JavaScript id,
+ InvokeJSFlags flag,
+ Result* arg_count_register,
+ int frame_arg_count);
+
+ // Call into a JS code object, given the number of arguments it
+ // removes from the top of the physical frame.
+ // Register arguments are passed as results and consumed by the call.
+ Result CallCodeObject(Handle<Code> ic,
+ RelocInfo::Mode rmode,
+ int dropped_args);
+ Result CallCodeObject(Handle<Code> ic,
+ RelocInfo::Mode rmode,
+ Result* arg,
+ int dropped_args);
+ Result CallCodeObject(Handle<Code> ic,
+ RelocInfo::Mode rmode,
+ Result* arg0,
+ Result* arg1,
+ int dropped_args);
+
+ // Drop a number of elements from the top of the expression stack. May
+ // emit code to affect the physical frame. Does not clobber any registers
+ // excepting possibly the stack pointer.
+ void Drop(int count);
+
+ // Drop one element.
+ void Drop() { Drop(1); }
+
+ // Duplicate the top element of the frame.
+ void Dup() { PushFrameSlotAt(elements_.length() - 1); }
+
+ // Pop an element from the top of the expression stack. Returns a
+ // Result, which may be a constant or a register.
+ Result Pop();
+
+ // Pop and save an element from the top of the expression stack and
+ // emit a corresponding pop instruction.
+ void EmitPop(Register reg);
+
+ // Push an element on top of the expression stack and emit a
+ // corresponding push instruction.
+ void EmitPush(Register reg);
+
+ // Push an element on the virtual frame.
+ void Push(Register reg);
+ void Push(Handle<Object> value);
+ void Push(Smi* value) { Push(Handle<Object>(value)); }
+
+ // Pushing a result invalidates it (its contents become owned by the
+ // frame).
+ void Push(Result* result);
+
+ // Nip removes zero or more elements from immediately below the top
+ // of the frame, leaving the previous top-of-frame value on top of
+ // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
+ void Nip(int num_dropped);
+
+ private:
+ // An illegal index into the virtual frame.
+ static const int kIllegalIndex = -1;
+
+ static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
+ static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
+ static const int kContextOffset = StandardFrameConstants::kContextOffset;
+
+ static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
+
+ CodeGenerator* cgen_;
+ MacroAssembler* masm_;
+
+ List<FrameElement> elements_;
+
+ // The number of frame-allocated locals and parameters respectively.
+ int parameter_count_;
+ int local_count_;
+
+ // The index of the element that is at the processor's stack pointer
+ // (the sp register).
+ int stack_pointer_;
+
+ // The index of the element that is at the processor's frame pointer
+ // (the fp register).
+ int frame_pointer_;
+
+ // The frame has an embedded register file that it uses to track registers
+ // used in the frame.
+ RegisterFile frame_registers_;
+
+ // The index of the first parameter. The receiver lies below the first
+ // parameter.
+ int param0_index() const { return 1; }
+
+ // The index of the context slot in the frame.
+ int context_index() const {
+ ASSERT(frame_pointer_ != kIllegalIndex);
+ return frame_pointer_ - 1;
+ }
+
+ // The index of the function slot in the frame. It lies above the context
+ // slot.
+ int function_index() const {
+ ASSERT(frame_pointer_ != kIllegalIndex);
+ return frame_pointer_ - 2;
+ }
+
+ // The index of the first local. Between the parameters and the locals
+ // lie the return address, the saved frame pointer, the context, and the
+ // function.
+ int local0_index() const {
+ ASSERT(frame_pointer_ != kIllegalIndex);
+ return frame_pointer_ + 2;
+ }
+
+ // The index of the base of the expression stack.
+ int expression_base_index() const { return local0_index() + local_count_; }
+
+ // Convert a frame index into a frame pointer relative offset into the
+ // actual stack.
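+  // For example (illustrative only): with frame_pointer_ == 5, index 3 maps
+  // to offset 2 * kPointerSize and index 6 maps to offset -kPointerSize.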
+ int fp_relative(int index) const {
+ return (frame_pointer_ - index) * kPointerSize;
+ }
+
+ // Record an occurrence of a register in the virtual frame. This has the
+ // effect of incrementing both the register's frame-internal reference
+ // count and its external reference count.
+ void Use(Register reg);
+
+ // Record that a register reference has been dropped from the frame. This
+ // decrements both the register's internal and external reference counts.
+ void Unuse(Register reg);
+
+ // Spill the element at a particular index---write it to memory if
+ // necessary, free any associated register, and forget its value if
+ // constant.
+ void SpillElementAt(int index);
+
+ // Sync the element at a particular index. If it is a register or
+ // constant that disagrees with the value on the stack, write it to memory.
+ // Keep the element type as register or constant, and clear the dirty bit.
+ void SyncElementAt(int index);
+
+ // Sync the range of elements in [begin, end).
+ void SyncRange(int begin, int end);
+
+ // Sync a single element, assuming that its index is less than
+ // or equal to stack pointer + 1.
+ void RawSyncElementAt(int index);
+
+ // Push a copy of a frame slot (typically a local or parameter) on top of
+ // the frame.
+ void PushFrameSlotAt(int index);
+
+  // Push the value of a frame slot (typically a local or parameter) on
+ // top of the frame and invalidate the slot.
+ void TakeFrameSlotAt(int index);
+
+ // Store the value on top of the frame to a frame slot (typically a local
+ // or parameter).
+ void StoreToFrameSlotAt(int index);
+
+ // Spill all elements in registers. Spill the top spilled_args elements
+ // on the frame. Sync all other frame elements.
+ // Then drop dropped_args elements from the virtual frame, to match
+ // the effect of an upcoming call that will drop them from the stack.
+ void PrepareForCall(int spilled_args, int dropped_args);
+
+ // Move frame elements currently in registers or constants, that
+ // should be in memory in the expected frame, to memory.
+ void MergeMoveRegistersToMemory(VirtualFrame* expected);
+
+ // Make the register-to-register moves necessary to
+ // merge this frame with the expected frame.
+ // Register to memory moves must already have been made,
+ // and memory to register moves must follow this call.
+ // This is because some new memory-to-register moves are
+ // created in order to break cycles of register moves.
+ // Used in the implementation of MergeTo().
+ void MergeMoveRegistersToRegisters(VirtualFrame* expected);
+
+ // Make the memory-to-register and constant-to-register moves
+ // needed to make this frame equal the expected frame.
+ // Called after all register-to-memory and register-to-register
+ // moves have been made. After this function returns, the frames
+ // should be equal.
+ void MergeMoveMemoryToRegisters(VirtualFrame* expected);
+
+ // Helper function to implement the copy-on-write semantics of an
+ // element's copies just before writing to the element. The copies
+ // are updated, but the element is not changed. A copy of the new
+ // backing store of all the copies is returned if there were any
+  // copies, and an invalid frame element is returned if there were no
+ // copies.
+ FrameElement AdjustCopies(int index);
+
+ // Call a code stub that has already been prepared for calling (via
+ // PrepareForCall).
+ Result RawCallStub(CodeStub* stub, int frame_arg_count);
+
+ // Calls a code object which has already been prepared for calling
+ // (via PrepareForCall).
+ Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
+
+ bool Equals(VirtualFrame* other);
+
+ friend class JumpTarget;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_VIRTUAL_FRAME_ARM_H_
--- /dev/null
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "codegen-inl.h"
+#include "virtual-frame.h"
+
+namespace v8 { namespace internal {
+
+#define __ masm_->
+
+// -------------------------------------------------------------------------
+// VirtualFrame implementation.
+
+// On entry to a function, the virtual frame already contains the receiver,
+// the parameters, and a return address. All frame elements are in memory.
+VirtualFrame::VirtualFrame(CodeGenerator* cgen)
+ : cgen_(cgen),
+ masm_(cgen->masm()),
+ elements_(0),
+ parameter_count_(cgen->scope()->num_parameters()),
+ local_count_(0),
+ stack_pointer_(parameter_count_ + 1), // 0-based index of TOS.
+ frame_pointer_(kIllegalIndex) {
+ for (int i = 0; i < parameter_count_ + 2; i++) {
+ elements_.Add(FrameElement::MemoryElement());
+ }
+}
+
+
+// Clear the dirty bit for the element at a given index if it is a
+// valid element. The stack address corresponding to the element must
+// be allocated on the physical stack, or the first element above the
+// stack pointer so it can be allocated by a single push instruction.
+void VirtualFrame::RawSyncElementAt(int index) {
+ FrameElement element = elements_[index];
+
+ if (!element.is_valid() || element.is_synced()) return;
+
+ if (index <= stack_pointer_) {
+ // Emit code to write elements below the stack pointer to their
+ // (already allocated) stack address.
+ switch (element.type()) {
+ case FrameElement::INVALID: // Fall through.
+ case FrameElement::MEMORY:
+ // There was an early bailout for invalid and synced elements
+ // (memory elements are always synced).
+ UNREACHABLE();
+ break;
+
+ case FrameElement::REGISTER:
+ __ mov(Operand(ebp, fp_relative(index)), element.reg());
+ break;
+
+ case FrameElement::CONSTANT:
+ if (cgen_->IsUnsafeSmi(element.handle())) {
+ Result temp = cgen_->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ cgen_->LoadUnsafeSmi(temp.reg(), element.handle());
+ __ mov(Operand(ebp, fp_relative(index)), temp.reg());
+ } else {
+ __ Set(Operand(ebp, fp_relative(index)),
+ Immediate(element.handle()));
+ }
+ break;
+
+ case FrameElement::COPY: {
+ int backing_index = element.index();
+ FrameElement backing_element = elements_[backing_index];
+ if (backing_element.is_memory()) {
+ Result temp = cgen_->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
+ __ mov(Operand(ebp, fp_relative(index)), temp.reg());
+ } else {
+ ASSERT(backing_element.is_register());
+ __ mov(Operand(ebp, fp_relative(index)), backing_element.reg());
+ }
+ break;
+ }
+ }
+
+ } else {
+ // Push elements above the stack pointer to allocate space and
+ // sync them. Space should have already been allocated in the
+ // actual frame for all the elements below this one.
+ ASSERT(index == stack_pointer_ + 1);
+ stack_pointer_++;
+ switch (element.type()) {
+ case FrameElement::INVALID: // Fall through.
+ case FrameElement::MEMORY:
+ // There was an early bailout for invalid and synced elements
+ // (memory elements are always synced).
+ UNREACHABLE();
+ break;
+
+ case FrameElement::REGISTER:
+ __ push(element.reg());
+ break;
+
+ case FrameElement::CONSTANT:
+ if (cgen_->IsUnsafeSmi(element.handle())) {
+ Result temp = cgen_->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ cgen_->LoadUnsafeSmi(temp.reg(), element.handle());
+ __ push(temp.reg());
+ } else {
+ __ push(Immediate(element.handle()));
+ }
+ break;
+
+ case FrameElement::COPY: {
+ int backing_index = element.index();
+ FrameElement backing = elements_[backing_index];
+ ASSERT(backing.is_memory() || backing.is_register());
+ if (backing.is_memory()) {
+ __ push(Operand(ebp, fp_relative(backing_index)));
+ } else {
+ __ push(backing.reg());
+ }
+ break;
+ }
+ }
+ }
+
+ elements_[index].set_sync();
+}
+
+
+void VirtualFrame::MergeTo(VirtualFrame* expected) {
+ Comment cmnt(masm_, "[ Merge frame");
+ // We should always be merging the code generator's current frame to an
+ // expected frame.
+ ASSERT(cgen_->frame() == this);
+
+ // Adjust the stack pointer upward (toward the top of the virtual
+ // frame) if necessary.
+ if (stack_pointer_ < expected->stack_pointer_) {
+ int difference = expected->stack_pointer_ - stack_pointer_;
+ stack_pointer_ = expected->stack_pointer_;
+ __ sub(Operand(esp), Immediate(difference * kPointerSize));
+ }
+
+ MergeMoveRegistersToMemory(expected);
+ MergeMoveRegistersToRegisters(expected);
+ MergeMoveMemoryToRegisters(expected);
+
+ // Fix any sync bit problems.
+ for (int i = 0; i <= stack_pointer_; i++) {
+ FrameElement source = elements_[i];
+ FrameElement target = expected->elements_[i];
+ if (source.is_synced() && !target.is_synced()) {
+ elements_[i].clear_sync();
+ } else if (!source.is_synced() && target.is_synced()) {
+ SyncElementAt(i);
+ }
+ }
+
+  // Adjust the stack pointer downward if necessary.
+ if (stack_pointer_ > expected->stack_pointer_) {
+ int difference = stack_pointer_ - expected->stack_pointer_;
+ stack_pointer_ = expected->stack_pointer_;
+ __ add(Operand(esp), Immediate(difference * kPointerSize));
+ }
+
+ // At this point, the frames should be identical.
+ ASSERT(Equals(expected));
+}
+
+
+void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
+ ASSERT(stack_pointer_ >= expected->stack_pointer_);
+
+ // Move registers, constants, and copies to memory. Perform moves
+ // from the top downward in the frame in order to leave the backing
+ // stores of copies in registers.
+ //
+ // Moving memory-backed copies to memory requires a spare register
+ // for the memory-to-memory moves. Since we are performing a merge,
+ // we use esi (which is already saved in the frame). We keep track
+ // of the index of the frame element esi is caching or kIllegalIndex
+ // if esi has not been disturbed.
+ int esi_caches = kIllegalIndex;
+ // A "singleton" memory element.
+ FrameElement memory_element = FrameElement::MemoryElement();
+ for (int i = stack_pointer_; i >= 0; i--) {
+ FrameElement target = expected->elements_[i];
+ if (target.is_memory()) {
+ FrameElement source = elements_[i];
+ switch (source.type()) {
+ case FrameElement::INVALID:
+ // Not a legal merge move.
+ UNREACHABLE();
+ break;
+
+ case FrameElement::MEMORY:
+ // Already in place.
+ break;
+
+ case FrameElement::REGISTER:
+ Unuse(source.reg());
+ if (!source.is_synced()) {
+ __ mov(Operand(ebp, fp_relative(i)), source.reg());
+ }
+ break;
+
+ case FrameElement::CONSTANT:
+ if (!source.is_synced()) {
+ if (cgen_->IsUnsafeSmi(source.handle())) {
+ esi_caches = i;
+ cgen_->LoadUnsafeSmi(esi, source.handle());
+ __ mov(Operand(ebp, fp_relative(i)), esi);
+ } else {
+ __ Set(Operand(ebp, fp_relative(i)), Immediate(source.handle()));
+ }
+ }
+ break;
+
+ case FrameElement::COPY:
+ if (!source.is_synced()) {
+ int backing_index = source.index();
+ FrameElement backing_element = elements_[backing_index];
+ if (backing_element.is_memory()) {
+ // If we have to spill a register, we spill esi.
+ if (esi_caches != backing_index) {
+ esi_caches = backing_index;
+ __ mov(esi, Operand(ebp, fp_relative(backing_index)));
+ }
+ __ mov(Operand(ebp, fp_relative(i)), esi);
+ } else {
+ ASSERT(backing_element.is_register());
+ __ mov(Operand(ebp, fp_relative(i)), backing_element.reg());
+ }
+ }
+ break;
+ }
+ elements_[i] = memory_element;
+ }
+ }
+
+ if (esi_caches != kIllegalIndex) {
+ __ mov(esi, Operand(ebp, fp_relative(context_index())));
+ }
+}
+
+
+void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
+ // We have already done X-to-memory moves.
+ ASSERT(stack_pointer_ >= expected->stack_pointer_);
+
+ // Perform register-to-register moves.
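+ // Moves whose target register still holds live frame data are deferred
+ // to a later iteration. If an iteration blocks some moves without making
+ // any progress, only cycles of moves remain, and the next pass breaks a
+ // cycle by spilling the source of a blocked move.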
+ int start = 0;
+ int end = elements_.length() - 1;
+ bool any_moves_blocked; // Did we fail to make some moves this iteration?
+ bool should_break_cycles = false;
+ bool any_moves_made; // Did we make any progress this iteration?
+ do {
+ any_moves_blocked = false;
+ any_moves_made = false;
+ int first_move_blocked = kIllegalIndex;
+ int last_move_blocked = kIllegalIndex;
+ for (int i = start; i <= end; i++) {
+ FrameElement source = elements_[i];
+ FrameElement target = expected->elements_[i];
+ if (source.is_register() && target.is_register()) {
+ if (target.reg().is(source.reg())) {
+ if (target.is_synced() && !source.is_synced()) {
+ __ mov(Operand(ebp, fp_relative(i)), source.reg());
+ }
+ elements_[i] = target;
+ } else {
+ // We need to move source to target.
+ if (frame_registers_.is_used(target.reg())) {
+ // The move is blocked because the target contains valid data.
+ // If we are stuck with only cycles remaining, then we spill source.
+ // Otherwise, we just need more iterations.
+ if (should_break_cycles) {
+ SpillElementAt(i);
+ should_break_cycles = false;
+ } else { // Record a blocked move.
+ if (!any_moves_blocked) {
+ first_move_blocked = i;
+ }
+ last_move_blocked = i;
+ any_moves_blocked = true;
+ }
+ } else {
+ // The move is not blocked. This frame element can be moved from
+ // its source register to its target register.
+ if (target.is_synced() && !source.is_synced()) {
+ SyncElementAt(i);
+ }
+ Use(target.reg());
+ Unuse(source.reg());
+ elements_[i] = target;
+ __ mov(target.reg(), source.reg());
+ any_moves_made = true;
+ }
+ }
+ }
+ }
+ // Update control flags for next iteration.
+ should_break_cycles = (any_moves_blocked && !any_moves_made);
+ if (any_moves_blocked) {
+ start = first_move_blocked;
+ end = last_move_blocked;
+ }
+ } while (any_moves_blocked);
+}
+
+
+void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame *expected) {
+ // Move memory, constants, and copies to registers. This is the
+ // final step and is done from the bottom up so that the backing
+ // elements of copies are in their correct locations when we
+ // encounter the copies.
+ for (int i = 0; i < elements_.length(); i++) {
+ FrameElement source = elements_[i];
+ FrameElement target = expected->elements_[i];
+ if (target.is_register() && !source.is_register()) {
+ switch (source.type()) {
+ case FrameElement::INVALID: // Fall through.
+ case FrameElement::REGISTER:
+ UNREACHABLE();
+ break;
+
+ case FrameElement::MEMORY:
+ ASSERT(i <= stack_pointer_);
+ __ mov(target.reg(), Operand(ebp, fp_relative(i)));
+ break;
+
+ case FrameElement::CONSTANT:
+ if (cgen_->IsUnsafeSmi(source.handle())) {
+ cgen_->LoadUnsafeSmi(target.reg(), source.handle());
+ } else {
+ __ Set(target.reg(), Immediate(source.handle()));
+ }
+ break;
+
+ case FrameElement::COPY: {
+ FrameElement backing = elements_[source.index()];
+ ASSERT(backing.is_memory() || backing.is_register());
+ if (backing.is_memory()) {
+ ASSERT(source.index() <= stack_pointer_);
+ __ mov(target.reg(), Operand(ebp, fp_relative(source.index())));
+ } else {
+ __ mov(target.reg(), backing.reg());
+ }
+ break;
+ }
+ }
+ // Ensure the proper sync state. If the source was memory no
+ // code needs to be emitted.
+ if (target.is_synced() && !source.is_memory()) {
+ SyncElementAt(i);
+ }
+ Use(target.reg());
+ elements_[i] = target;
+ }
+ }
+}
+
+
+void VirtualFrame::Enter() {
+ // Registers live on entry: esp, ebp, esi, edi.
+ Comment cmnt(masm_, "[ Enter JS frame");
+
+#ifdef DEBUG
+ // Verify that edi contains a JS function. The following code
+ // relies on eax being available for use.
+ __ test(edi, Immediate(kSmiTagMask));
+ __ Check(not_zero,
+ "VirtualFrame::Enter - edi is not a function (smi check).");
+ __ mov(eax, FieldOperand(edi, HeapObject::kMapOffset));
+ __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
+ __ cmp(eax, JS_FUNCTION_TYPE);
+ __ Check(equal,
+ "VirtualFrame::Enter - edi is not a function (map check).");
+#endif
+
+ EmitPush(ebp);
+
+ frame_pointer_ = stack_pointer_;
+ __ mov(ebp, Operand(esp));
+
+ // Store the context in the frame. The context is kept in esi and a
+ // copy is stored in the frame. The external reference to esi
+ // remains.
+ EmitPush(esi);
+
+ // Store the function in the frame. The frame owns the register
+ // reference now (ie, it can keep it in edi or spill it later).
+ Push(edi);
+ SyncElementAt(elements_.length() - 1);
+ cgen_->allocator()->Unuse(edi);
+}
+
+
+void VirtualFrame::Exit() {
+ Comment cmnt(masm_, "[ Exit JS frame");
+ // Record the location of the JS exit code for patching when setting
+ // a break point.
+ __ RecordJSReturn();
+
+ // Avoid using the leave instruction here, because it is too
+ // short. We need the return sequence to be at least the size of a
+ // call instruction to support patching the exit code in the
+ // debugger. See VisitReturnStatement for the full return sequence.
+ __ mov(esp, Operand(ebp));
+ stack_pointer_ = frame_pointer_;
+ for (int i = elements_.length() - 1; i > stack_pointer_; i--) {
+ FrameElement last = elements_.RemoveLast();
+ if (last.is_register()) {
+ Unuse(last.reg());
+ }
+ }
+
+ frame_pointer_ = kIllegalIndex;
+ EmitPop(ebp);
+}
+
+
+void VirtualFrame::AllocateStackSlots(int count) {
+ ASSERT(height() == 0);
+ local_count_ = count;
+
+ if (count > 0) {
+ Comment cmnt(masm_, "[ Allocate space for locals");
+ // The locals are initialized to a constant (the undefined value), but
+ // we sync them with the actual frame to allocate space for spilling
+ // them later. First sync everything above the stack pointer so we can
+ // use pushes to allocate and initialize the locals.
+ SyncRange(stack_pointer_ + 1, elements_.length());
+ Handle<Object> undefined = Factory::undefined_value();
+ FrameElement initial_value =
+ FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
+ Result temp = cgen_->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ Set(temp.reg(), Immediate(undefined));
+ for (int i = 0; i < count; i++) {
+ elements_.Add(initial_value);
+ stack_pointer_++;
+ __ push(temp.reg());
+ }
+ }
+}
+
+
+void VirtualFrame::SaveContextRegister() {
+ ASSERT(elements_[context_index()].is_memory());
+ __ mov(Operand(ebp, fp_relative(context_index())), esi);
+}
+
+
+void VirtualFrame::RestoreContextRegister() {
+ ASSERT(elements_[context_index()].is_memory());
+ __ mov(esi, Operand(ebp, fp_relative(context_index())));
+}
+
+
+void VirtualFrame::PushReceiverSlotAddress() {
+ Result temp = cgen_->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ lea(temp.reg(), ParameterAt(-1));
+ Push(&temp);
+}
+
+
+// Before changing an element which is copied, adjust so that the
+// first copy becomes the new backing store and all the other copies
+// are updated. If the original was in memory, the new backing store
+// is allocated to a register. Return a copy of the new backing store
+// or an invalid element if the original was not a copy.
+FrameElement VirtualFrame::AdjustCopies(int index) {
+ FrameElement original = elements_[index];
+ ASSERT(original.is_memory() || original.is_register());
+
+ // Go looking for a first copy above index.
+ int i = index + 1;
+ while (i < elements_.length()) {
+ FrameElement elt = elements_[i];
+ if (elt.is_copy() && elt.index() == index) break;
+ i++;
+ }
+
+ if (i < elements_.length()) {
+ // There was a first copy. Make it the new backing element.
+ Register backing_reg;
+ if (original.is_memory()) {
+ Result fresh = cgen_->allocator()->Allocate();
+ ASSERT(fresh.is_valid());
+ backing_reg = fresh.reg();
+ __ mov(backing_reg, Operand(ebp, fp_relative(index)));
+ } else {
+ // The original was in a register.
+ backing_reg = original.reg();
+ }
+ FrameElement new_backing_element =
+ FrameElement::RegisterElement(backing_reg, FrameElement::NOT_SYNCED);
+ if (elements_[i].is_synced()) {
+ new_backing_element.set_sync();
+ }
+ Use(backing_reg);
+ elements_[i] = new_backing_element;
+
+ // Update the other copies.
+ FrameElement copy = CopyElementAt(i);
+ for (int j = i; j < elements_.length(); j++) {
+ FrameElement elt = elements_[j];
+ if (elt.is_copy() && elt.index() == index) {
+ if (elt.is_synced()) {
+ copy.set_sync();
+ } else {
+ copy.clear_sync();
+ }
+ elements_[j] = copy;
+ }
+ }
+
+ copy.clear_sync();
+ return copy;
+ }
+
+ return FrameElement::InvalidElement();
+}
+
+
+void VirtualFrame::TakeFrameSlotAt(int index) {
+ ASSERT(index >= 0);
+ ASSERT(index < elements_.length());
+ FrameElement original = elements_[index];
+
+ switch (original.type()) {
+ case FrameElement::INVALID:
+ UNREACHABLE();
+ break;
+
+ case FrameElement::MEMORY: {
+ // Allocate the element to a register. If it is not copied,
+ // push that register on top of the frame. If it is copied,
+ // make the first copy the backing store and push a fresh copy
+ // on top of the frame.
+ FrameElement copy = AdjustCopies(index);
+ if (copy.is_valid()) {
+ // The original element was a copy. Push the copy of the new
+ // backing store.
+ elements_.Add(copy);
+ } else {
+ // The element was not a copy. Move it to a register and push
+ // that.
+ Result fresh = cgen_->allocator()->Allocate();
+ ASSERT(fresh.is_valid());
+ FrameElement new_element =
+ FrameElement::RegisterElement(fresh.reg(),
+ FrameElement::NOT_SYNCED);
+ Use(fresh.reg());
+ elements_.Add(new_element);
+ __ mov(fresh.reg(), Operand(ebp, fp_relative(index)));
+ }
+ break;
+ }
+
+ case FrameElement::REGISTER: {
+ // If the element is not copied, push it on top of the frame.
+ // If it is copied, make the first copy be the new backing store
+ // and push a fresh copy on top of the frame.
+ FrameElement copy = AdjustCopies(index);
+ if (copy.is_valid()) {
+ // The original element was a copy. Push the copy of the new
+ // backing store.
+ elements_.Add(copy);
+ // This is the only case where we have to unuse the original
+ // register. The original is still counted and so is the new
+ // backing store of the copies.
+ Unuse(original.reg());
+ } else {
+ // The element was not a copy. Push it.
+ original.clear_sync();
+ elements_.Add(original);
+ }
+ break;
+ }
+
+ case FrameElement::CONSTANT:
+ original.clear_sync();
+ elements_.Add(original);
+ break;
+
+ case FrameElement::COPY:
+ original.clear_sync();
+ elements_.Add(original);
+ break;
+ }
+ elements_[index] = FrameElement::InvalidElement();
+}
+
+
+void VirtualFrame::StoreToFrameSlotAt(int index) {
+ // Store the value on top of the frame to the virtual frame slot at
+ // a given index. The value on top of the frame is left in place.
+ // This is a duplicating operation, so it can create copies.
+ ASSERT(index >= 0);
+ ASSERT(index < elements_.length());
+
+ FrameElement original = elements_[index];
+ // If the stored-to slot may be copied, adjust to preserve the
+ // copy-on-write semantics of copied elements.
+ if (original.is_register() || original.is_memory()) {
+ FrameElement ignored = AdjustCopies(index);
+ }
+
+ // If the stored-to slot is a register reference, deallocate it.
+ if (original.is_register()) {
+ Unuse(original.reg());
+ }
+
+ int top_index = elements_.length() - 1;
+ FrameElement top = elements_[top_index];
+ ASSERT(top.is_valid());
+
+ if (top.is_copy()) {
+ // There are two cases based on the relative positions of the
+ // stored-to slot and the backing slot of the top element.
+ int backing_index = top.index();
+ ASSERT(backing_index != index);
+ if (backing_index < index) {
+ // 1. The top element is a copy of a slot below the stored-to
+ // slot. The stored-to slot becomes an unsynced copy of that
+ // same backing slot.
+ elements_[index] = CopyElementAt(backing_index);
+ } else {
+ // 2. The top element is a copy of a slot above the stored-to
+ // slot. The stored-to slot becomes the new (unsynced) backing
+ // slot and both the top element and the element at the former
+ // backing slot become copies of it. The sync state of the top
+ // and former backing elements is preserved.
+ FrameElement backing_element = elements_[backing_index];
+ ASSERT(backing_element.is_memory() || backing_element.is_register());
+ if (backing_element.is_memory()) {
+ // Because sets of copies are canonicalized to be backed by
+ // their lowest frame element, and because memory frame
+ // elements are backed by the corresponding stack address, we
+ // have to move the actual value down in the stack.
+ //
+ // TODO(209): consider allocating the stored-to slot to the
+ // temp register. Alternatively, allow copies to appear in
+ // any order in the frame and lazily move the value down to
+ // the slot.
+ Result temp = cgen_->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
+ __ mov(Operand(ebp, fp_relative(index)), temp.reg());
+ } else if (backing_element.is_synced()) {
+ // If the element is a register, we will not actually move
+ // anything on the stack but only update the virtual frame
+ // element.
+ backing_element.clear_sync();
+ }
+ elements_[index] = backing_element;
+
+ // The old backing element becomes a copy of the new backing
+ // element.
+ FrameElement new_element = CopyElementAt(index);
+ elements_[backing_index] = new_element;
+ if (backing_element.is_synced()) {
+ elements_[backing_index].set_sync();
+ }
+
+ // All the copies of the old backing element (including the top
+ // element) become copies of the new backing element.
+ for (int i = backing_index + 1; i < elements_.length(); i++) {
+ FrameElement current = elements_[i];
+ if (current.is_copy() && current.index() == backing_index) {
+ elements_[i] = new_element;
+ if (current.is_synced()) {
+ elements_[i].set_sync();
+ }
+ }
+ }
+ }
+
+ return;
+ }
+
+ // Move the top element to the stored-to slot and replace it (the
+ // top element) with a copy.
+ elements_[index] = top;
+ if (top.is_memory()) {
+ // TODO(209): consider allocating the stored-to slot to the temp
+ // register. Alternatively, allow copies to appear in any order
+ // in the frame and lazily move the value down to the slot.
+ FrameElement new_top = CopyElementAt(index);
+ new_top.set_sync();
+ elements_[top_index] = new_top;
+
+ // The sync state of the former top element is correct (synced).
+ // Emit code to move the value down in the frame.
+ Result temp = cgen_->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(), Operand(esp, 0));
+ __ mov(Operand(ebp, fp_relative(index)), temp.reg());
+ } else if (top.is_register()) {
+ // The stored-to slot has the (unsynced) register reference and
+ // the top element becomes a copy. The sync state of the top is
+ // preserved.
+ FrameElement new_top = CopyElementAt(index);
+ if (top.is_synced()) {
+ new_top.set_sync();
+ elements_[index].clear_sync();
+ }
+ elements_[top_index] = new_top;
+ } else {
+ // The stored-to slot holds the same value as the top but
+ // unsynced. (We do not have copies of constants yet.)
+ ASSERT(top.is_constant());
+ elements_[index].clear_sync();
+ }
+}
+
+
+void VirtualFrame::PushTryHandler(HandlerType type) {
+ ASSERT(cgen_->HasValidEntryRegisters());
+ // Grow the expression stack by handler size less two (the return address
+ // is already pushed by a call instruction, and PushTryHandler from the
+ // macro assembler will leave the top of stack in the eax register to be
+ // pushed separately).
+ Adjust(kHandlerSize - 2);
+ __ PushTryHandler(IN_JAVASCRIPT, type);
+ // TODO(1222589): remove the reliance of PushTryHandler on a cached TOS
+ EmitPush(eax);
+}
+
+
+Result VirtualFrame::RawCallStub(CodeStub* stub, int frame_arg_count) {
+ ASSERT(cgen_->HasValidEntryRegisters());
+ __ CallStub(stub);
+ Result result = cgen_->allocator()->Allocate(eax);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::Function* f,
+ int frame_arg_count) {
+ PrepareForCall(frame_arg_count, frame_arg_count);
+ ASSERT(cgen_->HasValidEntryRegisters());
+ __ CallRuntime(f, frame_arg_count);
+ Result result = cgen_->allocator()->Allocate(eax);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::FunctionId id,
+ int frame_arg_count) {
+ PrepareForCall(frame_arg_count, frame_arg_count);
+ ASSERT(cgen_->HasValidEntryRegisters());
+ __ CallRuntime(id, frame_arg_count);
+ Result result = cgen_->allocator()->Allocate(eax);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ int frame_arg_count) {
+ PrepareForCall(frame_arg_count, frame_arg_count);
+ ASSERT(cgen_->HasValidEntryRegisters());
+ __ InvokeBuiltin(id, flag);
+ Result result = cgen_->allocator()->Allocate(eax);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode) {
+ ASSERT(cgen_->HasValidEntryRegisters());
+ __ call(code, rmode);
+ Result result = cgen_->allocator()->Allocate(eax);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::CallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ Result* arg,
+ int dropped_args) {
+ int spilled_args = 0;
+ switch (code->kind()) {
+ case Code::CALL_IC:
+ ASSERT(arg->reg().is(eax));
+ spilled_args = dropped_args + 1;
+ break;
+ case Code::LOAD_IC:
+ ASSERT(arg->reg().is(ecx));
+ ASSERT(dropped_args == 0);
+ spilled_args = 1;
+ break;
+ case Code::KEYED_STORE_IC:
+ ASSERT(arg->reg().is(eax));
+ ASSERT(dropped_args == 0);
+ spilled_args = 2;
+ break;
+ default:
+ // No other types of code objects are called with values
+ // in exactly one register.
+ UNREACHABLE();
+ break;
+ }
+ PrepareForCall(spilled_args, dropped_args);
+ arg->Unuse();
+ return RawCallCodeObject(code, rmode);
+}
+
+
+Result VirtualFrame::CallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ Result* arg0,
+ Result* arg1,
+ int dropped_args) {
+ int spilled_args = 1;
+ switch (code->kind()) {
+ case Code::STORE_IC:
+ ASSERT(arg0->reg().is(eax));
+ ASSERT(arg1->reg().is(ecx));
+ ASSERT(dropped_args == 0);
+ spilled_args = 1;
+ break;
+ case Code::BUILTIN:
+ ASSERT(*code == Builtins::builtin(Builtins::JSConstructCall));
+ ASSERT(arg0->reg().is(eax));
+ ASSERT(arg1->reg().is(edi));
+ spilled_args = dropped_args + 1;
+ break;
+ default:
+ // No other types of code objects are called with values
+ // in exactly two registers.
+ UNREACHABLE();
+ break;
+ }
+ PrepareForCall(spilled_args, dropped_args);
+ arg0->Unuse();
+ arg1->Unuse();
+ return RawCallCodeObject(code, rmode);
+}
+
+
+void VirtualFrame::Drop(int count) {
+ ASSERT(height() >= count);
+ int num_virtual_elements = (elements_.length() - 1) - stack_pointer_;
+
+ // Emit code to lower the stack pointer if necessary.
+ if (num_virtual_elements < count) {
+ int num_dropped = count - num_virtual_elements;
+ stack_pointer_ -= num_dropped;
+ __ add(Operand(esp), Immediate(num_dropped * kPointerSize));
+ }
+
+ // Discard elements from the virtual frame and free any registers.
+ for (int i = 0; i < count; i++) {
+ FrameElement dropped = elements_.RemoveLast();
+ if (dropped.is_register()) {
+ Unuse(dropped.reg());
+ }
+ }
+}
+
+
+Result VirtualFrame::Pop() {
+ FrameElement element = elements_.RemoveLast();
+ int index = elements_.length();
+ ASSERT(element.is_valid());
+
+ bool pop_needed = (stack_pointer_ == index);
+ if (pop_needed) {
+ stack_pointer_--;
+ if (element.is_memory()) {
+ Result temp = cgen_->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ pop(temp.reg());
+ return temp;
+ }
+
+ __ add(Operand(esp), Immediate(kPointerSize));
+ }
+ ASSERT(!element.is_memory());
+
+ // The top element is a register, constant, or a copy. Unuse
+ // registers and follow copies to their backing store.
+ if (element.is_register()) {
+ Unuse(element.reg());
+ } else if (element.is_copy()) {
+ ASSERT(element.index() < index);
+ index = element.index();
+ element = elements_[index];
+ }
+ ASSERT(!element.is_copy());
+
+ // The element is memory, a register, or a constant.
+ if (element.is_memory()) {
+ // Memory elements could only be the backing store of a copy.
+ // Allocate the original to a register.
+ ASSERT(index <= stack_pointer_);
+ Result temp = cgen_->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ Use(temp.reg());
+ FrameElement new_element =
+ FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED);
+ elements_[index] = new_element;
+ __ mov(temp.reg(), Operand(ebp, fp_relative(index)));
+ return Result(temp.reg(), cgen_);
+ } else if (element.is_register()) {
+ return Result(element.reg(), cgen_);
+ } else {
+ ASSERT(element.is_constant());
+ return Result(element.handle(), cgen_);
+ }
+}
+
+
+void VirtualFrame::EmitPop(Register reg) {
+ ASSERT(stack_pointer_ == elements_.length() - 1);
+ stack_pointer_--;
+ elements_.RemoveLast();
+ __ pop(reg);
+}
+
+
+void VirtualFrame::EmitPop(Operand operand) {
+ ASSERT(stack_pointer_ == elements_.length() - 1);
+ stack_pointer_--;
+ elements_.RemoveLast();
+ __ pop(operand);
+}
+
+
+void VirtualFrame::EmitPush(Register reg) {
+ ASSERT(stack_pointer_ == elements_.length() - 1);
+ elements_.Add(FrameElement::MemoryElement());
+ stack_pointer_++;
+ __ push(reg);
+}
+
+
+void VirtualFrame::EmitPush(Operand operand) {
+ ASSERT(stack_pointer_ == elements_.length() - 1);
+ elements_.Add(FrameElement::MemoryElement());
+ stack_pointer_++;
+ __ push(operand);
+}
+
+
+void VirtualFrame::EmitPush(Immediate immediate) {
+ ASSERT(stack_pointer_ == elements_.length() - 1);
+ elements_.Add(FrameElement::MemoryElement());
+ stack_pointer_++;
+ __ push(immediate);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_VIRTUAL_FRAME_IA32_H_
+#define V8_VIRTUAL_FRAME_IA32_H_
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// Virtual frames
+//
+// The virtual frame is an abstraction of the physical stack frame. It
+// encapsulates the parameters, frame-allocated locals, and the expression
+// stack. It supports push/pop operations on the expression stack, as well
+// as random access to the expression stack elements, locals, and
+// parameters.
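+ //
+ // As a rough sketch of the layout assumed by the index helpers below,
+ // the frame elements are, from index 0 upward: the receiver, the
+ // parameters, the return address, the saved frame pointer (the element
+ // at the ebp register), the context, the function, the frame-allocated
+ // locals, and finally the expression stack.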
+
+class VirtualFrame : public Malloced {
+ public:
+ // A utility class to introduce a scope where the virtual frame is
+ // expected to remain spilled. The constructor spills the code
+ // generator's current frame, but no attempt is made to require it
+ // to stay spilled. It is intended as documentation while the code
+ // generator is being transformed.
+ class SpilledScope BASE_EMBEDDED {
+ public:
+ explicit SpilledScope(CodeGenerator* cgen);
+
+ ~SpilledScope();
+
+ private:
+ CodeGenerator* cgen_;
+ bool previous_state_;
+ };
+
+ // Construct an initial virtual frame on entry to a JS function.
+ explicit VirtualFrame(CodeGenerator* cgen);
+
+ // Construct a virtual frame as a clone of an existing one.
+ explicit VirtualFrame(VirtualFrame* original);
+
+ // Create a duplicate of an existing valid frame element.
+ FrameElement CopyElementAt(int index);
+
+ // The height of the virtual expression stack.
+ int height() const {
+ return elements_.length() - expression_base_index();
+ }
+
+ int register_count(Register reg) {
+ return frame_registers_.count(reg);
+ }
+
+ // Add extra in-memory elements to the top of the frame to match an actual
+ // frame (eg, the frame after an exception handler is pushed). No code is
+ // emitted.
+ void Adjust(int count);
+
+ // Forget elements from the top of the frame to match an actual frame (eg,
+ // the frame after a runtime call). No code is emitted.
+ void Forget(int count);
+
+ // Spill all values from the frame to memory.
+ void SpillAll();
+
+ // Spill all occurrences of a specific register from the frame.
+ void Spill(Register reg);
+
+ // Spill all occurrences of an arbitrary register if possible. Return the
+ // register spilled or no_reg if it was not possible to free any register
+ // (ie, they all have frame-external references).
+ Register SpillAnyRegister();
+
+ // Prepare this virtual frame for merging to an expected frame by
+ // performing some state changes that do not require generating
+ // code. It is guaranteed that no code will be generated.
+ void PrepareMergeTo(VirtualFrame* expected);
+
+ // Make this virtual frame have a state identical to an expected virtual
+ // frame. As a side effect, code may be emitted to make this frame match
+ // the expected one.
+ void MergeTo(VirtualFrame* expected);
+
+ // Detach a frame from its code generator, perhaps temporarily. This
+ // tells the register allocator that it is free to use frame-internal
+ // registers. Used when the code generator's frame is switched from this
+ // one to NULL by an unconditional jump.
+ void DetachFromCodeGenerator();
+
+ // (Re)attach a frame to its code generator. This informs the register
+ // allocator that the frame-internal register references are active again.
+ // Used when a code generator's frame is switched from NULL to this one by
+ // binding a label.
+ void AttachToCodeGenerator();
+
+ // Emit code for the physical JS entry and exit frame sequences. After
+ // calling Enter, the virtual frame is ready for use; and after calling
+ // Exit it should not be used. Note that Enter does not allocate space in
+ // the physical frame for storing frame-allocated locals.
+ void Enter();
+ void Exit();
+
+ // Prepare for returning from the frame by spilling locals and
+ // dropping all non-local elements in the virtual frame. This
+ // avoids generating unnecessary merge code when jumping to the
+ // shared return site. Emits code for spills.
+ void PrepareForReturn();
+
+ // Allocate and initialize the frame-allocated locals.
+ void AllocateStackSlots(int count);
+
+ // An element of the expression stack as an assembly operand.
+ Operand ElementAt(int index) const {
+ return Operand(esp, index * kPointerSize);
+ }
+
+ // Random-access store to a frame-top relative frame element. The result
+ // becomes owned by the frame and is invalidated.
+ void SetElementAt(int index, Result* value);
+
+ // Set a frame element to a constant. The index is frame-top relative.
+ void SetElementAt(int index, Handle<Object> value) {
+ Result temp(value, cgen_);
+ SetElementAt(index, &temp);
+ }
+
+ void PushElementAt(int index) {
+ PushFrameSlotAt(elements_.length() - index - 1);
+ }
+
+ // A frame-allocated local as an assembly operand.
+ Operand LocalAt(int index) const {
+ ASSERT(0 <= index);
+ ASSERT(index < local_count_);
+ return Operand(ebp, kLocal0Offset - index * kPointerSize);
+ }
+
+ // Push a copy of the value of a local frame slot on top of the frame.
+ void PushLocalAt(int index) {
+ PushFrameSlotAt(local0_index() + index);
+ }
+
+ // Push the value of a local frame slot on top of the frame and invalidate
+ // the local slot. The slot should be written to before trying to read
+ // from it again.
+ void TakeLocalAt(int index) {
+ TakeFrameSlotAt(local0_index() + index);
+ }
+
+ // Store the top value on the virtual frame into a local frame slot. The
+ // value is left in place on top of the frame.
+ void StoreToLocalAt(int index) {
+ StoreToFrameSlotAt(local0_index() + index);
+ }
+
+ // Push the address of the receiver slot on the frame.
+ void PushReceiverSlotAddress();
+
+ // Push the function on top of the frame.
+ void PushFunction() { PushFrameSlotAt(function_index()); }
+
+ // Save the value of the esi register to the context frame slot.
+ void SaveContextRegister();
+
+ // Restore the esi register from the value of the context frame
+ // slot.
+ void RestoreContextRegister();
+
+ // A parameter as an assembly operand.
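+ // As an illustration, with two parameters the last parameter is at
+ // ebp + 2 * kPointerSize, the first at ebp + 3 * kPointerSize, and the
+ // receiver (index -1) at ebp + 4 * kPointerSize.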
+ Operand ParameterAt(int index) const {
+ ASSERT(-1 <= index); // -1 is the receiver.
+ ASSERT(index < parameter_count_);
+ return Operand(ebp, (1 + parameter_count_ - index) * kPointerSize);
+ }
+
+ // Push a copy of the value of a parameter frame slot on top of the frame.
+ void PushParameterAt(int index) {
+ PushFrameSlotAt(param0_index() + index);
+ }
+
+ // Push the value of a parameter frame slot on top of the frame and
+ // invalidate the parameter slot. The slot should be written to before
+ // trying to read from it again.
+ void TakeParameterAt(int index) {
+ TakeFrameSlotAt(param0_index() + index);
+ }
+
+ // Store the top value on the virtual frame into a parameter frame slot.
+ // The value is left in place on top of the frame.
+ void StoreToParameterAt(int index) {
+ StoreToFrameSlotAt(param0_index() + index);
+ }
+
+ // The receiver frame slot.
+ Operand Receiver() const { return ParameterAt(-1); }
+
+ // Push a try-catch or try-finally handler on top of the virtual frame.
+ void PushTryHandler(HandlerType type);
+
+ // Call a code stub, given the number of arguments it expects on (and
+ // removes from) the top of the physical frame.
+ Result CallStub(CodeStub* stub, int frame_arg_count);
+ Result CallStub(CodeStub* stub, Result* arg, int frame_arg_count);
+ Result CallStub(CodeStub* stub,
+ Result* arg0,
+ Result* arg1,
+ int frame_arg_count);
+
+ // Call the runtime, given the number of arguments expected on (and
+ // removed from) the top of the physical frame.
+ Result CallRuntime(Runtime::Function* f, int frame_arg_count);
+ Result CallRuntime(Runtime::FunctionId id, int frame_arg_count);
+
+ // Invoke a builtin, given the number of arguments it expects on (and
+ // removes from) the top of the physical frame.
+ Result InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ int frame_arg_count);
+
+ // Call into a JS code object, given the number of arguments it
+ // removes from the top of the physical frame.
+ // Register arguments are passed as results and consumed by the call.
+ Result CallCodeObject(Handle<Code> ic,
+ RelocInfo::Mode rmode,
+ int dropped_args);
+ Result CallCodeObject(Handle<Code> ic,
+ RelocInfo::Mode rmode,
+ Result* arg,
+ int dropped_args);
+ Result CallCodeObject(Handle<Code> ic,
+ RelocInfo::Mode rmode,
+ Result* arg0,
+ Result* arg1,
+ int dropped_args);
+
+ // Drop a number of elements from the top of the expression stack. May
+ // emit code to affect the physical frame. Does not clobber any registers
+ // except possibly the stack pointer.
+ void Drop(int count);
+
+ // Drop one element.
+ void Drop() { Drop(1); }
+
+ // Duplicate the top element of the frame.
+ void Dup() { PushFrameSlotAt(elements_.length() - 1); }
+
+ // Pop an element from the top of the expression stack. Returns a
+ // Result, which may be a constant or a register.
+ Result Pop();
+
+ // Pop and save an element from the top of the expression stack and
+ // emit a corresponding pop instruction.
+ void EmitPop(Register reg);
+ void EmitPop(Operand operand);
+
+ // Push an element on top of the expression stack and emit a
+ // corresponding push instruction.
+ void EmitPush(Register reg);
+ void EmitPush(Operand operand);
+ void EmitPush(Immediate immediate);
+
+ // Push an element on the virtual frame.
+ void Push(Register reg);
+ void Push(Handle<Object> value);
+ void Push(Smi* value) { Push(Handle<Object>(value)); }
+
+ // Pushing a result invalidates it (its contents become owned by the
+ // frame).
+ void Push(Result* result);
+
+ // Nip removes zero or more elements from immediately below the top
+ // of the frame, leaving the previous top-of-frame value on top of
+ // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
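+ // For example, if the frame holds ... x, y, z (z on top), then Nip(2)
+ // leaves ... z.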
+ void Nip(int num_dropped);
+
+ private:
+ // An illegal index into the virtual frame.
+ static const int kIllegalIndex = -1;
+
+ static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
+ static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
+ static const int kContextOffset = StandardFrameConstants::kContextOffset;
+
+ static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
+
+ CodeGenerator* cgen_;
+ MacroAssembler* masm_;
+
+ List<FrameElement> elements_;
+
+ // The number of parameters and frame-allocated locals, respectively.
+ int parameter_count_;
+ int local_count_;
+
+ // The index of the element that is at the processor's stack pointer
+ // (the esp register).
+ int stack_pointer_;
+
+ // The index of the element that is at the processor's frame pointer
+ // (the ebp register).
+ int frame_pointer_;
+
+ // The frame has an embedded register file that it uses to track registers
+ // used in the frame.
+ RegisterFile frame_registers_;
+
+ // The index of the first parameter. The receiver lies below the first
+ // parameter.
+ int param0_index() const { return 1; }
+
+ // The index of the context slot in the frame.
+ int context_index() const {
+ ASSERT(frame_pointer_ != kIllegalIndex);
+ return frame_pointer_ + 1;
+ }
+
+ // The index of the function slot in the frame. It lies above the context
+ // slot.
+ int function_index() const {
+ ASSERT(frame_pointer_ != kIllegalIndex);
+ return frame_pointer_ + 2;
+ }
+
+ // The index of the first local. Between the parameters and the locals
+ // lie the return address, the saved frame pointer, the context, and the
+ // function.
+ int local0_index() const {
+ ASSERT(frame_pointer_ != kIllegalIndex);
+ return frame_pointer_ + 3;
+ }
+
+ // The index of the base of the expression stack.
+ int expression_base_index() const { return local0_index() + local_count_; }
+
+ // Convert a frame index into a frame pointer relative offset into the
+ // actual stack.
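+ // For example, fp_relative(frame_pointer_) is 0 (the saved ebp slot),
+ // fp_relative(context_index()) is -kPointerSize, and indices below the
+ // frame pointer (parameters, return address) yield positive offsets.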
+ int fp_relative(int index) const {
+ return (frame_pointer_ - index) * kPointerSize;
+ }
+
+ // Record an occurrence of a register in the virtual frame. This has the
+ // effect of incrementing both the register's frame-internal reference
+ // count and its external reference count.
+ void Use(Register reg);
+
+ // Record that a register reference has been dropped from the frame. This
+ // decrements both the register's internal and external reference counts.
+ void Unuse(Register reg);
+
+ // Spill the element at a particular index---write it to memory if
+ // necessary, free any associated register, and forget its value if
+ // constant.
+ void SpillElementAt(int index);
+
+ // Sync the element at a particular index. If it is a register or
+ // constant that disagrees with the value on the stack, write it to memory.
+ // Keep the element type as register or constant, and clear the dirty bit.
+ void SyncElementAt(int index);
+
+ // Sync the range of elements in [begin, end).
+ void SyncRange(int begin, int end);
+
+ // Sync a single element, assuming that its index is less than
+ // or equal to stack pointer + 1.
+ void RawSyncElementAt(int index);
+
+ // Push a copy of a frame slot (typically a local or parameter) on top of
+ // the frame.
+ void PushFrameSlotAt(int index);
+
+ // Push the value of a frame slot (typically a local or parameter) on
+ // top of the frame and invalidate the slot.
+ void TakeFrameSlotAt(int index);
+
+ // Store the value on top of the frame to a frame slot (typically a local
+ // or parameter).
+ void StoreToFrameSlotAt(int index);
+
+ // Spill all elements in registers. Spill the top spilled_args elements
+ // on the frame. Sync all other frame elements.
+ // Then drop dropped_args elements from the virtual frame, to match
+ // the effect of an upcoming call that will drop them from the stack.
+ void PrepareForCall(int spilled_args, int dropped_args);
+
+ // Move frame elements currently in registers or constants, that
+ // should be in memory in the expected frame, to memory.
+ void MergeMoveRegistersToMemory(VirtualFrame* expected);
+
+ // Make the register-to-register moves necessary to
+ // merge this frame with the expected frame.
+ // Register to memory moves must already have been made,
+ // and memory to register moves must follow this call.
+ // This is because some new memory-to-register moves are
+ // created in order to break cycles of register moves.
+ // Used in the implementation of MergeTo().
+ void MergeMoveRegistersToRegisters(VirtualFrame* expected);
+
+ // Make the memory-to-register and constant-to-register moves
+ // needed to make this frame equal the expected frame.
+ // Called after all register-to-memory and register-to-register
+ // moves have been made. After this function returns, the frames
+ // should be equal.
+ void MergeMoveMemoryToRegisters(VirtualFrame* expected);
+
+ // Helper function to implement the copy-on-write semantics of an
+ // element's copies just before writing to the element. The copies
+ // are updated, but the element is not changed. A copy of the new
+ // backing store of all the copies is returned if there were any
+ // copies, and an invalid frame element is returned if there were no
+ // copies.
+ FrameElement AdjustCopies(int index);
+
+ // Call a code stub that has already been prepared for calling (via
+ // PrepareForCall).
+ Result RawCallStub(CodeStub* stub, int frame_arg_count);
+
+ // Calls a code object which has already been prepared for calling
+ // (via PrepareForCall).
+ Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
+
+ bool Equals(VirtualFrame* other);
+
+ friend class JumpTarget;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_VIRTUAL_FRAME_IA32_H_
--- /dev/null
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "codegen-inl.h"
+#include "virtual-frame.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// VirtualFrame implementation.
+
+VirtualFrame::SpilledScope::SpilledScope(CodeGenerator* cgen)
+ : cgen_(cgen),
+ previous_state_(cgen->in_spilled_code()) {
+ ASSERT(cgen->has_valid_frame());
+ cgen->frame()->SpillAll();
+ cgen->set_in_spilled_code(true);
+}
+
+
+VirtualFrame::SpilledScope::~SpilledScope() {
+ cgen_->set_in_spilled_code(previous_state_);
+}
+
+
+// When cloned, a frame is a deep copy of the original.
+VirtualFrame::VirtualFrame(VirtualFrame* original)
+ : cgen_(original->cgen_),
+ masm_(original->masm_),
+ elements_(original->elements_.length()),
+ parameter_count_(original->parameter_count_),
+ local_count_(original->local_count_),
+ stack_pointer_(original->stack_pointer_),
+ frame_pointer_(original->frame_pointer_),
+ frame_registers_(original->frame_registers_) {
+ // Copy all the elements from the original.
+ for (int i = 0; i < original->elements_.length(); i++) {
+ elements_.Add(original->elements_[i]);
+ }
+}
+
+
+FrameElement VirtualFrame::CopyElementAt(int index) {
+ ASSERT(index >= 0);
+ ASSERT(index < elements_.length());
+
+ FrameElement target = elements_[index];
+ FrameElement result;
+
+ switch (target.type()) {
+ case FrameElement::CONSTANT:
+ // We do not copy constants and instead return a fresh unsynced
+ // constant.
+ result = FrameElement::ConstantElement(target.handle(),
+ FrameElement::NOT_SYNCED);
+ break;
+
+ case FrameElement::COPY:
+ // We do not allow copies of copies, so we follow one link to
+ // the actual backing store of a copy before making a copy.
+ index = target.index();
+ ASSERT(elements_[index].is_memory() || elements_[index].is_register());
+ // Fall through.
+
+ case FrameElement::MEMORY: // Fall through.
+ case FrameElement::REGISTER:
+ // All copies are backed by memory or register locations.
+ result.type_ =
+ FrameElement::TypeField::encode(FrameElement::COPY) |
+ FrameElement::SyncField::encode(FrameElement::NOT_SYNCED);
+ result.data_.index_ = index;
+ break;
+
+ case FrameElement::INVALID:
+ // We should not try to copy invalid elements.
+ UNREACHABLE();
+ break;
+ }
+ return result;
+}
+
+
+// Modify the state of the virtual frame to match the actual frame by adding
+// extra in-memory elements to the top of the virtual frame. The extra
+// elements will be externally materialized on the actual frame (eg, by
+// pushing an exception handler). No code is emitted.
+void VirtualFrame::Adjust(int count) {
+ ASSERT(count >= 0);
+ ASSERT(stack_pointer_ == elements_.length() - 1);
+
+ for (int i = 0; i < count; i++) {
+ elements_.Add(FrameElement::MemoryElement());
+ }
+ stack_pointer_ += count;
+}
+
+
+// Modify the state of the virtual frame to match the actual frame by
+// removing elements from the top of the virtual frame. The elements will
+// be externally popped from the actual frame (eg, by a runtime call). No
+// code is emitted.
+void VirtualFrame::Forget(int count) {
+ ASSERT(count >= 0);
+ ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(elements_.length() >= count);
+
+ stack_pointer_ -= count;
+ for (int i = 0; i < count; i++) {
+ FrameElement last = elements_.RemoveLast();
+ if (last.is_register()) {
+ Unuse(last.reg());
+ }
+ }
+}
+
+
+void VirtualFrame::Use(Register reg) {
+ frame_registers_.Use(reg);
+ cgen_->allocator()->Use(reg);
+}
+
+
+void VirtualFrame::Unuse(Register reg) {
+ frame_registers_.Unuse(reg);
+ cgen_->allocator()->Unuse(reg);
+}
+
+
+void VirtualFrame::Spill(Register target) {
+ if (!frame_registers_.is_used(target)) return;
+ for (int i = 0; i < elements_.length(); i++) {
+ if (elements_[i].is_register() && elements_[i].reg().is(target)) {
+ SpillElementAt(i);
+ }
+ }
+}
+
+
+// Spill any register if possible, making its external reference count zero.
+Register VirtualFrame::SpillAnyRegister() {
+ // Find the leftmost (ordered by register code), least
+ // internally-referenced register whose internal reference count matches
+ // its external reference count (so that spilling it from the frame frees
+ // it for use).
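+ // For example, a register whose only references are frame elements
+ // (equal counts) can be freed by spilling those elements, while a
+ // register also held by a frame-external Result cannot.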
+ int min_count = kMaxInt;
+ int best_register_code = no_reg.code_;
+
+ for (int i = 0; i < kNumRegisters; i++) {
+ int count = frame_registers_.count(i);
+ if (count < min_count && count == cgen_->allocator()->count(i)) {
+ min_count = count;
+ best_register_code = i;
+ }
+ }
+
+ Register result = { best_register_code };
+ if (result.is_valid()) {
+ Spill(result);
+ ASSERT(!cgen_->allocator()->is_used(result));
+ }
+ return result;
+}
+
+
+// Make the type of the element at a given index be MEMORY.
+void VirtualFrame::SpillElementAt(int index) {
+ if (!elements_[index].is_valid()) return;
+
+ SyncElementAt(index);
+ if (elements_[index].is_register()) {
+ Unuse(elements_[index].reg());
+ }
+ // The element is now in memory.
+ elements_[index] = FrameElement::MemoryElement();
+}
+
+
+// Clear the dirty bits for the range of elements in [begin, end).
+void VirtualFrame::SyncRange(int begin, int end) {
+ ASSERT(begin >= 0);
+ ASSERT(end <= elements_.length());
+ for (int i = begin; i < end; i++) {
+ RawSyncElementAt(i);
+ }
+}
+
+
+// Clear the dirty bit for the element at a given index.
+void VirtualFrame::SyncElementAt(int index) {
+ if (index > stack_pointer_ + 1) {
+ SyncRange(stack_pointer_ + 1, index);
+ }
+ RawSyncElementAt(index);
+}
+
+
+// Make the type of all elements be MEMORY.
+void VirtualFrame::SpillAll() {
+ for (int i = 0; i < elements_.length(); i++) {
+ SpillElementAt(i);
+ }
+}
+
+
+void VirtualFrame::PrepareMergeTo(VirtualFrame* expected) {
+ // No code needs to be generated to invalidate valid elements. No
+ // code needs to be generated to move values to memory if they are
+ // already synced.
+ for (int i = 0; i < elements_.length(); i++) {
+ FrameElement source = elements_[i];
+ FrameElement target = expected->elements_[i];
+ if (!target.is_valid() ||
+ (target.is_memory() && !source.is_memory() && source.is_synced())) {
+ if (source.is_register()) {
+ // If the frame is the code generator's current frame, we have
+ // to decrement both the frame-internal and global register
+ // counts.
+ if (cgen_->frame() == this) {
+ Unuse(source.reg());
+ } else {
+ frame_registers_.Unuse(source.reg());
+ }
+ }
+ elements_[i] = target;
+ }
+ }
+}
+
+
+void VirtualFrame::PrepareForCall(int spilled_args, int dropped_args) {
+ ASSERT(height() >= dropped_args);
+ ASSERT(height() >= spilled_args);
+ ASSERT(dropped_args <= spilled_args);
+
+ int arg_base_index = elements_.length() - spilled_args;
+ // Spill the arguments. We spill from the top down so that the
+ // backing stores of register copies will be spilled only after all
+ // the copies are spilled---it is better to spill via a
+ // register-to-memory move than a memory-to-memory move.
+ for (int i = elements_.length() - 1; i >= arg_base_index; i--) {
+ SpillElementAt(i);
+ }
+
+ // Below the arguments, spill registers and sync everything else.
+ // Syncing is necessary for the locals and parameters to give the
+ // debugger a consistent view of the frame.
+ for (int i = arg_base_index - 1; i >= 0; i--) {
+ FrameElement element = elements_[i];
+ if (element.is_register()) {
+ SpillElementAt(i);
+ } else if (element.is_valid()) {
+ SyncElementAt(i);
+ }
+ }
+
+ // Forget the frame elements that will be popped by the call.
+ Forget(dropped_args);
+}
+
+
+void VirtualFrame::DetachFromCodeGenerator() {
+ // Tell the global register allocator that it is free to reallocate all
+ // register references contained in this frame. The frame elements remain
+ // register references, so the frame-internal reference count is not
+ // decremented.
+ for (int i = 0; i < elements_.length(); i++) {
+ if (elements_[i].is_register()) {
+ cgen_->allocator()->Unuse(elements_[i].reg());
+ }
+ }
+}
+
+
+void VirtualFrame::AttachToCodeGenerator() {
+ // Tell the global register allocator that the frame-internal register
+ // references are live again.
+ for (int i = 0; i < elements_.length(); i++) {
+ if (elements_[i].is_register()) {
+ cgen_->allocator()->Use(elements_[i].reg());
+ }
+ }
+}
+
+
+void VirtualFrame::PrepareForReturn() {
+ // Spill all locals. This is necessary to make sure all locals have
+ // the right value when breaking at the return site in the debugger.
+ for (int i = 0; i < expression_base_index(); i++) SpillElementAt(i);
+
+ // Drop all non-local stack elements.
+ Drop(height());
+
+ // Validate state: The expression stack should be empty and the
+ // stack pointer should have been updated to reflect this.
+ ASSERT(height() == 0);
+ ASSERT(stack_pointer_ == expression_base_index() - 1);
+}
+
+
+void VirtualFrame::SetElementAt(int index, Result* value) {
+ int frame_index = elements_.length() - index - 1;
+ ASSERT(frame_index >= 0);
+ ASSERT(frame_index < elements_.length());
+ ASSERT(value->is_valid());
+ FrameElement original = elements_[frame_index];
+
+ // Early exit if the element is the same as the one being set.
+ bool same_register = original.is_register()
+ && value->is_register()
+ && original.reg().is(value->reg());
+ bool same_constant = original.is_constant()
+ && value->is_constant()
+ && original.handle().is_identical_to(value->handle());
+ if (same_register || same_constant) {
+ value->Unuse();
+ return;
+ }
+
+ // If the original may be a copy, adjust to preserve the copy-on-write
+ // semantics of copied elements.
+ if (original.is_register() || original.is_memory()) {
+ FrameElement ignored = AdjustCopies(frame_index);
+ }
+
+ // If the original is a register reference, deallocate it.
+ if (original.is_register()) {
+ Unuse(original.reg());
+ }
+
+ FrameElement new_element;
+ if (value->is_register()) {
+ // There are two cases depending on whether the register already
+ // occurs in the frame or not.
+ if (register_count(value->reg()) == 0) {
+ Use(value->reg());
+ elements_[frame_index] =
+ FrameElement::RegisterElement(value->reg(),
+ FrameElement::NOT_SYNCED);
+ } else {
+ for (int i = 0; i < elements_.length(); i++) {
+ FrameElement element = elements_[i];
+ if (element.is_register() && element.reg().is(value->reg())) {
+ // The register backing store is lower in the frame than its
+ // copy.
+ if (i < frame_index) {
+ elements_[frame_index] = CopyElementAt(i);
+ } else {
+ // There was an early bailout for the case of setting a
+ // register element to itself.
+ ASSERT(i != frame_index);
+ element.clear_sync();
+ elements_[frame_index] = element;
+ elements_[i] = CopyElementAt(frame_index);
+ }
+ // Exit the loop once the appropriate copy is inserted.
+ break;
+ }
+ }
+ }
+ } else {
+ ASSERT(value->is_constant());
+ elements_[frame_index] =
+ FrameElement::ConstantElement(value->handle(),
+ FrameElement::NOT_SYNCED);
+ }
+ value->Unuse();
+}
+
+
+void VirtualFrame::PushFrameSlotAt(int index) {
+ FrameElement new_element = CopyElementAt(index);
+ elements_.Add(new_element);
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub, int frame_arg_count) {
+ PrepareForCall(frame_arg_count, frame_arg_count);
+ return RawCallStub(stub, frame_arg_count);
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub,
+ Result* arg,
+ int frame_arg_count) {
+ PrepareForCall(frame_arg_count, frame_arg_count);
+ arg->Unuse();
+ return RawCallStub(stub, frame_arg_count);
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub,
+ Result* arg0,
+ Result* arg1,
+ int frame_arg_count) {
+ PrepareForCall(frame_arg_count, frame_arg_count);
+ arg0->Unuse();
+ arg1->Unuse();
+ return RawCallStub(stub, frame_arg_count);
+}
+
+
+Result VirtualFrame::CallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ int dropped_args) {
+ int spilled_args = 0;
+ switch (code->kind()) {
+ case Code::CALL_IC:
+ spilled_args = dropped_args + 1;
+ break;
+ case Code::FUNCTION:
+ spilled_args = dropped_args + 1;
+ break;
+ case Code::KEYED_LOAD_IC:
+ ASSERT(dropped_args == 0);
+ spilled_args = 2;
+ break;
+ default:
+ // The other types of code objects are called with values
+ // in specific registers, and are handled in functions with
+ // a different signature.
+ UNREACHABLE();
+ break;
+ }
+ PrepareForCall(spilled_args, dropped_args);
+ return RawCallCodeObject(code, rmode);
+}
+
+
+void VirtualFrame::Push(Register reg) {
+ FrameElement new_element;
+ if (register_count(reg) == 0) {
+ Use(reg);
+ new_element =
+ FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED);
+ } else {
+ for (int i = 0; i < elements_.length(); i++) {
+ FrameElement element = elements_[i];
+ if (element.is_register() && element.reg().is(reg)) {
+ new_element = CopyElementAt(i);
+ break;
+ }
+ }
+ }
+ elements_.Add(new_element);
+}
+
+
+void VirtualFrame::Push(Handle<Object> value) {
+ elements_.Add(FrameElement::ConstantElement(value,
+ FrameElement::NOT_SYNCED));
+}
+
+
+void VirtualFrame::Push(Result* result) {
+ if (result->is_register()) {
+ Push(result->reg());
+ } else {
+ ASSERT(result->is_constant());
+ Push(result->handle());
+ }
+ result->Unuse();
+}
+
+
+void VirtualFrame::Nip(int num_dropped) {
+ ASSERT(num_dropped >= 0);
+ if (num_dropped == 0) return;
+ Result tos = Pop();
+ if (num_dropped > 1) {
+ Drop(num_dropped - 1);
+ }
+ SetElementAt(0, &tos);
+}
+
+
+bool FrameElement::Equals(FrameElement other) {
+ if (type() != other.type()) return false;
+ if (is_synced() != other.is_synced()) return false;
+
+ if (is_register()) {
+ if (!reg().is(other.reg())) return false;
+ } else if (is_constant()) {
+ if (!handle().is_identical_to(other.handle())) return false;
+ } else if (is_copy()) {
+ if (index() != other.index()) return false;
+ }
+
+ return true;
+}
+
+
+bool VirtualFrame::Equals(VirtualFrame* other) {
+ if (cgen_ != other->cgen_) return false;
+ if (masm_ != other->masm_) return false;
+ if (elements_.length() != other->elements_.length()) return false;
+
+ for (int i = 0; i < elements_.length(); i++) {
+ if (!elements_[i].Equals(other->elements_[i])) return false;
+ }
+
+ if (parameter_count_ != other->parameter_count_) return false;
+ if (local_count_ != other->local_count_) return false;
+ if (stack_pointer_ != other->stack_pointer_) return false;
+ if (frame_pointer_ != other->frame_pointer_) return false;
+
+ for (int i = 0; i < kNumRegisters; i++) {
+ if (frame_registers_.count(i) != other->frame_registers_.count(i)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_VIRTUAL_FRAME_H_
+#define V8_VIRTUAL_FRAME_H_
+
+#include "macro-assembler.h"
+#include "register-allocator.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// Virtual frame elements
+//
+// The internal elements of a virtual frame. There are several kinds of
+// elements:
+//   * Invalid: elements that are uninitialized or not actually part of the
+//     virtual frame. They should not be read.
+//   * Memory: an element that resides in the actual frame. Its address is
+//     given by its position in the virtual frame.
+//   * Register: an element that resides in a register.
+//   * Constant: an element whose value is known at compile time.
+//   * Copy: an element whose value is the same as that of another frame
+//     element, identified by the index of the element it copies.
+
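+// A minimal usage sketch (illustrative only, not part of the class below),
+// assuming `reg` is an allocated Register and `value` is a Handle<Object>:
+//
+//   FrameElement in_memory = FrameElement::MemoryElement();
+//   FrameElement in_register =
+//       FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED);
+//   FrameElement constant =
+//       FrameElement::ConstantElement(value, FrameElement::SYNCED);
+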
+class FrameElement BASE_EMBEDDED {
+ public:
+ enum SyncFlag {
+ SYNCED,
+ NOT_SYNCED
+ };
+
+ // The default constructor creates an invalid frame element.
+ FrameElement() {
+ type_ = TypeField::encode(INVALID) | SyncField::encode(NOT_SYNCED);
+ data_.reg_ = no_reg;
+ }
+
+ // Factory function to construct an invalid frame element.
+ static FrameElement InvalidElement() {
+ FrameElement result;
+ return result;
+ }
+
+ // Factory function to construct an in-memory frame element.
+ static FrameElement MemoryElement() {
+ FrameElement result;
+ result.type_ = TypeField::encode(MEMORY) | SyncField::encode(SYNCED);
+ // In-memory elements have no useful data.
+ result.data_.reg_ = no_reg;
+ return result;
+ }
+
+ // Factory function to construct an in-register frame element.
+ static FrameElement RegisterElement(Register reg, SyncFlag is_synced) {
+ FrameElement result;
+ result.type_ = TypeField::encode(REGISTER) | SyncField::encode(is_synced);
+ result.data_.reg_ = reg;
+ return result;
+ }
+
+ // Factory function to construct a frame element whose value is known at
+ // compile time.
+ static FrameElement ConstantElement(Handle<Object> value,
+ SyncFlag is_synced) {
+ FrameElement result;
+ result.type_ = TypeField::encode(CONSTANT) | SyncField::encode(is_synced);
+ result.data_.handle_ = value.location();
+ return result;
+ }
+
+ bool is_synced() const { return SyncField::decode(type_) == SYNCED; }
+
+ void set_sync() {
+ ASSERT(type() != MEMORY);
+ type_ = (type_ & ~SyncField::mask()) | SyncField::encode(SYNCED);
+ }
+
+ void clear_sync() {
+ ASSERT(type() != MEMORY);
+ type_ = (type_ & ~SyncField::mask()) | SyncField::encode(NOT_SYNCED);
+ }
+
+ bool is_valid() const { return type() != INVALID; }
+ bool is_memory() const { return type() == MEMORY; }
+ bool is_register() const { return type() == REGISTER; }
+ bool is_constant() const { return type() == CONSTANT; }
+ bool is_copy() const { return type() == COPY; }
+
+ Register reg() const {
+ ASSERT(is_register());
+ return data_.reg_;
+ }
+
+ Handle<Object> handle() const {
+ ASSERT(is_constant());
+ return Handle<Object>(data_.handle_);
+ }
+
+ int index() const {
+ ASSERT(is_copy());
+ return data_.index_;
+ }
+
+ bool Equals(FrameElement other);
+
+ private:
+ enum Type {
+ INVALID,
+ MEMORY,
+ REGISTER,
+ CONSTANT,
+ COPY
+ };
+
+ // BitField is <type, shift, size>.
+ class SyncField : public BitField<SyncFlag, 0, 1> {};
+ class TypeField : public BitField<Type, 1, 32 - 1> {};
+
+ Type type() const { return TypeField::decode(type_); }
+
+ // The element's type and a sync flag, packed using the bit fields above.
+ // For non-memory elements the sync flag records whether the element agrees
+ // with the value in memory in the actual frame; memory elements are always
+ // synced.
+ int type_;
+
+ union {
+ Register reg_;
+ Object** handle_;
+ int index_;
+ } data_;
+
+ friend class VirtualFrame;
+};
+
+
+} } // namespace v8::internal
+
+#ifdef ARM
+#include "virtual-frame-arm.h"
+#else // ia32
+#include "virtual-frame-ia32.h"
+#endif
+
+#endif // V8_VIRTUAL_FRAME_H_
--- /dev/null
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test comparison operations that involve one or two constant smis.
+
+function test() {
+ var i = 5;
+ var j = 3;
+
+ assertTrue( j < i );
+ i = 5; j = 3;
+ assertTrue( j <= i );
+ i = 5; j = 3;
+ assertTrue( i > j );
+ i = 5; j = 3;
+ assertTrue( i >= j );
+ i = 5; j = 3;
+ assertTrue( i != j );
+ i = 5; j = 3;
+ assertTrue( i == i );
+ i = 5; j = 3;
+ assertFalse( i < j );
+ i = 5; j = 3;
+ assertFalse( i <= j );
+ i = 5; j = 3;
+ assertFalse( j > i );
+ i = 5; j = 3;
+ assertFalse( j >= i );
+ i = 5; j = 3;
+ assertFalse( j == i );
+ i = 5; j = 3;
+ assertFalse( i != i );
+
+ i = 10 * 10;
+ while ( i < 107 ) {
+ ++i;
+ }
+ j = 21;
+
+ assertTrue( j < i );
+ j = 21;
+ assertTrue( j <= i );
+ j = 21;
+ assertTrue( i > j );
+ j = 21;
+ assertTrue( i >= j );
+ j = 21;
+ assertTrue( i != j );
+ j = 21;
+ assertTrue( i == i );
+ j = 21;
+ assertFalse( i < j );
+ j = 21;
+ assertFalse( i <= j );
+ j = 21;
+ assertFalse( j > i );
+ j = 21;
+ assertFalse( j >= i );
+ j = 21;
+ assertFalse( j == i );
+ j = 21;
+ assertFalse( i != i );
+ j = 21;
+ assertTrue( j == j );
+ j = 21;
+ assertFalse( j != j );
+
+ assertTrue( 100 > 99 );
+ assertTrue( 101 >= 90 );
+ assertTrue( 11111 > -234 );
+ assertTrue( -888 <= -20 );
+
+ while ( 234 > 456 ) {
+ i = i + 1;
+ }
+
+ switch(3) {
+ case 5:
+ assertUnreachable();
+ break;
+ case 3:
+ j = 13;
+ default:
+ i = 2;
+ case 7:
+ j = 17;
+ break;
+ case 9:
+ j = 19;
+ assertUnreachable();
+ break;
+ }
+ assertEquals(17, j, "switch with constant value");
+}
+
+test();
+
function listener(event, exec_state, event_data, data) {
try {
- if (event == Debug.DebugEvent.Break) {
- // Get the debug command processor.
- var dcp = exec_state.debugCommandProcessor();
-
- // Test some illegal evaluate requests.
- testRequest(dcp, void 0, false);
- testRequest(dcp, '{"expression":"1","global"=true}', false);
- testRequest(dcp, '{"expression":"a","frame":4}', false);
-
- // Test some legal evaluate requests.
- testRequest(dcp, '{"expression":"1+2"}', true, 3);
- testRequest(dcp, '{"expression":"a+2"}', true, 5);
- testRequest(dcp, '{"expression":"({\\"a\\":1,\\"b\\":2}).b+2"}', true, 4);
-
- // Test evaluation of a in the stack frames and the global context.
- testRequest(dcp, '{"expression":"a"}', true, 3);
- testRequest(dcp, '{"expression":"a","frame":0}', true, 3);
- testRequest(dcp, '{"expression":"a","frame":1}', true, 2);
- testRequest(dcp, '{"expression":"a","frame":2}', true, 1);
- testRequest(dcp, '{"expression":"a","global":true}', true, 1);
- testRequest(dcp, '{"expression":"this.a","global":true}', true, 1);
-
- // Indicate that all was processed.
- listenerComplete = true;
- }
+ if (event == Debug.DebugEvent.Break) {
+ // Get the debug command processor.
+ var dcp = exec_state.debugCommandProcessor();
+
+ // Test some illegal evaluate requests.
+ testRequest(dcp, void 0, false);
+ testRequest(dcp, '{"expression":"1","global"=true}', false);
+ testRequest(dcp, '{"expression":"a","frame":4}', false);
+
+ // Test some legal evaluate requests.
+ testRequest(dcp, '{"expression":"1+2"}', true, 3);
+ testRequest(dcp, '{"expression":"a+2"}', true, 5);
+ testRequest(dcp, '{"expression":"({\\"a\\":1,\\"b\\":2}).b+2"}', true, 4);
+
+ // Test evaluation of a in the stack frames and the global context.
+ testRequest(dcp, '{"expression":"a"}', true, 3);
+ testRequest(dcp, '{"expression":"a","frame":0}', true, 3);
+ testRequest(dcp, '{"expression":"a","frame":1}', true, 2);
+ testRequest(dcp, '{"expression":"a","frame":2}', true, 1);
+ testRequest(dcp, '{"expression":"a","global":true}', true, 1);
+ testRequest(dcp, '{"expression":"this.a","global":true}', true, 1);
+
+ // Indicate that all was processed.
+ listenerComplete = true;
+ }
} catch (e) {
- exception = e
+ exception = e
};
};
var bp1, bp2;
function listener(event, exec_state, event_data, data) {
- if (event == Debug.DebugEvent.Break)
- {
+ if (event == Debug.DebugEvent.Break) {
if (state == 0) {
exec_state.prepareStep(Debug.StepAction.StepIn, 1000);
state = 1;
state = 0;
result = -1;
f();
-print(state);
assertEquals(499, result);
// Check that performing 1000 steps with a break point on the statement in the
--- /dev/null
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function F() {
+ for (var x in [1,2,3]) {
+ return 42;
+ }
+ return 87;
+}
+
+
+function G() {
+ for (var x in [1,2,3]) {
+ try {
+ return 42;
+ } finally {
+ // Do nothing.
+ }
+ }
+ return 87;
+}
+
+
+function H() {
+ for (var x in [1,2,3]) {
+ try {
+ return 42;
+ } catch (e) {
+ // Do nothing.
+ }
+ }
+ return 87;
+}
+
+
+assertEquals(42, F());
+assertEquals(42, G());
+assertEquals(42, H());
--- /dev/null
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test some code paths through the compiler for short-circuited
+// boolean expressions.
+
+function andTest0() {
+ var a = 0;
+ // Left subexpression is known false at compile time.
+ return a != 0 && "failure";
+}
+
+assertFalse(andTest0());
+
+
+function orTest0() {
+ var a = 0;
+ // Left subexpression is known true at compile time.
+ return a == 0 || "failure";
+}
+
+assertTrue(orTest0());
assertEquals(190, f6(20), "largeSwitch.20");
assertEquals(2016, f6(64), "largeSwitch.64");
assertEquals(4032, f6(128), "largeSwitch.128");
-assertEquals(4222, f6(148), "largeSwitch.148");
-
+assertEquals(4222, f6(148), "largeSwitch.148");
+
function f7(value) {
switch (value) {
case 11:
case 12:
case 13:
- case 14:
+ case 14:
case 15: // Dummy fillers
}
return "default";
function makeVeryLong(length) {
var res = "function() {\n" +
- " var res = 0;\n" +
+ " var res = 0;\n" +
" for (var i = 0; i <= " + length + "; i++) {\n" +
" switch(i) {\n";
for (var i = 0; i < length; i++) {
var verylong_size = 1000;
var verylong = makeVeryLong(verylong_size);
-assertEquals(verylong_size * 2 + 1, verylong());
\ No newline at end of file
+assertEquals(verylong_size * 2 + 1, verylong());
assertTrue(this === f());
var x = {}, y = {};
-x.f = y.f = f;
+x.f = y.f = f;
assertFalse(x === f());
assertFalse(y === f());
assertTrue(x === x.f());
/* End PBXAggregateTarget section */
/* Begin PBXBuildFile section */
+ 58950D5E0F55519800F3E8BA /* jump-target.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D500F55514900F3E8BA /* jump-target.cc */; };
+ 58950D5F0F55519D00F3E8BA /* jump-target-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D4F0F55514900F3E8BA /* jump-target-ia32.cc */; };
+ 58950D600F5551A300F3E8BA /* jump-target.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D500F55514900F3E8BA /* jump-target.cc */; };
+ 58950D610F5551A400F3E8BA /* jump-target-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D4E0F55514900F3E8BA /* jump-target-arm.cc */; };
+ 58950D620F5551AF00F3E8BA /* register-allocator-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D530F55514900F3E8BA /* register-allocator-ia32.cc */; };
+ 58950D630F5551AF00F3E8BA /* register-allocator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D540F55514900F3E8BA /* register-allocator.cc */; };
+ 58950D640F5551B500F3E8BA /* register-allocator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D540F55514900F3E8BA /* register-allocator.cc */; };
+ 58950D650F5551B600F3E8BA /* register-allocator-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D520F55514900F3E8BA /* register-allocator-arm.cc */; };
+ 58950D660F5551C200F3E8BA /* virtual-frame.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D5A0F55514900F3E8BA /* virtual-frame.cc */; };
+ 58950D670F5551C400F3E8BA /* virtual-frame-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D580F55514900F3E8BA /* virtual-frame-ia32.cc */; };
+ 58950D680F5551CB00F3E8BA /* virtual-frame.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D5A0F55514900F3E8BA /* virtual-frame.cc */; };
+ 58950D690F5551CE00F3E8BA /* virtual-frame-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D560F55514900F3E8BA /* virtual-frame-arm.cc */; };
8900116C0E71CA2300F91F35 /* libraries.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8900116B0E71CA2300F91F35 /* libraries.cc */; };
890A13FE0EE9C47F00E49346 /* interpreter-irregexp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C660EE4665300B48DEB /* interpreter-irregexp.cc */; };
890A14010EE9C4B000E49346 /* regexp-macro-assembler-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C700EE466D000B48DEB /* regexp-macro-assembler-arm.cc */; };
/* End PBXContainerItemProxy section */
/* Begin PBXFileReference section */
+ 58950D4E0F55514900F3E8BA /* jump-target-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "jump-target-arm.cc"; sourceTree = "<group>"; };
+ 58950D4F0F55514900F3E8BA /* jump-target-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "jump-target-ia32.cc"; sourceTree = "<group>"; };
+ 58950D500F55514900F3E8BA /* jump-target.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "jump-target.cc"; sourceTree = "<group>"; };
+ 58950D510F55514900F3E8BA /* jump-target.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "jump-target.h"; sourceTree = "<group>"; };
+ 58950D520F55514900F3E8BA /* register-allocator-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "register-allocator-arm.cc"; sourceTree = "<group>"; };
+ 58950D530F55514900F3E8BA /* register-allocator-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "register-allocator-ia32.cc"; sourceTree = "<group>"; };
+ 58950D540F55514900F3E8BA /* register-allocator.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "register-allocator.cc"; sourceTree = "<group>"; };
+ 58950D550F55514900F3E8BA /* register-allocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "register-allocator.h"; sourceTree = "<group>"; };
+ 58950D560F55514900F3E8BA /* virtual-frame-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "virtual-frame-arm.cc"; sourceTree = "<group>"; };
+ 58950D570F55514900F3E8BA /* virtual-frame-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "virtual-frame-arm.h"; sourceTree = "<group>"; };
+ 58950D580F55514900F3E8BA /* virtual-frame-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "virtual-frame-ia32.cc"; sourceTree = "<group>"; };
+ 58950D590F55514900F3E8BA /* virtual-frame-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "virtual-frame-ia32.h"; sourceTree = "<group>"; };
+ 58950D5A0F55514900F3E8BA /* virtual-frame.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "virtual-frame.cc"; sourceTree = "<group>"; };
+ 58950D5B0F55514900F3E8BA /* virtual-frame.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "virtual-frame.h"; sourceTree = "<group>"; };
8900116B0E71CA2300F91F35 /* libraries.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = libraries.cc; sourceTree = "<group>"; };
893986D40F29020C007D5254 /* apiutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = apiutils.h; sourceTree = "<group>"; };
8939880B0F2A35FA007D5254 /* v8_shell */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = v8_shell; sourceTree = BUILT_PRODUCTS_DIR; };
89A15C680EE4665300B48DEB /* jsregexp-inl.h */,
897FF14E0E719B8F00D62E90 /* jsregexp.cc */,
897FF14F0E719B8F00D62E90 /* jsregexp.h */,
+ 58950D4E0F55514900F3E8BA /* jump-target-arm.cc */,
+ 58950D4F0F55514900F3E8BA /* jump-target-ia32.cc */,
+ 58950D500F55514900F3E8BA /* jump-target.cc */,
+ 58950D510F55514900F3E8BA /* jump-target.h */,
897FF1500E719B8F00D62E90 /* list-inl.h */,
897FF1510E719B8F00D62E90 /* list.h */,
897FF1520E719B8F00D62E90 /* log.cc */,
89A15C7A0EE466D000B48DEB /* regexp-macro-assembler.h */,
8944AD0E0F1D4D3A0028D560 /* regexp-stack.cc */,
8944AD0F0F1D4D3A0028D560 /* regexp-stack.h */,
+ 58950D520F55514900F3E8BA /* register-allocator-arm.cc */,
+ 58950D530F55514900F3E8BA /* register-allocator-ia32.cc */,
+ 58950D540F55514900F3E8BA /* register-allocator.cc */,
+ 58950D550F55514900F3E8BA /* register-allocator.h */,
897FF16F0E719B8F00D62E90 /* rewriter.cc */,
897FF1700E719B8F00D62E90 /* rewriter.h */,
897FF1710E719B8F00D62E90 /* runtime.cc */,
897FF19E0E719B8F00D62E90 /* v8threads.h */,
897FF19F0E719B8F00D62E90 /* variables.cc */,
897FF1A00E719B8F00D62E90 /* variables.h */,
+ 58950D560F55514900F3E8BA /* virtual-frame-arm.cc */,
+ 58950D570F55514900F3E8BA /* virtual-frame-arm.h */,
+ 58950D580F55514900F3E8BA /* virtual-frame-ia32.cc */,
+ 58950D590F55514900F3E8BA /* virtual-frame-ia32.h */,
+ 58950D5A0F55514900F3E8BA /* virtual-frame.cc */,
+ 58950D5B0F55514900F3E8BA /* virtual-frame.h */,
897FF1A10E719B8F00D62E90 /* zone-inl.h */,
897FF1A20E719B8F00D62E90 /* zone.cc */,
897FF1A30E719B8F00D62E90 /* zone.h */,
89A88E0D0E71A66E0043BA31 /* ic.cc in Sources */,
89A15C850EE4678B00B48DEB /* interpreter-irregexp.cc in Sources */,
89A88E0E0E71A66F0043BA31 /* jsregexp.cc in Sources */,
+ 58950D5E0F55519800F3E8BA /* jump-target.cc in Sources */,
+ 58950D5F0F55519D00F3E8BA /* jump-target-ia32.cc in Sources */,
8900116C0E71CA2300F91F35 /* libraries.cc in Sources */,
89A88E0F0E71A6740043BA31 /* log.cc in Sources */,
89A88E100E71A6770043BA31 /* macro-assembler-ia32.cc in Sources */,
89A15C8A0EE467D100B48DEB /* regexp-macro-assembler-tracer.cc in Sources */,
89A15C810EE4674900B48DEB /* regexp-macro-assembler.cc in Sources */,
8944AD100F1D4D500028D560 /* regexp-stack.cc in Sources */,
+ 58950D620F5551AF00F3E8BA /* register-allocator-ia32.cc in Sources */,
+ 58950D630F5551AF00F3E8BA /* register-allocator.cc in Sources */,
89A88E190E71A6970043BA31 /* rewriter.cc in Sources */,
89A88E1A0E71A69B0043BA31 /* runtime.cc in Sources */,
89A88E1B0E71A69D0043BA31 /* scanner.cc in Sources */,
89A88E2B0E71A6D10043BA31 /* v8.cc in Sources */,
89A88E2C0E71A6D20043BA31 /* v8threads.cc in Sources */,
89A88E2D0E71A6D50043BA31 /* variables.cc in Sources */,
+ 58950D660F5551C200F3E8BA /* virtual-frame.cc in Sources */,
+ 58950D670F5551C400F3E8BA /* virtual-frame-ia32.cc in Sources */,
89A88E2E0E71A6D60043BA31 /* zone.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
89F23C600E78D5B2006B2466 /* ic.cc in Sources */,
890A13FE0EE9C47F00E49346 /* interpreter-irregexp.cc in Sources */,
89F23C610E78D5B2006B2466 /* jsregexp.cc in Sources */,
+ 58950D600F5551A300F3E8BA /* jump-target.cc in Sources */,
+ 58950D610F5551A400F3E8BA /* jump-target-arm.cc in Sources */,
89F23C620E78D5B2006B2466 /* libraries.cc in Sources */,
89F23C630E78D5B2006B2466 /* log.cc in Sources */,
89F23C9E0E78D5FD006B2466 /* macro-assembler-arm.cc in Sources */,
890A14030EE9C4B500E49346 /* regexp-macro-assembler-tracer.cc in Sources */,
890A14040EE9C4B700E49346 /* regexp-macro-assembler.cc in Sources */,
8944AD110F1D4D570028D560 /* regexp-stack.cc in Sources */,
+ 58950D640F5551B500F3E8BA /* register-allocator.cc in Sources */,
+ 58950D650F5551B600F3E8BA /* register-allocator-arm.cc in Sources */,
89F23C6D0E78D5B2006B2466 /* rewriter.cc in Sources */,
89F23C6E0E78D5B2006B2466 /* runtime.cc in Sources */,
89F23C6F0E78D5B2006B2466 /* scanner.cc in Sources */,
89F23C7F0E78D5B2006B2466 /* v8.cc in Sources */,
89F23C800E78D5B2006B2466 /* v8threads.cc in Sources */,
89F23C810E78D5B2006B2466 /* variables.cc in Sources */,
+ 58950D680F5551CB00F3E8BA /* virtual-frame.cc in Sources */,
+ 58950D690F5551CE00F3E8BA /* virtual-frame-arm.cc in Sources */,
89F23C820E78D5B2006B2466 /* zone.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
RelativePath="..\..\src\interpreter-irregexp.h"
>
</File>
+ <File
+ RelativePath="..\..\src\jump-target.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\jump-target.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\jump-target-ia32.cc"
+ >
+ </File>
<File
RelativePath="..\..\src\jsregexp-inl.h"
>
RelativePath="..\..\src\regexp-stack.cc"
>
</File>
+ <File
+ RelativePath="..\..\src\register-allocator.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\register-allocator.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\register-allocator-ia32.cc"
+ >
+ </File>
<File
RelativePath="..\..\src\rewriter.cc"
>
RelativePath="..\..\src\variables.h"
>
</File>
+ <File
+ RelativePath="..\..\src\virtual-frame.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\virtual-frame-ia32.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\virtual-frame.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\virtual-frame-ia32.cc"
+ >
+ </File>
<File
RelativePath="..\..\src\zone-inl.h"
>
RelativePath="..\..\src\interpreter-irregexp.h"
>
</File>
+ <File
+ RelativePath="..\..\src\jump-target.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\jump-target.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\jump-target-arm.cc"
+ >
+ </File>
<File
RelativePath="..\..\src\jsregexp-inl.h"
>
RelativePath="..\..\src\regexp-stack.cc"
>
</File>
+ <File
+ RelativePath="..\..\src\register-allocator.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\register-allocator.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\register-allocator-arm.cc"
+ >
+ </File>
<File
RelativePath="..\..\src\rewriter.cc"
>
RelativePath="..\..\src\variables.h"
>
</File>
+ <File
+ RelativePath="..\..\src\virtual-frame.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\virtual-frame-arm.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\virtual-frame.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\virtual-frame-arm.cc"
+ >
+ </File>
<File
RelativePath="..\..\src\zone-inl.h"
>