// for stack overflow.
frame_->AllocateStackSlots();
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
int heap_slots = scope()->num_heap_slots();
if (heap_slots > 0) {
// Allocate local context.
// fp, and lr have been pushed on the stack. Adjust the virtual
// frame to match this state.
frame_->Adjust(4);
- allocator_->Unuse(r1);
- allocator_->Unuse(lr);
// Bind all the bailout labels to the beginning of the function.
List<CompilationInfo::Bailout*>* bailouts = info->bailouts();
has_valid_frame() &&
!has_cc() &&
frame_->height() == original_height) {
+ frame_->SpillAll();
true_target->Jump();
}
}
if (has_cc()) {
// Convert cc_reg_ into a boolean value.
+ VirtualFrame::SpilledScope scope(frame_);
JumpTarget loaded;
JumpTarget materialize_true;
materialize_true.Branch(cc_reg_);
}
if (true_target.is_linked() || false_target.is_linked()) {
+ VirtualFrame::SpilledScope scope(frame_);
// We have at least one condition value that has been "translated"
// into a branch, thus it needs to be loaded explicitly.
JumpTarget loaded;
void CodeGenerator::LoadGlobal() {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
__ ldr(r0, GlobalObject());
frame_->EmitPush(r0);
}
void CodeGenerator::LoadGlobalReceiver(Register scratch) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
__ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
__ ldr(scratch,
FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
void CodeGenerator::LoadTypeofExpression(Expression* expr) {
// Special handling of identifiers as subexpressions of typeof.
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Variable* variable = expr->AsVariableProxy()->AsVariable();
if (variable != NULL && !variable->is_this() && variable->is_global()) {
// For a global variable we build the property reference
Literal key(variable->name());
Property property(&global, &key, RelocInfo::kNoPosition);
Reference ref(this, &property);
- ref.GetValueAndSpill();
+ ref.GetValue();
} else if (variable != NULL && variable->slot() != NULL) {
// For a variable that rewrites to a slot, we signal it is the immediate
// subexpression of a typeof.
void CodeGenerator::LoadReference(Reference* ref) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ LoadReference");
Expression* e = ref->expression();
Property* property = e->AsProperty();
void CodeGenerator::UnloadReference(Reference* ref) {
- VirtualFrame::SpilledScope spilled_scope;
+ int size = ref->size();
+ ref->set_unloaded();
+ if (size == 0) return;
+
// Pop a reference from the stack while preserving TOS.
+ VirtualFrame::RegisterAllocationScope scope(this);
Comment cmnt(masm_, "[ UnloadReference");
- int size = ref->size();
if (size > 0) {
- frame_->EmitPop(r0);
+ Register tos = frame_->PopToRegister();
frame_->Drop(size);
- frame_->EmitPush(r0);
+ frame_->EmitPush(tos);
}
- ref->set_unloaded();
}
// may jump to 'false_target' in case the register converts to 'false'.
void CodeGenerator::ToBoolean(JumpTarget* true_target,
JumpTarget* false_target) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
// Note: The generated code snippet does not change stack variables.
// Only the condition code should be set.
frame_->EmitPop(r0);
void CodeGenerator::GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
int constant_rhs) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
// sp[0] : y
// sp[1] : x
// result : r0
// Stub is entered with a call: 'return address' is in lr.
switch (op) {
- case Token::ADD: // fall through.
- case Token::SUB: // fall through.
+ case Token::ADD:
+ case Token::SUB:
case Token::MUL:
case Token::DIV:
case Token::MOD:
case Token::COMMA:
frame_->EmitPop(r0);
- // simply discard left value
+ // Simply discard left value.
+ frame_->Drop();
+ break;
+
+ default:
+ // Other cases should have been handled before this point.
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void CodeGenerator::VirtualFrameBinaryOperation(Token::Value op,
+ OverwriteMode overwrite_mode,
+ int constant_rhs) {
+ // Top of virtual frame : y
+ // Second element on virtual frame : x
+ // Result : top of virtual frame
+
+ // Stub is entered with a call: 'return address' is in lr.
+ switch (op) {
+ case Token::ADD: // fall through.
+ case Token::SUB: // fall through.
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ frame_->PopToR1R0(); // Pop y to r0 and x to r1.
+ {
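+ // The stub doesn't know about our register allocation, so the
+ // frame must be fully spilled around the stub call.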
+ VirtualFrame::SpilledScope spilled_scope(frame_);
+ GenericBinaryOpStub stub(op, overwrite_mode, constant_rhs);
+ frame_->CallStub(&stub, 0);
+ }
+ frame_->EmitPush(r0);
+ break;
+ }
+
+ case Token::COMMA: {
+ Register scratch = frame_->PopToRegister();
+ // Simply discard left value.
frame_->Drop();
+ frame_->EmitPush(scratch);
break;
+ }
default:
// Other cases should have been handled before this point.
DeferredInlineSmiOperation(Token::Value op,
int value,
bool reversed,
- OverwriteMode overwrite_mode)
+ OverwriteMode overwrite_mode,
+ Register tos)
: op_(op),
value_(value),
reversed_(reversed),
- overwrite_mode_(overwrite_mode) {
+ overwrite_mode_(overwrite_mode),
+ tos_register_(tos) {
set_comment("[ DeferredInlineSmiOperation");
}
int value_;
bool reversed_;
OverwriteMode overwrite_mode_;
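+ // The register the inlined code expects its operand in, and in which
+ // the deferred code must leave the result (see the Move on exit).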
+ Register tos_register_;
};
case Token::ADD: {
// Revert optimistic add.
if (reversed_) {
- __ sub(r0, r0, Operand(Smi::FromInt(value_)));
+ __ sub(r0, tos_register_, Operand(Smi::FromInt(value_)));
__ mov(r1, Operand(Smi::FromInt(value_)));
} else {
- __ sub(r1, r0, Operand(Smi::FromInt(value_)));
+ __ sub(r1, tos_register_, Operand(Smi::FromInt(value_)));
__ mov(r0, Operand(Smi::FromInt(value_)));
}
break;
case Token::SUB: {
// Revert optimistic sub.
if (reversed_) {
- __ rsb(r0, r0, Operand(Smi::FromInt(value_)));
+ __ rsb(r0, tos_register_, Operand(Smi::FromInt(value_)));
__ mov(r1, Operand(Smi::FromInt(value_)));
} else {
- __ add(r1, r0, Operand(Smi::FromInt(value_)));
+ __ add(r1, tos_register_, Operand(Smi::FromInt(value_)));
__ mov(r0, Operand(Smi::FromInt(value_)));
}
break;
case Token::BIT_XOR:
case Token::BIT_AND: {
if (reversed_) {
+ __ Move(r0, tos_register_);
__ mov(r1, Operand(Smi::FromInt(value_)));
} else {
- __ mov(r1, Operand(r0));
+ __ Move(r1, tos_register_);
__ mov(r0, Operand(Smi::FromInt(value_)));
}
break;
case Token::SHR:
case Token::SAR: {
if (!reversed_) {
- __ mov(r1, Operand(r0));
+ __ Move(r1, tos_register_);
__ mov(r0, Operand(Smi::FromInt(value_)));
} else {
UNREACHABLE(); // Should have been handled in SmiOperation.
GenericBinaryOpStub stub(op_, overwrite_mode_, value_);
__ CallStub(&stub);
+ // The generic stub returns its value in r0, but that's not
+ // necessarily what we want.  The inlined code expects the answer
+ // in the same register the operand was in.
+ __ Move(tos_register_, r0);
}
}
+void CodeGenerator::VirtualFrameSmiOperation(Token::Value op,
+ Handle<Object> value,
+ bool reversed,
+ OverwriteMode mode) {
+ int int_value = Smi::cast(*value)->value();
+
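+ // Decide whether we can emit inline code for this operation on a smi
+ // literal, or whether we must defer everything to the generic stub.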
+ bool something_to_inline;
+ switch (op) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::BIT_AND:
+ case Token::BIT_OR:
+ case Token::BIT_XOR: {
+ something_to_inline = true;
+ break;
+ }
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ if (reversed) {
+ something_to_inline = false;
+ } else {
+ something_to_inline = true;
+ }
+ break;
+ }
+ case Token::MOD: {
+ if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
+ something_to_inline = false;
+ } else {
+ something_to_inline = true;
+ }
+ break;
+ }
+ case Token::MUL: {
+ if (!IsEasyToMultiplyBy(int_value)) {
+ something_to_inline = false;
+ } else {
+ something_to_inline = true;
+ }
+ break;
+ }
+ default: {
+ something_to_inline = false;
+ break;
+ }
+ }
+
+ if (!something_to_inline) {
+ if (!reversed) {
+ // Move the lhs to r1.
+ frame_->PopToR1();
+ // Flush any other registers to the stack.
+ frame_->SpillAll();
+ // Tell the virtual frame that TOS is in r1 (no code emitted).
+ frame_->EmitPush(r1);
+ // We know that r0 is free.
+ __ mov(r0, Operand(value));
+ // Push r0 on the virtual frame (no code emitted).
+ frame_->EmitPush(r0);
+ // VirtualFrameBinaryOperation is most efficient when the top two
+ // elements of the virtual frame are already in r1 and r0, as here.
+ // It pushes the answer on the virtual frame.
+ VirtualFrameBinaryOperation(op, mode, int_value);
+ } else {
+ // Move the rhs to r0.
+ frame_->PopToR0();
+ // Flush any other registers to the stack.
+ frame_->SpillAll();
+ // We know that r1 is free.
+ __ mov(r1, Operand(value));
+ // Tell the virtual frame that TOS is in r1 (no code emitted).
+ frame_->EmitPush(r1);
+ // Push r0 on the virtual frame (no code emitted).
+ frame_->EmitPush(r0);
+ // VirtualFrameBinaryOperation is most efficient when the top two
+ // elements of the virtual frame are already in r1 and r0, as here.
+ // It pushes the answer on the virtual frame.  kUnknownIntValue is
+ // passed because in the reversed case the constant is the lhs, so
+ // there is no constant rhs for the stub to specialize on.
+ VirtualFrameBinaryOperation(op, mode, kUnknownIntValue);
+ }
+ return;
+ }
+
+ // We move the top of stack to a register (normally no move is involved).
+ Register tos = frame_->PopToRegister();
+ // All other registers are spilled.  The deferred code expects one argument
+ // in a register and all other values flushed to the stack.  The answer is
+ // returned in the same register that the top-of-stack argument was in.
+ frame_->SpillAll();
+
+ switch (op) {
+ case Token::ADD: {
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
+
+ __ add(tos, tos, Operand(value), SetCC);
+ deferred->Branch(vs);
+ __ tst(tos, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ deferred->BindExit();
+ frame_->EmitPush(tos);
+ break;
+ }
+
+ case Token::SUB: {
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
+
+ if (reversed) {
+ __ rsb(tos, tos, Operand(value), SetCC);
+ } else {
+ __ sub(tos, tos, Operand(value), SetCC);
+ }
+ deferred->Branch(vs);
+ __ tst(tos, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ deferred->BindExit();
+ frame_->EmitPush(tos);
+ break;
+ }
+
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND: {
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
+ __ tst(tos, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ switch (op) {
+ case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
+ case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
+ case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
+ default: UNREACHABLE();
+ }
+ deferred->BindExit();
+ frame_->EmitPush(tos);
+ break;
+ }
+
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ ASSERT(!reversed);
+ Register scratch = VirtualFrame::scratch0();
+ Register scratch2 = VirtualFrame::scratch1();
+ int shift_value = int_value & 0x1f; // least significant 5 bits
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
+ __ tst(tos, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // remove tags
+ switch (op) {
+ case Token::SHL: {
+ if (shift_value != 0) {
+ __ mov(scratch, Operand(scratch, LSL, shift_value));
+ }
+ // check that the *unsigned* result fits in a smi
+ __ add(scratch2, scratch, Operand(0x40000000), SetCC);
+ deferred->Branch(mi);
+ break;
+ }
+ case Token::SHR: {
+ // LSR by immediate 0 means shifting 32 bits.
+ if (shift_value != 0) {
+ __ mov(scratch, Operand(scratch, LSR, shift_value));
+ }
+ // Check that the *unsigned* result fits in a smi.  Neither of the
+ // two high-order bits can be set:
+ //  - 0x80000000: the high bit would be lost when smi tagging.
+ //  - 0x40000000: this number would convert to negative when smi
+ //    tagging.
+ // These two cases can only happen with shifts by 0 or 1 when handed
+ // a valid smi.
+ __ and_(scratch2, scratch, Operand(0xc0000000), SetCC);
+ deferred->Branch(ne);
+ break;
+ }
+ case Token::SAR: {
+ // ASR by immediate 0 means shifting 32 bits.
+ if (shift_value != 0) {
+ __ mov(scratch, Operand(scratch, ASR, shift_value));
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
+ deferred->BindExit();
+ frame_->EmitPush(tos);
+ break;
+ }
+
+ case Token::MOD: {
+ ASSERT(!reversed);
+ ASSERT(int_value >= 2);
+ ASSERT(IsPowerOf2(int_value));
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
+ unsigned mask = (0x80000000u | kSmiTagMask);
+ __ tst(tos, Operand(mask));
+ deferred->Branch(ne); // Go to deferred code on non-Smis and negative.
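+ // For a non-negative smi and a power-of-2 divisor, x % int_value is
+ // x & (int_value - 1).  The mask is shifted left by kSmiTagSize so it
+ // applies to the untagged value; the (zero) smi tag passes through.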
+ mask = (int_value << kSmiTagSize) - 1;
+ __ and_(tos, tos, Operand(mask));
+ deferred->BindExit();
+ frame_->EmitPush(tos);
+ break;
+ }
+
+ case Token::MUL: {
+ ASSERT(IsEasyToMultiplyBy(int_value));
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
+ unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
+ max_smi_that_wont_overflow <<= kSmiTagSize;
+ unsigned mask = 0x80000000u;
+ while ((mask & max_smi_that_wont_overflow) == 0) {
+ mask |= mask >> 1;
+ }
+ mask |= kSmiTagMask;
+ // This does a single mask that checks for a too high value in a
+ // conservative way and for a non-Smi. It also filters out negative
+ // numbers, unfortunately, but since this code is inline we prefer
+ // brevity to comprehensiveness.
+ __ tst(tos, Operand(mask));
+ deferred->Branch(ne);
+ MultiplyByKnownInt(masm_, tos, tos, int_value);
+ deferred->BindExit();
+ frame_->EmitPush(tos);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
void CodeGenerator::SmiOperation(Token::Value op,
Handle<Object> value,
bool reversed,
OverwriteMode mode) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
// of the operands is a literal smi. With this optimization, the
switch (op) {
case Token::ADD: {
DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0);
__ add(r0, r0, Operand(value), SetCC);
deferred->Branch(vs);
case Token::SUB: {
DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0);
if (reversed) {
__ rsb(r0, r0, Operand(value), SetCC);
case Token::BIT_XOR:
case Token::BIT_AND: {
DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0);
__ tst(r0, Operand(kSmiTagMask));
deferred->Branch(ne);
switch (op) {
}
int shift_value = int_value & 0x1f; // least significant 5 bits
DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, shift_value, false, mode);
+ new DeferredInlineSmiOperation(op, shift_value, false, mode, r0);
__ tst(r0, Operand(kSmiTagMask));
deferred->Branch(ne);
__ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
break;
}
DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0);
unsigned mask = (0x80000000u | kSmiTagMask);
__ tst(r0, Operand(mask));
deferred->Branch(ne); // Go to deferred code on non-Smis and negative.
break;
}
DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0);
unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
max_smi_that_wont_overflow <<= kSmiTagSize;
unsigned mask = 0x80000000u;
Expression* left,
Expression* right,
bool strict) {
- if (left != NULL) LoadAndSpill(left);
- if (right != NULL) LoadAndSpill(right);
+ VirtualFrame::RegisterAllocationScope scope(this);
+
+ if (left != NULL) Load(left);
+ if (right != NULL) Load(right);
- VirtualFrame::SpilledScope spilled_scope;
// sp[0] : y
// sp[1] : x
// result : cc register
// Strict only makes sense for equality comparisons.
ASSERT(!strict || cc == eq);
- JumpTarget exit;
- JumpTarget smi;
+ Register lhs;
+ Register rhs;
+
+ // We load the top two stack positions into registers chosen by the virtual
+ // frame. This should keep the register shuffling to a minimum.
// Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
if (cc == gt || cc == le) {
cc = ReverseCondition(cc);
- frame_->EmitPop(r1);
- frame_->EmitPop(r0);
+ lhs = frame_->PopToRegister();
+ rhs = frame_->PopToRegister(lhs); // Don't pop to the same register again!
} else {
- frame_->EmitPop(r0);
- frame_->EmitPop(r1);
+ rhs = frame_->PopToRegister();
+ lhs = frame_->PopToRegister(rhs); // Don't pop to the same register again!
}
- __ orr(r2, r0, Operand(r1));
- __ tst(r2, Operand(kSmiTagMask));
+
+ ASSERT(rhs.is(r0) || rhs.is(r1));
+ ASSERT(lhs.is(r0) || lhs.is(r1));
+
+ // Now we have the two sides in r0 and r1. We flush any other registers
+ // because the stub doesn't know about register allocation.
+ frame_->SpillAll();
+ Register scratch = VirtualFrame::scratch0();
+ __ orr(scratch, lhs, Operand(rhs));
+ __ tst(scratch, Operand(kSmiTagMask));
+ JumpTarget smi;
smi.Branch(eq);
// Perform non-smi comparison by stub.
// CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
// We call with 0 args because there are 0 on the stack.
+ if (!rhs.is(r0)) {
+ __ Swap(rhs, lhs, ip);
+ }
+
CompareStub stub(cc, strict);
frame_->CallStub(&stub, 0);
__ cmp(r0, Operand(0));
+ JumpTarget exit;
exit.Jump();
// Do smi comparisons by pointer comparison.
smi.Bind();
- __ cmp(r1, Operand(r0));
+ __ cmp(lhs, Operand(rhs));
exit.Bind();
cc_reg_ = cc;
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
CallFunctionFlags flags,
int position) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
// Push the arguments ("left-to-right") on the stack.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(has_cc());
Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
target->Branch(cc);
void CodeGenerator::CheckStack() {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ check stack");
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
// Put the lr setup instruction in the delay slot. kInstrSize is added to
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
VisitAndSpill(statements->at(i));
}
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Block");
CodeForStatementPosition(node);
node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
frame_->EmitPush(cp);
__ mov(r0, Operand(pairs));
frame_->EmitPush(r0);
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Declaration");
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ExpressionStatement");
CodeForStatementPosition(node);
Expression* expression = node->expression();
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "// EmptyStatement");
CodeForStatementPosition(node);
// nothing to do
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ IfStatement");
// Generate different code depending on which parts of the if statement
// are present or not.
void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ContinueStatement");
CodeForStatementPosition(node);
node->target()->continue_target()->Jump();
void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ BreakStatement");
CodeForStatementPosition(node);
node->target()->break_target()->Jump();
void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ReturnStatement");
CodeForStatementPosition(node);
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ WithEnterStatement");
CodeForStatementPosition(node);
LoadAndSpill(node->expression());
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ WithExitStatement");
CodeForStatementPosition(node);
// Pop context.
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ SwitchStatement");
CodeForStatementPosition(node);
node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ DoWhileStatement");
CodeForStatementPosition(node);
node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ WhileStatement");
CodeForStatementPosition(node);
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ForStatement");
CodeForStatementPosition(node);
if (node->init() != NULL) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ForInStatement");
CodeForStatementPosition(node);
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ TryCatchStatement");
CodeForStatementPosition(node);
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ TryFinallyStatement");
CodeForStatementPosition(node);
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ DebuggerStatament");
CodeForStatementPosition(node);
#ifdef ENABLE_DEBUGGER_SUPPORT
void CodeGenerator::InstantiateFunction(
Handle<SharedFunctionInfo> function_info) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
__ mov(r0, Operand(function_info));
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function info and instantiate it.
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
InstantiateFunction(node->shared_function_info());
ASSERT(frame_->height() == original_height + 1);
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Conditional");
JumpTarget then;
JumpTarget else_;
void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- VirtualFrame::SpilledScope spilled_scope;
if (slot->type() == Slot::LOOKUP) {
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(slot->var()->is_dynamic());
JumpTarget slow;
frame_->EmitPush(r0);
} else {
- // Special handling for locals allocated in registers.
- __ ldr(r0, SlotOperand(slot, r2));
- frame_->EmitPush(r0);
+ Register scratch = VirtualFrame::scratch0();
+ frame_->EmitPush(SlotOperand(slot, scratch));
if (slot->var()->mode() == Variable::CONST) {
// Const slots may contain 'the hole' value (the constant hasn't been
// initialized yet) which needs to be converted into the 'undefined'
// value.
Comment cmnt(masm_, "[ Unhole const");
- frame_->EmitPop(r0);
+ frame_->EmitPop(scratch);
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r0, ip);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- frame_->EmitPush(r0);
+ __ cmp(scratch, ip);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq);
+ frame_->EmitPush(scratch);
}
}
}
void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
ASSERT(slot != NULL);
if (slot->type() == Slot::LOOKUP) {
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(slot->var()->is_dynamic());
// For now, just do a runtime call.
} else {
ASSERT(!slot->var()->is_dynamic());
+ Register scratch = VirtualFrame::scratch0();
+ VirtualFrame::RegisterAllocationScope scope(this);
+ // The frame must be spilled when branching to this target.
JumpTarget exit;
+
if (init_state == CONST_INIT) {
ASSERT(slot->var()->mode() == Variable::CONST);
// Only the first const initialization must be executed (the slot
// still contains 'the hole' value). When the assignment is
// executed, the code is identical to a normal store (see below).
Comment cmnt(masm_, "[ Init const");
- __ ldr(r2, SlotOperand(slot, r2));
+ __ ldr(scratch, SlotOperand(slot, scratch));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r2, ip);
+ __ cmp(scratch, ip);
+ frame_->SpillAll();
exit.Branch(ne);
}
// initialize consts to 'the hole' value and by doing so, end up
- // calling this code. r2 may be loaded with context; used below in
- // RecordWrite.
+ // calling this code. scratch may be loaded with context; used below
+ // in RecordWrite.
- frame_->EmitPop(r0);
- __ str(r0, SlotOperand(slot, r2));
- frame_->EmitPush(r0);
+ Register tos = frame_->Peek();
+ __ str(tos, SlotOperand(slot, scratch));
if (slot->type() == Slot::CONTEXT) {
// Skip write barrier if the written value is a smi.
- __ tst(r0, Operand(kSmiTagMask));
+ __ tst(tos, Operand(kSmiTagMask));
+ // We don't use tos any more after here.
+ VirtualFrame::SpilledScope spilled_scope(frame_);
exit.Branch(eq);
- // r2 is loaded with context when calling SlotOperand above.
+ // scratch is loaded with context when calling SlotOperand above.
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
__ mov(r3, Operand(offset));
- __ RecordWrite(r2, r3, r1);
+ // r1 could be identical with tos, but that doesn't matter.
+ __ RecordWrite(scratch, r3, r1);
}
// If we definitely did not jump over the assignment, we do not need
// to bind the exit label. Doing so can defeat peephole
// optimization.
if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
+ frame_->SpillAll();
exit.Bind();
}
}
if (s->is_eval_scope()) {
Label next, fast;
- if (!context.is(tmp)) {
- __ mov(tmp, Operand(context));
- }
+ __ Move(tmp, context);
__ bind(&next);
// Terminate at global context.
__ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Slot");
LoadFromSlot(node, NOT_INSIDE_TYPEOF);
ASSERT(frame_->height() == original_height + 1);
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ VariableProxy");
Variable* var = node->var();
} else {
ASSERT(var->is_global());
Reference ref(this, node);
- ref.GetValueAndSpill();
+ ref.GetValue();
}
ASSERT(frame_->height() == original_height + 1);
}
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Literal");
- __ mov(r0, Operand(node->handle()));
- frame_->EmitPush(r0);
+ Register reg = frame_->GetTOSRegister();
+ __ mov(reg, Operand(node->handle()));
+ frame_->EmitPush(reg);
ASSERT(frame_->height() == original_height + 1);
}
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ RexExp Literal");
// Retrieve the literal array and check the allocated entry.
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ObjectLiteral");
// Load the function of this activation.
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ArrayLiteral");
// Load the function of this activation.
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
// Call runtime routine to allocate the catch extension object and
// assign the exception value to the catch variable.
Comment cmnt(masm_, "[ CatchExtensionObject");
void CodeGenerator::VisitAssignment(Assignment* node) {
+ VirtualFrame::RegisterAllocationScope scope(this);
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Assignment");
{ Reference target(this, node->target(), node->is_compound());
if (target.is_illegal()) {
// Fool the virtual frame into thinking that we left the assignment's
// value on the frame.
- __ mov(r0, Operand(Smi::FromInt(0)));
- frame_->EmitPush(r0);
+ Register tos = frame_->GetTOSRegister();
+ __ mov(tos, Operand(Smi::FromInt(0)));
+ frame_->EmitPush(tos);
ASSERT(frame_->height() == original_height + 1);
return;
}
if (node->op() == Token::ASSIGN ||
node->op() == Token::INIT_VAR ||
node->op() == Token::INIT_CONST) {
- LoadAndSpill(node->value());
+ Load(node->value());
} else { // Assignment is a compound assignment.
// Get the old value of the lhs.
- target.GetValueAndSpill();
+ target.GetValue();
Literal* literal = node->value()->AsLiteral();
bool overwrite =
(node->value()->AsBinaryOperation() != NULL &&
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
if (literal != NULL && literal->handle()->IsSmi()) {
- SmiOperation(node->binary_op(),
- literal->handle(),
- false,
- overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
- frame_->EmitPush(r0);
-
+ VirtualFrameSmiOperation(node->binary_op(),
+ literal->handle(),
+ false,
+ overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
- LoadAndSpill(node->value());
- GenericBinaryOperation(node->binary_op(),
- overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
- frame_->EmitPush(r0);
+ Load(node->value());
+ VirtualFrameBinaryOperation(node->binary_op(),
+ overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
}
}
Variable* var = node->target()->AsVariableProxy()->AsVariable();
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Throw");
LoadAndSpill(node->exception());
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Property");
{ Reference property(this, node);
- property.GetValueAndSpill();
+ property.GetValue();
}
ASSERT(frame_->height() == original_height + 1);
}
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Call");
Expression* function = node->expression();
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
JumpTarget leave, null, function, non_function_constructor;
void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
JumpTarget leave;
LoadAndSpill(args->at(0));
void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 2);
JumpTarget leave;
LoadAndSpill(args->at(0)); // Load the object.
void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
LoadAndSpill(args->at(0));
frame_->EmitPop(r0);
void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
// See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
ASSERT_EQ(args->length(), 3);
#ifdef ENABLE_LOGGING_AND_PROFILING
void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
LoadAndSpill(args->at(0));
frame_->EmitPop(r0);
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
// It is not yet implemented on ARM, so it always goes to the slow case.
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 2);
Comment cmnt(masm_, "[ GenerateFastCharCodeAt");
void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
LoadAndSpill(args->at(0));
JumpTarget answer;
void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
LoadAndSpill(args->at(0));
JumpTarget answer;
void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
LoadAndSpill(args->at(0));
frame_->EmitPop(r1);
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (%_ClassOf(arg) === 'Function')
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
LoadAndSpill(args->at(0));
frame_->EmitPop(r0);
void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
LoadAndSpill(args->at(0));
frame_->EmitPop(r0);
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 0);
// Get the frame pointer for the calling frame.
void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 0);
Label exit;
void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
// Satisfy contract with ArgumentsAccessStub:
void CodeGenerator::GenerateRandomHeapNumber(
ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 0);
Label slow_allocate_heapnumber;
void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
if (CheckForInlineRuntimeCall(node)) {
ASSERT((has_cc() && frame_->height() == original_height) ||
(!has_cc() && frame_->height() == original_height + 1));
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ UnaryOperation");
Token::Value op = node->op();
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ CountOperation");
bool is_postfix = node->is_postfix();
ASSERT(frame_->height() == original_height + 1);
return;
}
- target.GetValueAndSpill();
+ target.GetValue();
frame_->EmitPop(r0);
JumpTarget slow;
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ BinaryOperation");
if (node->op() == Token::AND || node->op() == Token::OR) {
+ VirtualFrame::SpilledScope spilled_scope(frame_);
GenerateLogicalBooleanOperation(node);
} else {
// Optimize for the case where (at least) one of the expressions
node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
if (rliteral != NULL && rliteral->handle()->IsSmi()) {
- LoadAndSpill(node->left());
- SmiOperation(node->op(),
- rliteral->handle(),
- false,
- overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
-
+ VirtualFrame::RegisterAllocationScope scope(this);
+ Load(node->left());
+ VirtualFrameSmiOperation(
+ node->op(),
+ rliteral->handle(),
+ false,
+ overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
- LoadAndSpill(node->right());
- SmiOperation(node->op(),
- lliteral->handle(),
- true,
- overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
-
+ VirtualFrame::RegisterAllocationScope scope(this);
+ Load(node->right());
+ VirtualFrameSmiOperation(node->op(),
+ lliteral->handle(),
+ true,
+ overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
} else {
+ VirtualFrame::RegisterAllocationScope scope(this);
OverwriteMode overwrite_mode = NO_OVERWRITE;
if (overwrite_left) {
overwrite_mode = OVERWRITE_LEFT;
} else if (overwrite_right) {
overwrite_mode = OVERWRITE_RIGHT;
}
- LoadAndSpill(node->left());
- LoadAndSpill(node->right());
- GenericBinaryOperation(node->op(), overwrite_mode);
+ Load(node->left());
+ Load(node->right());
+ VirtualFrameBinaryOperation(node->op(), overwrite_mode);
}
- frame_->EmitPush(r0);
}
ASSERT(!has_valid_frame() ||
(has_cc() && frame_->height() == original_height) ||
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
+ VirtualFrame::SpilledScope spilled_scope(frame_);
__ ldr(r0, frame_->Function());
frame_->EmitPush(r0);
ASSERT(frame_->height() == original_height + 1);
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ CompareOperation");
+ VirtualFrame::RegisterAllocationScope nonspilled_scope(this);
+
// Get the expressions from the node.
Expression* left = node->left();
Expression* right = node->right();
right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
// The 'null' value can only be equal to 'null' or 'undefined'.
if (left_is_null || right_is_null) {
- LoadAndSpill(left_is_null ? right : left);
- frame_->EmitPop(r0);
+ Load(left_is_null ? right : left);
+ Register tos = frame_->PopToRegister();
+ // JumpTargets can't cope with register allocation yet.
+ frame_->SpillAll();
__ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r0, ip);
+ __ cmp(tos, ip);
// The 'null' value is only equal to 'undefined' if using non-strict
// comparisons.
true_target()->Branch(eq);
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, Operand(ip));
+ __ cmp(tos, Operand(ip));
true_target()->Branch(eq);
- __ tst(r0, Operand(kSmiTagMask));
+ __ tst(tos, Operand(kSmiTagMask));
false_target()->Branch(eq);
// It can be an undetectable object.
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
- __ cmp(r0, Operand(1 << Map::kIsUndetectable));
+ __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
+ __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
+ __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
+ __ cmp(tos, Operand(1 << Map::kIsUndetectable));
}
cc_reg_ = eq;
right->AsLiteral()->handle()->IsString())) {
Handle<String> check(String::cast(*right->AsLiteral()->handle()));
- // Load the operand, move it to register r1.
+ // Load the operand, move it to a register.
LoadTypeofExpression(operation->expression());
- frame_->EmitPop(r1);
+ Register tos = frame_->PopToRegister();
+
+ // JumpTargets can't cope with register allocation yet.
+ frame_->SpillAll();
+
+ Register scratch = VirtualFrame::scratch0();
if (check->Equals(Heap::number_symbol())) {
- __ tst(r1, Operand(kSmiTagMask));
+ __ tst(tos, Operand(kSmiTagMask));
true_target()->Branch(eq);
- __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r1, ip);
+ __ cmp(tos, ip);
cc_reg_ = eq;
} else if (check->Equals(Heap::string_symbol())) {
- __ tst(r1, Operand(kSmiTagMask));
+ __ tst(tos, Operand(kSmiTagMask));
false_target()->Branch(eq);
- __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
// It can be an undetectable string object.
- __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
- __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
- __ cmp(r2, Operand(1 << Map::kIsUndetectable));
+ __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
+ __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
+ __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
false_target()->Branch(eq);
- __ ldrb(r2, FieldMemOperand(r1, Map::kInstanceTypeOffset));
- __ cmp(r2, Operand(FIRST_NONSTRING_TYPE));
+ __ ldrb(scratch, FieldMemOperand(tos, Map::kInstanceTypeOffset));
+ __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
cc_reg_ = lt;
} else if (check->Equals(Heap::boolean_symbol())) {
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r1, ip);
+ __ cmp(tos, ip);
true_target()->Branch(eq);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r1, ip);
+ __ cmp(tos, ip);
cc_reg_ = eq;
} else if (check->Equals(Heap::undefined_symbol())) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r1, ip);
+ __ cmp(tos, ip);
true_target()->Branch(eq);
- __ tst(r1, Operand(kSmiTagMask));
+ __ tst(tos, Operand(kSmiTagMask));
false_target()->Branch(eq);
// It can be an undetectable object.
- __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
- __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
- __ cmp(r2, Operand(1 << Map::kIsUndetectable));
+ __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
+ __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
+ __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
cc_reg_ = eq;
} else if (check->Equals(Heap::function_symbol())) {
- __ tst(r1, Operand(kSmiTagMask));
+ __ tst(tos, Operand(kSmiTagMask));
false_target()->Branch(eq);
- Register map_reg = r2;
- __ CompareObjectType(r1, map_reg, r1, JS_FUNCTION_TYPE);
+ Register map_reg = scratch;
+ __ CompareObjectType(tos, map_reg, tos, JS_FUNCTION_TYPE);
true_target()->Branch(eq);
// Regular expressions are callable so typeof == 'function'.
- __ CompareInstanceType(map_reg, r1, JS_REGEXP_TYPE);
+ __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE);
cc_reg_ = eq;
} else if (check->Equals(Heap::object_symbol())) {
- __ tst(r1, Operand(kSmiTagMask));
+ __ tst(tos, Operand(kSmiTagMask));
false_target()->Branch(eq);
__ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r1, ip);
+ __ cmp(tos, ip);
true_target()->Branch(eq);
- Register map_reg = r2;
- __ CompareObjectType(r1, map_reg, r1, JS_REGEXP_TYPE);
+ Register map_reg = scratch;
+ __ CompareObjectType(tos, map_reg, tos, JS_REGEXP_TYPE);
false_target()->Branch(eq);
// It can be an undetectable object.
- __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
- __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
- __ cmp(r1, Operand(1 << Map::kIsUndetectable));
+ __ ldrb(tos, FieldMemOperand(map_reg, Map::kBitFieldOffset));
+ __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
+ __ cmp(tos, Operand(1 << Map::kIsUndetectable));
false_target()->Branch(eq);
- __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
- __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+ __ ldrb(tos, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
+ __ cmp(tos, Operand(FIRST_JS_OBJECT_TYPE));
false_target()->Branch(lt);
- __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+ __ cmp(tos, Operand(LAST_JS_OBJECT_TYPE));
cc_reg_ = le;
} else {
break;
case Token::IN: {
+ VirtualFrame::SpilledScope scope(frame_);
LoadAndSpill(left);
LoadAndSpill(right);
frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
}
case Token::INSTANCEOF: {
+ VirtualFrame::SpilledScope scope(frame_);
LoadAndSpill(left);
LoadAndSpill(right);
InstanceofStub stub;
}
case NAMED: {
+ VirtualFrame::SpilledScope scope(frame);
Comment cmnt(masm, "[ Store to named Property");
// Call the appropriate IC code.
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
}
case KEYED: {
+ VirtualFrame::SpilledScope scope(frame);
Comment cmnt(masm, "[ Store to keyed Property");
Property* property = expression_->AsProperty();
ASSERT(property != NULL);
// Scratch register contains result when we fall through to here.
Register result = scratch;
__ bind(&found_in_symbol_table);
- if (!result.is(r0)) {
- __ mov(r0, result);
- }
+ __ Move(r0, result);
}
class VirtualFrame : public ZoneObject {
public:
+ class RegisterAllocationScope;
// A utility class to introduce a scope where the virtual frame is
// expected to remain spilled. The constructor spills the code
- // generator's current frame, but no attempt is made to require it
- // to stay spilled. It is intended as documentation while the code
- // generator is being transformed.
+ // generator's current frame, and keeps it spilled.
class SpilledScope BASE_EMBEDDED {
public:
- SpilledScope() {}
+ explicit SpilledScope(VirtualFrame* frame)
+ : old_is_spilled_(is_spilled_) {
+ if (frame != NULL) {
+ if (!is_spilled_) {
+ frame->SpillAll();
+ } else {
+ frame->AssertIsSpilled();
+ }
+ }
+ is_spilled_ = true;
+ }
+ ~SpilledScope() {
+ is_spilled_ = old_is_spilled_;
+ }
+ static bool is_spilled() { return is_spilled_; }
+
+ private:
+ static bool is_spilled_;
+ bool old_is_spilled_;
+
+ SpilledScope() { }
+
+ friend class RegisterAllocationScope;
+ };
+
+ // A utility class to introduce a scope where the virtual frame is not
+ // spilled, i.e. where register allocation occurs.  Eventually, when
+ // register allocation is ubiquitous, this class can be removed along
+ // with the (by then unused) SpilledScope class.
+ class RegisterAllocationScope BASE_EMBEDDED {
+ public:
+ explicit RegisterAllocationScope(CodeGenerator* cgen)
+ : cgen_(cgen),
+ old_is_spilled_(SpilledScope::is_spilled_) {
+ SpilledScope::is_spilled_ = false;
+ if (old_is_spilled_) {
+ VirtualFrame* frame = cgen->frame();
+ if (frame != NULL) {
+ frame->AssertIsSpilled();
+ }
+ }
+ }
+ ~RegisterAllocationScope() {
+ SpilledScope::is_spilled_ = old_is_spilled_;
+ if (old_is_spilled_) {
+ VirtualFrame* frame = cgen_->frame();
+ if (frame != NULL) {
+ frame->SpillAll();
+ }
+ }
+ }
+
+ private:
+ CodeGenerator* cgen_;
+ bool old_is_spilled_;
+
+ RegisterAllocationScope() { }
};
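+
+ // A sketch of the intended usage pattern (illustrative only, not part
+ // of the interface): code that wants register allocation wraps itself
+ // in a RegisterAllocationScope and drops back into a SpilledScope
+ // before anything that still requires a spilled frame, e.g. a stub call:
+ //
+ //   VirtualFrame::RegisterAllocationScope scope(this);  // In CodeGenerator.
+ //   Register tos = frame_->PopToRegister();
+ //   ...
+ //   { VirtualFrame::SpilledScope spilled(frame_);  // Spill for the stub.
+ //     frame_->CallStub(&stub, 0);
+ //   }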
// An illegal index into the virtual frame.
return element_count() - expression_base_index();
}
- int register_location(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num];
- }
-
- int register_location(Register reg) {
- return register_locations_[RegisterAllocator::ToNumber(reg)];
- }
-
- void set_register_location(Register reg, int index) {
- register_locations_[RegisterAllocator::ToNumber(reg)] = index;
- }
-
bool is_used(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num] != kIllegalIndex;
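+ // r0 and r1 can only hold top-of-stack elements, so whether they are
+ // in use follows from the TOS state; r2 to r6 each have a bit in
+ // register_allocation_map_.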
+ switch (num) {
+ case 0: { // r0.
+ return kR0InUse[top_of_stack_state_];
+ }
+ case 1: { // r1.
+ return kR1InUse[top_of_stack_state_];
+ }
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6: { // r2 to r6.
+ ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters);
+ ASSERT(num >= kFirstAllocatedRegister);
+ if ((register_allocation_map_ &
+ (1 << (num - kFirstAllocatedRegister))) == 0) {
+ return false;
+ } else {
+ return true;
+ }
+ }
+ default: {
+ ASSERT(num < kFirstAllocatedRegister ||
+ num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters);
+ return false;
+ }
+ }
}
bool is_used(Register reg) {
- return register_locations_[RegisterAllocator::ToNumber(reg)]
- != kIllegalIndex;
+ return is_used(RegisterAllocator::ToNumber(reg));
}
// Add extra in-memory elements to the top of the frame to match an actual
void Adjust(int count);
// Forget elements from the top of the frame to match an actual frame (eg,
- // the frame after a runtime call). No code is emitted.
+ // the frame after a runtime call). No code is emitted except to bring the
+ // frame to a spilled state.
void Forget(int count) {
- ASSERT(count >= 0);
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_ -= count;
- // On ARM, all elements are in memory, so there is no extra bookkeeping
- // (registers, copies, etc.) beyond dropping the elements.
+ SpillAll();
element_count_ -= count;
}
- // Forget count elements from the top of the frame and adjust the stack
- // pointer downward. This is used, for example, before merging frames at
- // break, continue, and return targets.
- void ForgetElements(int count);
-
// Spill all values from the frame to memory.
- inline void SpillAll();
+ void SpillAll();
+
+ void AssertIsSpilled() {
+ ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
+ ASSERT(register_allocation_map_ == 0);
+ }
+
+ void AssertIsNotSpilled() {
+ ASSERT(!SpilledScope::is_spilled());
+ }
// Spill all occurrences of a specific register from the frame.
void Spill(Register reg) {
- if (is_used(reg)) SpillElementAt(register_location(reg));
+ UNIMPLEMENTED();
}
// Spill all occurrences of an arbitrary register if possible. Return the
// register spilled or no_reg if it was not possible to free any register
- // (ie, they all have frame-external references).
+ // (i.e. they all have frame-external references).  Unimplemented.
Register SpillAnyRegister();
- // Prepare this virtual frame for merging to an expected frame by
- // performing some state changes that do not require generating
- // code. It is guaranteed that no code will be generated.
- void PrepareMergeTo(VirtualFrame* expected);
-
// Make this virtual frame have a state identical to an expected virtual
// frame. As a side effect, code may be emitted to make this frame match
// the expected one.
// registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump.
void DetachFromCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Unuse(i);
- }
+ AssertIsSpilled();
}
// (Re)attach a frame to its code generator. This informs the register
// Used when a code generator's frame is switched from NULL to this one by
// binding a label.
void AttachToCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Unuse(i);
- }
+ AssertIsSpilled();
}
// Emit code for the physical JS entry and exit frame sequences. After
void AllocateStackSlots();
// The current top of the expression stack as an assembly operand.
- MemOperand Top() { return MemOperand(sp, 0); }
+ MemOperand Top() {
+ AssertIsSpilled();
+ return MemOperand(sp, 0);
+ }
// An element of the expression stack as an assembly operand.
MemOperand ElementAt(int index) {
+ AssertIsSpilled();
return MemOperand(sp, index * kPointerSize);
}
- // Random-access store to a frame-top relative frame element. The result
- // becomes owned by the frame and is invalidated.
- void SetElementAt(int index, Result* value);
-
- // Set a frame element to a constant. The index is frame-top relative.
- void SetElementAt(int index, Handle<Object> value) {
- Result temp(value);
- SetElementAt(index, &temp);
- }
-
// A frame-allocated local as an assembly operand.
MemOperand LocalAt(int index) {
ASSERT(0 <= index);
return MemOperand(fp, kLocal0Offset - index * kPointerSize);
}
- // Push the value of a local frame slot on top of the frame and invalidate
- // the local slot. The slot should be written to before trying to read
- // from it again.
- void TakeLocalAt(int index) {
- TakeFrameSlotAt(local0_index() + index);
- }
-
// Push the address of the receiver slot on the frame.
void PushReceiverSlotAddress();
// The context frame slot.
MemOperand Context() { return MemOperand(fp, kContextOffset); }
- // Save the value of the esi register to the context frame slot.
- void SaveContextRegister();
-
- // Restore the esi register from the value of the context frame
- // slot.
- void RestoreContextRegister();
-
// A parameter as an assembly operand.
MemOperand ParameterAt(int index) {
// Index -1 corresponds to the receiver.
return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
}
- // Push the value of a paramter frame slot on top of the frame and
- // invalidate the parameter slot. The slot should be written to before
- // trying to read from it again.
- void TakeParameterAt(int index) {
- TakeFrameSlotAt(param0_index() + index);
- }
-
- // Store the top value on the virtual frame into a parameter frame slot.
- // The value is left in place on top of the frame.
- void StoreToParameterAt(int index) {
- StoreToFrameSlotAt(param0_index() + index);
- }
-
// The receiver frame slot.
MemOperand Receiver() { return ParameterAt(-1); }
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
void CallStub(CodeStub* stub, int arg_count) {
- Forget(arg_count);
+ if (arg_count != 0) Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
masm()->CallStub(stub);
}
// Drop one element.
void Drop() { Drop(1); }
- // Pop an element from the top of the expression stack. Returns a
- // Result, which may be a constant or a register.
- Result Pop();
+ // Pop an element from the top of the expression stack. Discards
+ // the result.
+ void Pop();
+
+ // Pop an element from the top of the expression stack.  The register
+ // returned is one normally used for top-of-stack register allocation,
+ // so its contents must not be relied on across a subsequent push.
+ Register PopToRegister(Register but_not_to_this_one = no_reg);
+
+ // Look at the top of the stack. The register returned is aliased and
+ // must be copied to a scratch register before modification.
+ Register Peek();
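A usage sketch for the new pop interface (the register names and the addition are illustrative assumptions, not taken from the patch); PopToRegister's optional argument keeps an already-live register out of the way:

  Register rhs = frame_->PopToRegister();     // pops the top element
  Register lhs = frame_->PopToRegister(rhs);  // guaranteed distinct from rhs
  __ add(r0, lhs, Operand(rhs));
  frame_->EmitPush(r0);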
// Pop and save an element from the top of the expression stack and
// emit a corresponding pop instruction.
void EmitPop(Register reg);
+ // Takes the top two elements and puts them in r0 (top element) and r1
+ // (second element).
+ void PopToR1R0();
+
+ // Takes the top element and puts it in r1.
+ void PopToR1();
+
+ // Takes the top element and puts it in r0.
+ void PopToR0();
+
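These helpers exist because stubs and runtime entries take their operands in fixed registers. As a sketch of the intent (an assumption about equivalence, not the patch's definition), PopToR1R0() has the same effect as the fully spilled sequence below, but avoids the memory traffic when the elements are already in registers:

  // Equivalent effect in a fully spilled frame:
  frame_->EmitPop(r0);  // top element
  frame_->EmitPop(r1);  // second element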
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
void EmitPush(Register reg);
+ void EmitPush(MemOperand operand);
+
+ // Get a register which is free and which must be immediately used to
+ // push on the top of the stack.
+ Register GetTOSRegister();
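GetTOSRegister() pairs with EmitPush: a hedged sketch of loading a value and pushing it without forcing a spill (the receiver register and the field loaded are illustrative):

  Register tos = frame_->GetTOSRegister();
  __ ldr(tos, FieldMemOperand(receiver, HeapObject::kMapOffset));
  frame_->EmitPush(tos);  // must follow immediately, per the contract above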
// Push multiple registers on the stack and the virtual frame. The
// registers are selected by setting bits in src_regs and are pushed
// in decreasing order: r15 .. r0.
void EmitPushMultiple(int count, int src_regs);
- // Push an element on the virtual frame.
- inline void Push(Handle<Object> value);
- inline void Push(Smi* value);
-
- // Nip removes zero or more elements from immediately below the top
- // of the frame, leaving the previous top-of-frame value on top of
- // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
- inline void Nip(int num_dropped);
-
- inline void SetTypeForLocalAt(int index, TypeInfo info);
- inline void SetTypeForParamAt(int index, TypeInfo info);
+ // Registers that are never used for TOS state or local allocation and
+ // are therefore safe to use as scratch.
+ static Register scratch0() { return r7; }
+ static Register scratch1() { return r9; }
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
+ // 5 states for the top of stack, which can be in memory or in r0 and r1.
+ enum TopOfStack { NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS,
+ TOS_STATES }; // TOS_STATES is a count of the states above, for table sizing.
+ static const int kMaxTOSRegisters = 2;
+
+ static const bool kR0InUse[TOS_STATES];
+ static const bool kR1InUse[TOS_STATES];
+ static const int kVirtualElements[TOS_STATES];
+ static const TopOfStack kStateAfterPop[TOS_STATES];
+ static const TopOfStack kStateAfterPush[TOS_STATES];
+ static const Register kTopRegister[TOS_STATES];
+ static const Register kBottomRegister[TOS_STATES];
+
+ // We allocate up to 5 locals in registers.
+ static const int kNumberOfAllocatedRegisters = 5;
+ // r2 to r6 are allocated to locals.
+ static const int kFirstAllocatedRegister = 2;
+
+ static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters];
+
+ static Register AllocatedRegister(int r) {
+ ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters);
+ return kAllocatedRegisters[r];
+ }
+
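These tables drive a small finite state machine over where the top elements live. An illustrative shape-check, assuming the enum order above (the authoritative values belong in virtual-frame-arm.cc, not this header):

  static void CheckTOSTableShape() {
    ASSERT(kVirtualElements[NO_TOS_REGISTERS] == 0);
    ASSERT(kVirtualElements[R0_TOS] == 1 && kVirtualElements[R1_TOS] == 1);
    ASSERT(kVirtualElements[R1_R0_TOS] == 2 && kVirtualElements[R0_R1_TOS] == 2);
    ASSERT(!kR0InUse[NO_TOS_REGISTERS] && !kR1InUse[NO_TOS_REGISTERS]);
    ASSERT(kR0InUse[R0_TOS] && !kR1InUse[R0_TOS]);
    ASSERT(kR0InUse[R0_R1_TOS] && kR1InUse[R0_R1_TOS]);
  }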
// The number of elements on the stack frame.
int element_count_;
+ TopOfStack top_of_stack_state_:3;
+ // Bit i is set when AllocatedRegister(i) currently holds a frame element.
+ int register_allocation_map_:kNumberOfAllocatedRegisters;
// The index of the element that is at the processor's stack pointer
- // (the sp register).
- int stack_pointer_;
-
- // The index of the register frame element using each register, or
- // kIllegalIndex if a register is not on the frame.
- int register_locations_[RegisterAllocator::kNumRegisters];
+ // (the sp register). For now, since everything is in memory, it is given
+ // by the number of elements on the not-very-virtual stack frame.
+ int stack_pointer() { return element_count_ - 1; }
// The number of frame-allocated locals and parameters respectively.
int parameter_count() { return cgen()->scope()->num_parameters(); }
int local_count() { return cgen()->scope()->num_stack_slots(); }
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
int fp_relative(int index) {
return (frame_pointer() - index) * kPointerSize;
}
- // Record an occurrence of a register in the virtual frame. This has the
- // effect of incrementing the register's external reference count and
- // of updating the index of the register's location in the frame.
- void Use(Register reg, int index) {
- ASSERT(!is_used(reg));
- set_register_location(reg, index);
- cgen()->allocator()->Use(reg);
- }
-
- // Record that a register reference has been dropped from the frame. This
- // decrements the register's external reference count and invalidates the
- // index of the register's location in the frame.
- void Unuse(Register reg) {
- ASSERT(is_used(reg));
- set_register_location(reg, kIllegalIndex);
- cgen()->allocator()->Unuse(reg);
- }
-
- // Spill the element at a particular index---write it to memory if
- // necessary, free any associated register, and forget its value if
- // constant.
- void SpillElementAt(int index);
-
- // Sync the element at a particular index. If it is a register or
- // constant that disagrees with the value on the stack, write it to memory.
- // Keep the element type as register or constant, and clear the dirty bit.
- void SyncElementAt(int index);
-
- // Sync a single unsynced element that lies beneath or at the stack pointer.
- void SyncElementBelowStackPointer(int index);
-
- // Sync a single unsynced element that lies just above the stack pointer.
- void SyncElementByPushing(int index);
-
- // Push the value of a frame slot (typically a local or parameter) on
- // top of the frame and invalidate the slot.
- void TakeFrameSlotAt(int index);
-
- // Store the value on top of the frame to a frame slot (typically a local
- // or parameter).
- void StoreToFrameSlotAt(int index);
-
// Spill all elements in registers. Spill the top spilled_args elements
// on the frame. Sync all other frame elements.
// Then drop dropped_args elements from the virtual frame, to match
// the effect of an upcoming call that will drop them from the stack.
void PrepareForCall(int spilled_args, int dropped_args);
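For example (a sketch; the counts depend entirely on the call site), a runtime call that finds two arguments on the frame and consumes both would be preceded by:

  frame_->PrepareForCall(2, 2);        // spill 2 args; 2 are dropped by the call
  __ CallRuntime(Runtime::kAbort, 2);  // illustrative runtime id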
- // Move frame elements currently in registers or constants, that
- // should be in memory in the expected frame, to memory.
- void MergeMoveRegistersToMemory(VirtualFrame* expected);
-
- // Make the register-to-register moves necessary to
- // merge this frame with the expected frame.
- // Register to memory moves must already have been made,
- // and memory to register moves must follow this call.
- // This is because some new memory-to-register moves are
- // created in order to break cycles of register moves.
- // Used in the implementation of MergeTo().
- void MergeMoveRegistersToRegisters(VirtualFrame* expected);
-
- // Make the memory-to-register and constant-to-register moves
- // needed to make this frame equal the expected frame.
- // Called after all register-to-memory and register-to-register
- // moves have been made. After this function returns, the frames
- // should be equal.
- void MergeMoveMemoryToRegisters(VirtualFrame* expected);
-
- // Invalidates a frame slot (puts an invalid frame element in it).
- // Copies on the frame are correctly handled, and if this slot was
- // the backing store of copies, the index of the new backing store
- // is returned. Otherwise, returns kIllegalIndex.
- // Register counts are correctly updated.
- int InvalidateFrameSlotAt(int index);
+ // If all top-of-stack registers are in use then the lowest one is pushed
+ // onto the physical stack and made free.
+ void EnsureOneFreeTOSRegister();
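A minimal sketch of the mechanism (an assumption for exposition; the real definition belongs in virtual-frame-arm.cc): when both TOS registers hold elements, flush the lower one to the physical stack and collapse to the matching one-register state.

  void VirtualFrame::EnsureOneFreeTOSRegister() {
    if (kVirtualElements[top_of_stack_state_] == kMaxTOSRegisters) {
      // Flush the element furthest from the top of the expression stack.
      __ push(kBottomRegister[top_of_stack_state_]);
      // The surviving element keeps its register.
      top_of_stack_state_ =
          kTopRegister[top_of_stack_state_].is(r0) ? R0_TOS : R1_TOS;
    }
  }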
inline bool Equals(VirtualFrame* other);