From: erik.corry@gmail.com Date: Thu, 8 Apr 2010 22:30:30 +0000 (+0000) Subject: Simple register allocation for ARM. Only top of expression X-Git-Tag: upstream/4.7.83~22051 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=fbefa72f810df0cb89772ef0f7bf3fc71fcdd9b6;p=platform%2Fupstream%2Fv8.git Simple register allocation for ARM. Only top of expression stack for now. Next step is probably fixing the binary op stubs so they can take swapped registers and fixing the deferred code so it doesn't insist that all registers except the two operands are flushed. Generates slightly worse code sometimes because the peephole push-pop elimination gets confused when we don't use the same register all the time (the old code used r0 always). Review URL: http://codereview.chromium.org/1604002 git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4368 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 --- diff --git a/src/arm/codegen-arm-inl.h b/src/arm/codegen-arm-inl.h index 17e18d9fd..6edec4d76 100644 --- a/src/arm/codegen-arm-inl.h +++ b/src/arm/codegen-arm-inl.h @@ -29,6 +29,8 @@ #ifndef V8_ARM_CODEGEN_ARM_INL_H_ #define V8_ARM_CODEGEN_ARM_INL_H_ +#include "virtual-frame-arm.h" + namespace v8 { namespace internal { @@ -43,6 +45,7 @@ void CodeGenerator::LoadConditionAndSpill(Expression* expression, void CodeGenerator::LoadAndSpill(Expression* expression) { + ASSERT(VirtualFrame::SpilledScope::is_spilled()); Load(expression); } @@ -57,11 +60,6 @@ void CodeGenerator::VisitStatementsAndSpill(ZoneList* statements) { } -void Reference::GetValueAndSpill() { - GetValue(); -} - - // Platform-specific inline functions. void DeferredCode::Jump() { __ jmp(&entry_label_); } diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc index 74e31e1bc..776853276 100644 --- a/src/arm/codegen-arm.cc +++ b/src/arm/codegen-arm.cc @@ -181,7 +181,7 @@ void CodeGenerator::Generate(CompilationInfo* info) { // for stack overflow. 
frame_->AllocateStackSlots(); - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); int heap_slots = scope()->num_heap_slots(); if (heap_slots > 0) { // Allocate local context. @@ -274,8 +274,6 @@ void CodeGenerator::Generate(CompilationInfo* info) { // fp, and lr have been pushed on the stack. Adjust the virtual // frame to match this state. frame_->Adjust(4); - allocator_->Unuse(r1); - allocator_->Unuse(lr); // Bind all the bailout labels to the beginning of the function. List* bailouts = info->bailouts(); @@ -505,6 +503,7 @@ void CodeGenerator::LoadCondition(Expression* x, has_valid_frame() && !has_cc() && frame_->height() == original_height) { + frame_->SpillAll(); true_target->Jump(); } } @@ -529,6 +528,7 @@ void CodeGenerator::Load(Expression* expr) { if (has_cc()) { // Convert cc_reg_ into a boolean value. + VirtualFrame::SpilledScope scope(frame_); JumpTarget loaded; JumpTarget materialize_true; materialize_true.Branch(cc_reg_); @@ -543,6 +543,7 @@ void CodeGenerator::Load(Expression* expr) { } if (true_target.is_linked() || false_target.is_linked()) { + VirtualFrame::SpilledScope scope(frame_); // We have at least one condition value that has been "translated" // into a branch, thus it needs to be loaded explicitly. 
JumpTarget loaded; @@ -577,14 +578,14 @@ void CodeGenerator::Load(Expression* expr) { void CodeGenerator::LoadGlobal() { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); __ ldr(r0, GlobalObject()); frame_->EmitPush(r0); } void CodeGenerator::LoadGlobalReceiver(Register scratch) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX)); __ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset)); @@ -594,7 +595,7 @@ void CodeGenerator::LoadGlobalReceiver(Register scratch) { void CodeGenerator::LoadTypeofExpression(Expression* expr) { // Special handling of identifiers as subexpressions of typeof. - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Variable* variable = expr->AsVariableProxy()->AsVariable(); if (variable != NULL && !variable->is_this() && variable->is_global()) { // For a global variable we build the property reference @@ -604,7 +605,7 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) { Literal key(variable->name()); Property property(&global, &key, RelocInfo::kNoPosition); Reference ref(this, &property); - ref.GetValueAndSpill(); + ref.GetValue(); } else if (variable != NULL && variable->slot() != NULL) { // For a variable that rewrites to a slot, we signal it is the immediate // subexpression of a typeof. 
@@ -634,7 +635,7 @@ Reference::~Reference() { void CodeGenerator::LoadReference(Reference* ref) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ LoadReference"); Expression* e = ref->expression(); Property* property = e->AsProperty(); @@ -669,16 +670,18 @@ void CodeGenerator::LoadReference(Reference* ref) { void CodeGenerator::UnloadReference(Reference* ref) { - VirtualFrame::SpilledScope spilled_scope; + int size = ref->size(); + ref->set_unloaded(); + if (size == 0) return; + // Pop a reference from the stack while preserving TOS. + VirtualFrame::RegisterAllocationScope scope(this); Comment cmnt(masm_, "[ UnloadReference"); - int size = ref->size(); if (size > 0) { - frame_->EmitPop(r0); + Register tos = frame_->PopToRegister(); frame_->Drop(size); - frame_->EmitPush(r0); + frame_->EmitPush(tos); } - ref->set_unloaded(); } @@ -687,7 +690,7 @@ void CodeGenerator::UnloadReference(Reference* ref) { // may jump to 'false_target' in case the register converts to 'false'. void CodeGenerator::ToBoolean(JumpTarget* true_target, JumpTarget* false_target) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); // Note: The generated code snippet does not change stack variables. // Only the condition code should be set. frame_->EmitPop(r0); @@ -729,15 +732,15 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target, void CodeGenerator::GenericBinaryOperation(Token::Value op, OverwriteMode overwrite_mode, int constant_rhs) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); // sp[0] : y // sp[1] : x // result : r0 // Stub is entered with a call: 'return address' is in lr. switch (op) { - case Token::ADD: // fall through. - case Token::SUB: // fall through. 
+ case Token::ADD: + case Token::SUB: case Token::MUL: case Token::DIV: case Token::MOD: @@ -756,9 +759,55 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op, case Token::COMMA: frame_->EmitPop(r0); - // simply discard left value + // Simply discard left value. + frame_->Drop(); + break; + + default: + // Other cases should have been handled before this point. + UNREACHABLE(); + break; + } +} + + +void CodeGenerator::VirtualFrameBinaryOperation(Token::Value op, + OverwriteMode overwrite_mode, + int constant_rhs) { + // top of virtual frame: y + // 2nd elt. on virtual frame : x + // result : top of virtual frame + + // Stub is entered with a call: 'return address' is in lr. + switch (op) { + case Token::ADD: // fall through. + case Token::SUB: // fall through. + case Token::MUL: + case Token::DIV: + case Token::MOD: + case Token::BIT_OR: + case Token::BIT_AND: + case Token::BIT_XOR: + case Token::SHL: + case Token::SHR: + case Token::SAR: { + frame_->PopToR1R0(); // Pop y to r0 and x to r1. + { + VirtualFrame::SpilledScope spilled_scope(frame_); + GenericBinaryOpStub stub(op, overwrite_mode, constant_rhs); + frame_->CallStub(&stub, 0); + } + frame_->EmitPush(r0); + break; + } + + case Token::COMMA: { + Register scratch = frame_->PopToRegister(); + // Simply discard left value. frame_->Drop(); + frame_->EmitPush(scratch); break; + } default: // Other cases should have been handled before this point. 
@@ -773,11 +822,13 @@ class DeferredInlineSmiOperation: public DeferredCode { DeferredInlineSmiOperation(Token::Value op, int value, bool reversed, - OverwriteMode overwrite_mode) + OverwriteMode overwrite_mode, + Register tos) : op_(op), value_(value), reversed_(reversed), - overwrite_mode_(overwrite_mode) { + overwrite_mode_(overwrite_mode), + tos_register_(tos) { set_comment("[ DeferredInlinedSmiOperation"); } @@ -788,6 +839,7 @@ class DeferredInlineSmiOperation: public DeferredCode { int value_; bool reversed_; OverwriteMode overwrite_mode_; + Register tos_register_; }; @@ -796,10 +848,10 @@ void DeferredInlineSmiOperation::Generate() { case Token::ADD: { // Revert optimistic add. if (reversed_) { - __ sub(r0, r0, Operand(Smi::FromInt(value_))); + __ sub(r0, tos_register_, Operand(Smi::FromInt(value_))); __ mov(r1, Operand(Smi::FromInt(value_))); } else { - __ sub(r1, r0, Operand(Smi::FromInt(value_))); + __ sub(r1, tos_register_, Operand(Smi::FromInt(value_))); __ mov(r0, Operand(Smi::FromInt(value_))); } break; @@ -808,10 +860,10 @@ void DeferredInlineSmiOperation::Generate() { case Token::SUB: { // Revert optimistic sub. 
if (reversed_) { - __ rsb(r0, r0, Operand(Smi::FromInt(value_))); + __ rsb(r0, tos_register_, Operand(Smi::FromInt(value_))); __ mov(r1, Operand(Smi::FromInt(value_))); } else { - __ add(r1, r0, Operand(Smi::FromInt(value_))); + __ add(r1, tos_register_, Operand(Smi::FromInt(value_))); __ mov(r0, Operand(Smi::FromInt(value_))); } break; @@ -825,9 +877,10 @@ void DeferredInlineSmiOperation::Generate() { case Token::BIT_XOR: case Token::BIT_AND: { if (reversed_) { + __ Move(r0, tos_register_); __ mov(r1, Operand(Smi::FromInt(value_))); } else { - __ mov(r1, Operand(r0)); + __ Move(r1, tos_register_); __ mov(r0, Operand(Smi::FromInt(value_))); } break; @@ -837,7 +890,7 @@ void DeferredInlineSmiOperation::Generate() { case Token::SHR: case Token::SAR: { if (!reversed_) { - __ mov(r1, Operand(r0)); + __ Move(r1, tos_register_); __ mov(r0, Operand(Smi::FromInt(value_))); } else { UNREACHABLE(); // Should have been handled in SmiOperation. @@ -853,6 +906,11 @@ void DeferredInlineSmiOperation::Generate() { GenericBinaryOpStub stub(op_, overwrite_mode_, value_); __ CallStub(&stub); + // The generic stub returns its value in r0, but that's not + // necessarily what we want. We want whatever the inlined code + // expected, which is that the answer is in the same register as + // the operand was. 
+ __ Move(tos_register_, r0); } @@ -877,11 +935,248 @@ static int BitPosition(unsigned x) { } +void CodeGenerator::VirtualFrameSmiOperation(Token::Value op, + Handle value, + bool reversed, + OverwriteMode mode) { + int int_value = Smi::cast(*value)->value(); + + bool something_to_inline; + switch (op) { + case Token::ADD: + case Token::SUB: + case Token::BIT_AND: + case Token::BIT_OR: + case Token::BIT_XOR: { + something_to_inline = true; + break; + } + case Token::SHL: + case Token::SHR: + case Token::SAR: { + if (reversed) { + something_to_inline = false; + } else { + something_to_inline = true; + } + break; + } + case Token::MOD: { + if (reversed || int_value < 2 || !IsPowerOf2(int_value)) { + something_to_inline = false; + } else { + something_to_inline = true; + } + break; + } + case Token::MUL: { + if (!IsEasyToMultiplyBy(int_value)) { + something_to_inline = false; + } else { + something_to_inline = true; + } + break; + } + default: { + something_to_inline = false; + break; + } + } + + if (!something_to_inline) { + if (!reversed) { + // Move the lhs to r1. + frame_->PopToR1(); + // Flush any other registers to the stack. + frame_->SpillAll(); + // Tell the virtual frame that TOS is in r1 (no code emitted). + frame_->EmitPush(r1); + // We know that r0 is free. + __ mov(r0, Operand(value)); + // Push r0 on the virtual frame (no code emitted). + frame_->EmitPush(r0); + // This likes having r1 and r0 on top of the stack. It pushes + // the answer on the virtual frame. + VirtualFrameBinaryOperation(op, mode, int_value); + } else { + // Move the rhs to r0. + frame_->PopToR0(); + // Flush any other registers to the stack. + frame_->SpillAll(); + // We know that r1 is free. + __ mov(r1, Operand(value)); + // Tell the virtual frame that TOS is in r1 (no code emitted). + frame_->EmitPush(r1); + // Push r0 on the virtual frame (no code emitted). + frame_->EmitPush(r0); + // This likes having r1 and r0 on top of the stack. 
It pushes + // the answer on the virtual frame. + VirtualFrameBinaryOperation(op, mode, kUnknownIntValue); + } + return; + } + + // We move the top of stack to a register (normally no move is invoved). + Register tos = frame_->PopToRegister(); + // All other registers are spilled. The deferred code expects one argument + // in a register and all other values are flushed to the stack. The + // answer is returned in the same register that the top of stack argument was + // in. + frame_->SpillAll(); + + switch (op) { + case Token::ADD: { + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); + + __ add(tos, tos, Operand(value), SetCC); + deferred->Branch(vs); + __ tst(tos, Operand(kSmiTagMask)); + deferred->Branch(ne); + deferred->BindExit(); + frame_->EmitPush(tos); + break; + } + + case Token::SUB: { + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); + + if (reversed) { + __ rsb(tos, tos, Operand(value), SetCC); + } else { + __ sub(tos, tos, Operand(value), SetCC); + } + deferred->Branch(vs); + __ tst(tos, Operand(kSmiTagMask)); + deferred->Branch(ne); + deferred->BindExit(); + frame_->EmitPush(tos); + break; + } + + + case Token::BIT_OR: + case Token::BIT_XOR: + case Token::BIT_AND: { + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); + __ tst(tos, Operand(kSmiTagMask)); + deferred->Branch(ne); + switch (op) { + case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break; + case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break; + case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break; + default: UNREACHABLE(); + } + deferred->BindExit(); + frame_->EmitPush(tos); + break; + } + + case Token::SHL: + case Token::SHR: + case Token::SAR: { + ASSERT(!reversed); + Register scratch = VirtualFrame::scratch0(); + Register scratch2 = VirtualFrame::scratch1(); + int shift_value = int_value & 0x1f; // least significant 5 bits + 
DeferredCode* deferred = + new DeferredInlineSmiOperation(op, shift_value, false, mode, tos); + __ tst(tos, Operand(kSmiTagMask)); + deferred->Branch(ne); + __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // remove tags + switch (op) { + case Token::SHL: { + if (shift_value != 0) { + __ mov(scratch, Operand(scratch, LSL, shift_value)); + } + // check that the *unsigned* result fits in a smi + __ add(scratch2, scratch, Operand(0x40000000), SetCC); + deferred->Branch(mi); + break; + } + case Token::SHR: { + // LSR by immediate 0 means shifting 32 bits. + if (shift_value != 0) { + __ mov(scratch, Operand(scratch, LSR, shift_value)); + } + // check that the *unsigned* result fits in a smi + // neither of the two high-order bits can be set: + // - 0x80000000: high bit would be lost when smi tagging + // - 0x40000000: this number would convert to negative when + // smi tagging these two cases can only happen with shifts + // by 0 or 1 when handed a valid smi + __ and_(scratch2, scratch, Operand(0xc0000000), SetCC); + deferred->Branch(ne); + break; + } + case Token::SAR: { + if (shift_value != 0) { + // ASR by immediate 0 means shifting 32 bits. + __ mov(scratch, Operand(scratch, ASR, shift_value)); + } + break; + } + default: UNREACHABLE(); + } + __ mov(tos, Operand(scratch, LSL, kSmiTagSize)); + deferred->BindExit(); + frame_->EmitPush(tos); + break; + } + + case Token::MOD: { + ASSERT(!reversed); + ASSERT(int_value >= 2); + ASSERT(IsPowerOf2(int_value)); + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); + unsigned mask = (0x80000000u | kSmiTagMask); + __ tst(tos, Operand(mask)); + deferred->Branch(ne); // Go to deferred code on non-Smis and negative. 
+ mask = (int_value << kSmiTagSize) - 1; + __ and_(tos, tos, Operand(mask)); + deferred->BindExit(); + frame_->EmitPush(tos); + break; + } + + case Token::MUL: { + ASSERT(IsEasyToMultiplyBy(int_value)); + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); + unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value; + max_smi_that_wont_overflow <<= kSmiTagSize; + unsigned mask = 0x80000000u; + while ((mask & max_smi_that_wont_overflow) == 0) { + mask |= mask >> 1; + } + mask |= kSmiTagMask; + // This does a single mask that checks for a too high value in a + // conservative way and for a non-Smi. It also filters out negative + // numbers, unfortunately, but since this code is inline we prefer + // brevity to comprehensiveness. + __ tst(tos, Operand(mask)); + deferred->Branch(ne); + MultiplyByKnownInt(masm_, tos, tos, int_value); + deferred->BindExit(); + frame_->EmitPush(tos); + break; + } + + default: + UNREACHABLE(); + break; + } +} + + void CodeGenerator::SmiOperation(Token::Value op, Handle value, bool reversed, OverwriteMode mode) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); // NOTE: This is an attempt to inline (a bit) more of the code for // some possible smi operations (like + and -) when (at least) one // of the operands is a literal smi. 
With this optimization, the @@ -900,7 +1195,7 @@ void CodeGenerator::SmiOperation(Token::Value op, switch (op) { case Token::ADD: { DeferredCode* deferred = - new DeferredInlineSmiOperation(op, int_value, reversed, mode); + new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0); __ add(r0, r0, Operand(value), SetCC); deferred->Branch(vs); @@ -912,7 +1207,7 @@ void CodeGenerator::SmiOperation(Token::Value op, case Token::SUB: { DeferredCode* deferred = - new DeferredInlineSmiOperation(op, int_value, reversed, mode); + new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0); if (reversed) { __ rsb(r0, r0, Operand(value), SetCC); @@ -931,7 +1226,7 @@ void CodeGenerator::SmiOperation(Token::Value op, case Token::BIT_XOR: case Token::BIT_AND: { DeferredCode* deferred = - new DeferredInlineSmiOperation(op, int_value, reversed, mode); + new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0); __ tst(r0, Operand(kSmiTagMask)); deferred->Branch(ne); switch (op) { @@ -953,7 +1248,7 @@ void CodeGenerator::SmiOperation(Token::Value op, } int shift_value = int_value & 0x1f; // least significant 5 bits DeferredCode* deferred = - new DeferredInlineSmiOperation(op, shift_value, false, mode); + new DeferredInlineSmiOperation(op, shift_value, false, mode, r0); __ tst(r0, Operand(kSmiTagMask)); deferred->Branch(ne); __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags @@ -1002,7 +1297,7 @@ void CodeGenerator::SmiOperation(Token::Value op, break; } DeferredCode* deferred = - new DeferredInlineSmiOperation(op, int_value, reversed, mode); + new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0); unsigned mask = (0x80000000u | kSmiTagMask); __ tst(r0, Operand(mask)); deferred->Branch(ne); // Go to deferred code on non-Smis and negative. 
@@ -1018,7 +1313,7 @@ void CodeGenerator::SmiOperation(Token::Value op, break; } DeferredCode* deferred = - new DeferredInlineSmiOperation(op, int_value, reversed, mode); + new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0); unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value; max_smi_that_wont_overflow <<= kSmiTagSize; unsigned mask = 0x80000000u; @@ -1064,10 +1359,11 @@ void CodeGenerator::Comparison(Condition cc, Expression* left, Expression* right, bool strict) { - if (left != NULL) LoadAndSpill(left); - if (right != NULL) LoadAndSpill(right); + VirtualFrame::RegisterAllocationScope scope(this); + + if (left != NULL) Load(left); + if (right != NULL) Load(right); - VirtualFrame::SpilledScope spilled_scope; // sp[0] : y // sp[1] : x // result : cc register @@ -1075,32 +1371,49 @@ void CodeGenerator::Comparison(Condition cc, // Strict only makes sense for equality comparisons. ASSERT(!strict || cc == eq); - JumpTarget exit; - JumpTarget smi; + Register lhs; + Register rhs; + + // We load the top two stack positions into registers chosen by the virtual + // frame. This should keep the register shuffling to a minimum. // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order. if (cc == gt || cc == le) { cc = ReverseCondition(cc); - frame_->EmitPop(r1); - frame_->EmitPop(r0); + lhs = frame_->PopToRegister(); + rhs = frame_->PopToRegister(lhs); // Don't pop to the same register again! } else { - frame_->EmitPop(r0); - frame_->EmitPop(r1); + rhs = frame_->PopToRegister(); + lhs = frame_->PopToRegister(rhs); // Don't pop to the same register again! } - __ orr(r2, r0, Operand(r1)); - __ tst(r2, Operand(kSmiTagMask)); + + ASSERT(rhs.is(r0) || rhs.is(r1)); + ASSERT(lhs.is(r0) || lhs.is(r1)); + + // Now we have the two sides in r0 and r1. We flush any other registers + // because the stub doesn't know about register allocation. 
+ frame_->SpillAll(); + Register scratch = VirtualFrame::scratch0(); + __ orr(scratch, lhs, Operand(rhs)); + __ tst(scratch, Operand(kSmiTagMask)); + JumpTarget smi; smi.Branch(eq); // Perform non-smi comparison by stub. // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0. // We call with 0 args because there are 0 on the stack. + if (!rhs.is(r0)) { + __ Swap(rhs, lhs, ip); + } + CompareStub stub(cc, strict); frame_->CallStub(&stub, 0); __ cmp(r0, Operand(0)); + JumpTarget exit; exit.Jump(); // Do smi comparisons by pointer comparison. smi.Bind(); - __ cmp(r1, Operand(r0)); + __ cmp(lhs, Operand(rhs)); exit.Bind(); cc_reg_ = cc; @@ -1111,7 +1424,7 @@ void CodeGenerator::Comparison(Condition cc, void CodeGenerator::CallWithArguments(ZoneList* args, CallFunctionFlags flags, int position) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); // Push the arguments ("left-to-right") on the stack. int arg_count = args->length(); for (int i = 0; i < arg_count; i++) { @@ -1133,7 +1446,7 @@ void CodeGenerator::CallWithArguments(ZoneList* args, void CodeGenerator::Branch(bool if_true, JumpTarget* target) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(has_cc()); Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_); target->Branch(cc); @@ -1142,7 +1455,7 @@ void CodeGenerator::Branch(bool if_true, JumpTarget* target) { void CodeGenerator::CheckStack() { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ check stack"); __ LoadRoot(ip, Heap::kStackLimitRootIndex); // Put the lr setup instruction in the delay slot. 
kInstrSize is added to @@ -1164,7 +1477,7 @@ void CodeGenerator::VisitStatements(ZoneList* statements) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); for (int i = 0; frame_ != NULL && i < statements->length(); i++) { VisitAndSpill(statements->at(i)); } @@ -1176,7 +1489,7 @@ void CodeGenerator::VisitBlock(Block* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ Block"); CodeForStatementPosition(node); node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); @@ -1190,7 +1503,7 @@ void CodeGenerator::VisitBlock(Block* node) { void CodeGenerator::DeclareGlobals(Handle pairs) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); frame_->EmitPush(cp); __ mov(r0, Operand(pairs)); frame_->EmitPush(r0); @@ -1205,7 +1518,7 @@ void CodeGenerator::VisitDeclaration(Declaration* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ Declaration"); Variable* var = node->proxy()->var(); ASSERT(var != NULL); // must have been resolved @@ -1274,7 +1587,7 @@ void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ ExpressionStatement"); CodeForStatementPosition(node); Expression* expression = node->expression(); @@ -1289,7 +1602,7 @@ void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, 
"// EmptyStatement"); CodeForStatementPosition(node); // nothing to do @@ -1301,7 +1614,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ IfStatement"); // Generate different code depending on which parts of the if statement // are present or not. @@ -1387,7 +1700,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) { void CodeGenerator::VisitContinueStatement(ContinueStatement* node) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ ContinueStatement"); CodeForStatementPosition(node); node->target()->continue_target()->Jump(); @@ -1395,7 +1708,7 @@ void CodeGenerator::VisitContinueStatement(ContinueStatement* node) { void CodeGenerator::VisitBreakStatement(BreakStatement* node) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ BreakStatement"); CodeForStatementPosition(node); node->target()->break_target()->Jump(); @@ -1403,7 +1716,7 @@ void CodeGenerator::VisitBreakStatement(BreakStatement* node) { void CodeGenerator::VisitReturnStatement(ReturnStatement* node) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ ReturnStatement"); CodeForStatementPosition(node); @@ -1426,7 +1739,7 @@ void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ WithEnterStatement"); CodeForStatementPosition(node); LoadAndSpill(node->expression()); @@ -1452,7 +1765,7 @@ void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) { #ifdef DEBUG int original_height = frame_->height(); 
#endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ WithExitStatement"); CodeForStatementPosition(node); // Pop context. @@ -1467,7 +1780,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ SwitchStatement"); CodeForStatementPosition(node); node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); @@ -1556,7 +1869,7 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ DoWhileStatement"); CodeForStatementPosition(node); node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); @@ -1629,7 +1942,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ WhileStatement"); CodeForStatementPosition(node); @@ -1678,7 +1991,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ ForStatement"); CodeForStatementPosition(node); if (node->init() != NULL) { @@ -1753,7 +2066,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ ForInStatement"); CodeForStatementPosition(node); @@ -1989,7 +2302,7 @@ void 
CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ TryCatchStatement"); CodeForStatementPosition(node); @@ -2110,7 +2423,7 @@ void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ TryFinallyStatement"); CodeForStatementPosition(node); @@ -2294,7 +2607,7 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ DebuggerStatament"); CodeForStatementPosition(node); #ifdef ENABLE_DEBUGGER_SUPPORT @@ -2307,7 +2620,7 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) { void CodeGenerator::InstantiateFunction( Handle function_info) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); __ mov(r0, Operand(function_info)); // Use the fast case closure allocation code that allocates in new // space for nested functions that don't need literals cloning. @@ -2330,7 +2643,7 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ FunctionLiteral"); // Build the function info and instantiate it. 
@@ -2351,7 +2664,7 @@ void CodeGenerator::VisitSharedFunctionInfoLiteral( #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ SharedFunctionInfoLiteral"); InstantiateFunction(node->shared_function_info()); ASSERT(frame_->height() == original_height + 1); @@ -2362,7 +2675,7 @@ void CodeGenerator::VisitConditional(Conditional* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ Conditional"); JumpTarget then; JumpTarget else_; @@ -2386,8 +2699,8 @@ void CodeGenerator::VisitConditional(Conditional* node) { void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { - VirtualFrame::SpilledScope spilled_scope; if (slot->type() == Slot::LOOKUP) { + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(slot->var()->is_dynamic()); JumpTarget slow; @@ -2445,19 +2758,18 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { frame_->EmitPush(r0); } else { - // Special handling for locals allocated in registers. - __ ldr(r0, SlotOperand(slot, r2)); - frame_->EmitPush(r0); + Register scratch = VirtualFrame::scratch0(); + frame_->EmitPush(SlotOperand(slot, scratch)); if (slot->var()->mode() == Variable::CONST) { // Const slots may contain 'the hole' value (the constant hasn't been // initialized yet) which needs to be converted into the 'undefined' // value. 
Comment cmnt(masm_, "[ Unhole const"); - frame_->EmitPop(r0); + frame_->EmitPop(scratch); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r0, ip); - __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); - frame_->EmitPush(r0); + __ cmp(scratch, ip); + __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq); + frame_->EmitPush(scratch); } } } @@ -2466,6 +2778,7 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { ASSERT(slot != NULL); if (slot->type() == Slot::LOOKUP) { + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(slot->var()->is_dynamic()); // For now, just do a runtime call. @@ -2499,17 +2812,22 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { } else { ASSERT(!slot->var()->is_dynamic()); + Register scratch = VirtualFrame::scratch0(); + VirtualFrame::RegisterAllocationScope scope(this); + // The frame must be spilled when branching to this target. JumpTarget exit; + if (init_state == CONST_INIT) { ASSERT(slot->var()->mode() == Variable::CONST); // Only the first const initialization must be executed (the slot // still contains 'the hole' value). When the assignment is // executed, the code is identical to a normal store (see below). Comment cmnt(masm_, "[ Init const"); - __ ldr(r2, SlotOperand(slot, r2)); + __ ldr(scratch, SlotOperand(slot, scratch)); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r2, ip); + __ cmp(scratch, ip); + frame_->SpillAll(); exit.Branch(ne); } @@ -2522,22 +2840,25 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { // initialize consts to 'the hole' value and by doing so, end up // calling this code. r2 may be loaded with context; used below in // RecordWrite. 
- frame_->EmitPop(r0); - __ str(r0, SlotOperand(slot, r2)); - frame_->EmitPush(r0); + Register tos = frame_->Peek(); + __ str(tos, SlotOperand(slot, scratch)); if (slot->type() == Slot::CONTEXT) { // Skip write barrier if the written value is a smi. - __ tst(r0, Operand(kSmiTagMask)); + __ tst(tos, Operand(kSmiTagMask)); + // We don't use tos any more after here. + VirtualFrame::SpilledScope spilled_scope(frame_); exit.Branch(eq); - // r2 is loaded with context when calling SlotOperand above. + // scratch is loaded with context when calling SlotOperand above. int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; __ mov(r3, Operand(offset)); - __ RecordWrite(r2, r3, r1); + // r1 could be identical with tos, but that doesn't matter. + __ RecordWrite(scratch, r3, r1); } // If we definitely did not jump over the assignment, we do not need // to bind the exit label. Doing so can defeat peephole // optimization. if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) { + frame_->SpillAll(); exit.Bind(); } } @@ -2574,9 +2895,7 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot, if (s->is_eval_scope()) { Label next, fast; - if (!context.is(tmp)) { - __ mov(tmp, Operand(context)); - } + __ Move(tmp, context); __ bind(&next); // Terminate at global context. 
__ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset)); @@ -2617,7 +2936,6 @@ void CodeGenerator::VisitSlot(Slot* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ Slot"); LoadFromSlot(node, NOT_INSIDE_TYPEOF); ASSERT(frame_->height() == original_height + 1); @@ -2628,7 +2946,6 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ VariableProxy"); Variable* var = node->var(); @@ -2638,7 +2955,7 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) { } else { ASSERT(var->is_global()); Reference ref(this, node); - ref.GetValueAndSpill(); + ref.GetValue(); } ASSERT(frame_->height() == original_height + 1); } @@ -2648,10 +2965,10 @@ void CodeGenerator::VisitLiteral(Literal* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ Literal"); - __ mov(r0, Operand(node->handle())); - frame_->EmitPush(r0); + Register reg = frame_->GetTOSRegister(); + __ mov(reg, Operand(node->handle())); + frame_->EmitPush(reg); ASSERT(frame_->height() == original_height + 1); } @@ -2660,7 +2977,7 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ RexExp Literal"); // Retrieve the literal array and check the allocated entry. @@ -2704,7 +3021,7 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ ObjectLiteral"); // Load the function of this activation. 
@@ -2785,7 +3102,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ ArrayLiteral"); // Load the function of this activation. @@ -2844,7 +3161,7 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); // Call runtime routine to allocate the catch extension object and // assign the exception value to the catch variable. Comment cmnt(masm_, "[ CatchExtensionObject"); @@ -2857,18 +3174,19 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) { void CodeGenerator::VisitAssignment(Assignment* node) { + VirtualFrame::RegisterAllocationScope scope(this); #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ Assignment"); { Reference target(this, node->target(), node->is_compound()); if (target.is_illegal()) { // Fool the virtual frame into thinking that we left the assignment's // value on the frame. - __ mov(r0, Operand(Smi::FromInt(0))); - frame_->EmitPush(r0); + Register tos = frame_->GetTOSRegister(); + __ mov(tos, Operand(Smi::FromInt(0))); + frame_->EmitPush(tos); ASSERT(frame_->height() == original_height + 1); return; } @@ -2876,27 +3194,24 @@ void CodeGenerator::VisitAssignment(Assignment* node) { if (node->op() == Token::ASSIGN || node->op() == Token::INIT_VAR || node->op() == Token::INIT_CONST) { - LoadAndSpill(node->value()); + Load(node->value()); } else { // Assignment is a compound assignment. // Get the old value of the lhs. 
- target.GetValueAndSpill(); + target.GetValue(); Literal* literal = node->value()->AsLiteral(); bool overwrite = (node->value()->AsBinaryOperation() != NULL && node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); if (literal != NULL && literal->handle()->IsSmi()) { - SmiOperation(node->binary_op(), - literal->handle(), - false, - overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE); - frame_->EmitPush(r0); - + VirtualFrameSmiOperation(node->binary_op(), + literal->handle(), + false, + overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE); } else { - LoadAndSpill(node->value()); - GenericBinaryOperation(node->binary_op(), - overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE); - frame_->EmitPush(r0); + Load(node->value()); + VirtualFrameBinaryOperation(node->binary_op(), + overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE); } } Variable* var = node->target()->AsVariableProxy()->AsVariable(); @@ -2925,7 +3240,7 @@ void CodeGenerator::VisitThrow(Throw* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ Throw"); LoadAndSpill(node->exception()); @@ -2940,11 +3255,11 @@ void CodeGenerator::VisitProperty(Property* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ Property"); { Reference property(this, node); - property.GetValueAndSpill(); + property.GetValue(); } ASSERT(frame_->height() == original_height + 1); } @@ -2954,7 +3269,7 @@ void CodeGenerator::VisitCall(Call* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ Call"); Expression* function = node->expression(); @@ -3145,7 +3460,7 @@ void CodeGenerator::VisitCallNew(CallNew* node) { #ifdef DEBUG int original_height = frame_->height(); 
#endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ CallNew"); // According to ECMA-262, section 11.2.2, page 44, the function @@ -3185,7 +3500,7 @@ void CodeGenerator::VisitCallNew(CallNew* node) { void CodeGenerator::GenerateClassOf(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); JumpTarget leave, null, function, non_function_constructor; @@ -3245,7 +3560,7 @@ void CodeGenerator::GenerateClassOf(ZoneList* args) { void CodeGenerator::GenerateValueOf(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); JumpTarget leave; LoadAndSpill(args->at(0)); @@ -3264,7 +3579,7 @@ void CodeGenerator::GenerateValueOf(ZoneList* args) { void CodeGenerator::GenerateSetValueOf(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 2); JumpTarget leave; LoadAndSpill(args->at(0)); // Load the object. @@ -3289,7 +3604,7 @@ void CodeGenerator::GenerateSetValueOf(ZoneList* args) { void CodeGenerator::GenerateIsSmi(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); LoadAndSpill(args->at(0)); frame_->EmitPop(r0); @@ -3299,7 +3614,7 @@ void CodeGenerator::GenerateIsSmi(ZoneList* args) { void CodeGenerator::GenerateLog(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc. 
ASSERT_EQ(args->length(), 3); #ifdef ENABLE_LOGGING_AND_PROFILING @@ -3315,7 +3630,7 @@ void CodeGenerator::GenerateLog(ZoneList* args) { void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); LoadAndSpill(args->at(0)); frame_->EmitPop(r0); @@ -3347,7 +3662,7 @@ void CodeGenerator::GenerateMathSqrt(ZoneList* args) { // undefined in order to trigger the slow case, Runtime_StringCharCodeAt. // It is not yet implemented on ARM, so it always goes to the slow case. void CodeGenerator::GenerateFastCharCodeAt(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 2); Comment(masm_, "[ GenerateFastCharCodeAt"); @@ -3465,7 +3780,7 @@ void CodeGenerator::GenerateCharFromCode(ZoneList* args) { void CodeGenerator::GenerateIsArray(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); LoadAndSpill(args->at(0)); JumpTarget answer; @@ -3484,7 +3799,7 @@ void CodeGenerator::GenerateIsArray(ZoneList* args) { void CodeGenerator::GenerateIsRegExp(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); LoadAndSpill(args->at(0)); JumpTarget answer; @@ -3505,7 +3820,7 @@ void CodeGenerator::GenerateIsRegExp(ZoneList* args) { void CodeGenerator::GenerateIsObject(ZoneList* args) { // This generates a fast version of: // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp') - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); LoadAndSpill(args->at(0)); frame_->EmitPop(r1); @@ -3535,7 +3850,7 @@ void CodeGenerator::GenerateIsObject(ZoneList* args) { void CodeGenerator::GenerateIsFunction(ZoneList* args) { // This generates a fast 
version of: // (%_ClassOf(arg) === 'Function') - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); LoadAndSpill(args->at(0)); frame_->EmitPop(r0); @@ -3548,7 +3863,7 @@ void CodeGenerator::GenerateIsFunction(ZoneList* args) { void CodeGenerator::GenerateIsUndetectableObject(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); LoadAndSpill(args->at(0)); frame_->EmitPop(r0); @@ -3562,7 +3877,7 @@ void CodeGenerator::GenerateIsUndetectableObject(ZoneList* args) { void CodeGenerator::GenerateIsConstructCall(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 0); // Get the frame pointer for the calling frame. @@ -3584,7 +3899,7 @@ void CodeGenerator::GenerateIsConstructCall(ZoneList* args) { void CodeGenerator::GenerateArgumentsLength(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 0); Label exit; @@ -3608,7 +3923,7 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList* args) { void CodeGenerator::GenerateArguments(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); // Satisfy contract with ArgumentsAccessStub: @@ -3626,7 +3941,7 @@ void CodeGenerator::GenerateArguments(ZoneList* args) { void CodeGenerator::GenerateRandomHeapNumber( ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 0); Label slow_allocate_heapnumber; @@ -3728,7 +4043,7 @@ void CodeGenerator::GenerateMathCos(ZoneList* args) { void CodeGenerator::GenerateObjectEquals(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); 
ASSERT(args->length() == 2); // Load the two objects into registers and perform the comparison. @@ -3745,7 +4060,7 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); if (CheckForInlineRuntimeCall(node)) { ASSERT((has_cc() && frame_->height() == original_height) || (!has_cc() && frame_->height() == original_height + 1)); @@ -3791,7 +4106,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ UnaryOperation"); Token::Value op = node->op(); @@ -3922,7 +4237,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ CountOperation"); bool is_postfix = node->is_postfix(); @@ -3950,7 +4265,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { ASSERT(frame_->height() == original_height + 1); return; } - target.GetValueAndSpill(); + target.GetValue(); frame_->EmitPop(r0); JumpTarget slow; @@ -4131,10 +4446,10 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ BinaryOperation"); if (node->op() == Token::AND || node->op() == Token::OR) { + VirtualFrame::SpilledScope spilled_scope(frame_); GenerateLogicalBooleanOperation(node); } else { // Optimize for the case where (at least) one of the expressions @@ -4151,31 +4466,32 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { node->right()->AsBinaryOperation()->ResultOverwriteAllowed()); if (rliteral != NULL && 
rliteral->handle()->IsSmi()) { - LoadAndSpill(node->left()); - SmiOperation(node->op(), - rliteral->handle(), - false, - overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE); - + VirtualFrame::RegisterAllocationScope scope(this); + Load(node->left()); + VirtualFrameSmiOperation( + node->op(), + rliteral->handle(), + false, + overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE); } else if (lliteral != NULL && lliteral->handle()->IsSmi()) { - LoadAndSpill(node->right()); - SmiOperation(node->op(), - lliteral->handle(), - true, - overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE); - + VirtualFrame::RegisterAllocationScope scope(this); + Load(node->right()); + VirtualFrameSmiOperation(node->op(), + lliteral->handle(), + true, + overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE); } else { + VirtualFrame::RegisterAllocationScope scope(this); OverwriteMode overwrite_mode = NO_OVERWRITE; if (overwrite_left) { overwrite_mode = OVERWRITE_LEFT; } else if (overwrite_right) { overwrite_mode = OVERWRITE_RIGHT; } - LoadAndSpill(node->left()); - LoadAndSpill(node->right()); - GenericBinaryOperation(node->op(), overwrite_mode); + Load(node->left()); + Load(node->right()); + VirtualFrameBinaryOperation(node->op(), overwrite_mode); } - frame_->EmitPush(r0); } ASSERT(!has_valid_frame() || (has_cc() && frame_->height() == original_height) || @@ -4187,7 +4503,7 @@ void CodeGenerator::VisitThisFunction(ThisFunction* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); __ ldr(r0, frame_->Function()); frame_->EmitPush(r0); ASSERT(frame_->height() == original_height + 1); @@ -4198,9 +4514,10 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ CompareOperation"); + VirtualFrame::RegisterAllocationScope nonspilled_scope(this); + // Get the expressions 
from the node. Expression* left = node->left(); Expression* right = node->right(); @@ -4217,10 +4534,12 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { right->AsLiteral() != NULL && right->AsLiteral()->IsNull(); // The 'null' value can only be equal to 'null' or 'undefined'. if (left_is_null || right_is_null) { - LoadAndSpill(left_is_null ? right : left); - frame_->EmitPop(r0); + Load(left_is_null ? right : left); + Register tos = frame_->PopToRegister(); + // JumpTargets can't cope with register allocation yet. + frame_->SpillAll(); __ LoadRoot(ip, Heap::kNullValueRootIndex); - __ cmp(r0, ip); + __ cmp(tos, ip); // The 'null' value is only equal to 'undefined' if using non-strict // comparisons. @@ -4228,17 +4547,17 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { true_target()->Branch(eq); __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(r0, Operand(ip)); + __ cmp(tos, Operand(ip)); true_target()->Branch(eq); - __ tst(r0, Operand(kSmiTagMask)); + __ tst(tos, Operand(kSmiTagMask)); false_target()->Branch(eq); // It can be an undetectable object. - __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ ldrb(r0, FieldMemOperand(r0, Map::kBitFieldOffset)); - __ and_(r0, r0, Operand(1 << Map::kIsUndetectable)); - __ cmp(r0, Operand(1 << Map::kIsUndetectable)); + __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset)); + __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset)); + __ and_(tos, tos, Operand(1 << Map::kIsUndetectable)); + __ cmp(tos, Operand(1 << Map::kIsUndetectable)); } cc_reg_ = eq; @@ -4257,90 +4576,95 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { right->AsLiteral()->handle()->IsString())) { Handle check(String::cast(*right->AsLiteral()->handle())); - // Load the operand, move it to register r1. + // Load the operand, move it to a register. 
LoadTypeofExpression(operation->expression()); - frame_->EmitPop(r1); + Register tos = frame_->PopToRegister(); + + // JumpTargets can't cope with register allocation yet. + frame_->SpillAll(); + + Register scratch = VirtualFrame::scratch0(); if (check->Equals(Heap::number_symbol())) { - __ tst(r1, Operand(kSmiTagMask)); + __ tst(tos, Operand(kSmiTagMask)); true_target()->Branch(eq); - __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); - __ cmp(r1, ip); + __ cmp(tos, ip); cc_reg_ = eq; } else if (check->Equals(Heap::string_symbol())) { - __ tst(r1, Operand(kSmiTagMask)); + __ tst(tos, Operand(kSmiTagMask)); false_target()->Branch(eq); - __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset)); // It can be an undetectable string object. - __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset)); - __ and_(r2, r2, Operand(1 << Map::kIsUndetectable)); - __ cmp(r2, Operand(1 << Map::kIsUndetectable)); + __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset)); + __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable)); + __ cmp(scratch, Operand(1 << Map::kIsUndetectable)); false_target()->Branch(eq); - __ ldrb(r2, FieldMemOperand(r1, Map::kInstanceTypeOffset)); - __ cmp(r2, Operand(FIRST_NONSTRING_TYPE)); + __ ldrb(scratch, FieldMemOperand(tos, Map::kInstanceTypeOffset)); + __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE)); cc_reg_ = lt; } else if (check->Equals(Heap::boolean_symbol())) { __ LoadRoot(ip, Heap::kTrueValueRootIndex); - __ cmp(r1, ip); + __ cmp(tos, ip); true_target()->Branch(eq); __ LoadRoot(ip, Heap::kFalseValueRootIndex); - __ cmp(r1, ip); + __ cmp(tos, ip); cc_reg_ = eq; } else if (check->Equals(Heap::undefined_symbol())) { __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(r1, ip); + __ cmp(tos, ip); true_target()->Branch(eq); - __ tst(r1, Operand(kSmiTagMask)); + __ 
tst(tos, Operand(kSmiTagMask)); false_target()->Branch(eq); // It can be an undetectable object. - __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset)); - __ and_(r2, r2, Operand(1 << Map::kIsUndetectable)); - __ cmp(r2, Operand(1 << Map::kIsUndetectable)); + __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset)); + __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable)); + __ cmp(scratch, Operand(1 << Map::kIsUndetectable)); cc_reg_ = eq; } else if (check->Equals(Heap::function_symbol())) { - __ tst(r1, Operand(kSmiTagMask)); + __ tst(tos, Operand(kSmiTagMask)); false_target()->Branch(eq); - Register map_reg = r2; - __ CompareObjectType(r1, map_reg, r1, JS_FUNCTION_TYPE); + Register map_reg = scratch; + __ CompareObjectType(tos, map_reg, tos, JS_FUNCTION_TYPE); true_target()->Branch(eq); // Regular expressions are callable so typeof == 'function'. - __ CompareInstanceType(map_reg, r1, JS_REGEXP_TYPE); + __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE); cc_reg_ = eq; } else if (check->Equals(Heap::object_symbol())) { - __ tst(r1, Operand(kSmiTagMask)); + __ tst(tos, Operand(kSmiTagMask)); false_target()->Branch(eq); __ LoadRoot(ip, Heap::kNullValueRootIndex); - __ cmp(r1, ip); + __ cmp(tos, ip); true_target()->Branch(eq); - Register map_reg = r2; - __ CompareObjectType(r1, map_reg, r1, JS_REGEXP_TYPE); + Register map_reg = scratch; + __ CompareObjectType(tos, map_reg, tos, JS_REGEXP_TYPE); false_target()->Branch(eq); // It can be an undetectable object. 
- __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset)); - __ and_(r1, r1, Operand(1 << Map::kIsUndetectable)); - __ cmp(r1, Operand(1 << Map::kIsUndetectable)); + __ ldrb(tos, FieldMemOperand(map_reg, Map::kBitFieldOffset)); + __ and_(tos, tos, Operand(1 << Map::kIsUndetectable)); + __ cmp(tos, Operand(1 << Map::kIsUndetectable)); false_target()->Branch(eq); - __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset)); - __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE)); + __ ldrb(tos, FieldMemOperand(map_reg, Map::kInstanceTypeOffset)); + __ cmp(tos, Operand(FIRST_JS_OBJECT_TYPE)); false_target()->Branch(lt); - __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE)); + __ cmp(tos, Operand(LAST_JS_OBJECT_TYPE)); cc_reg_ = le; } else { @@ -4379,6 +4703,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { break; case Token::IN: { + VirtualFrame::SpilledScope scope(frame_); LoadAndSpill(left); LoadAndSpill(right); frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2); @@ -4387,6 +4712,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { } case Token::INSTANCEOF: { + VirtualFrame::SpilledScope scope(frame_); LoadAndSpill(left); LoadAndSpill(right); InstanceofStub stub; @@ -4518,6 +4844,7 @@ void Reference::SetValue(InitState init_state) { } case NAMED: { + VirtualFrame::SpilledScope scope(frame); Comment cmnt(masm, "[ Store to named Property"); // Call the appropriate IC code. Handle ic(Builtins::builtin(Builtins::StoreIC_Initialize)); @@ -4533,6 +4860,7 @@ void Reference::SetValue(InitState init_state) { } case KEYED: { + VirtualFrame::SpilledScope scope(frame); Comment cmnt(masm, "[ Store to keyed Property"); Property* property = expression_->AsProperty(); ASSERT(property != NULL); @@ -7429,9 +7757,7 @@ void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Scratch register contains result when we fall through to here. 
Register result = scratch; __ bind(&found_in_symbol_table); - if (!result.is(r0)) { - __ mov(r0, result); - } + __ Move(r0, result); } diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h index 9382d6cca..3b7299852 100644 --- a/src/arm/codegen-arm.h +++ b/src/arm/codegen-arm.h @@ -92,10 +92,6 @@ class Reference BASE_EMBEDDED { // If the reference is not consumed, it is left in place under its value. void GetValue(); - // Generate code to pop a reference, push the value of the reference, - // and then spill the stack frame. - inline void GetValueAndSpill(); - // Generate code to store the value on top of the expression stack in the // reference. The reference is expected to be immediately below the value // on the expression stack. The value is stored in the location specified @@ -314,6 +310,9 @@ class CodeGenerator: public AstVisitor { void GenericBinaryOperation(Token::Value op, OverwriteMode overwrite_mode, int known_rhs = kUnknownIntValue); + void VirtualFrameBinaryOperation(Token::Value op, + OverwriteMode overwrite_mode, + int known_rhs = kUnknownIntValue); void Comparison(Condition cc, Expression* left, Expression* right, @@ -324,6 +323,11 @@ class CodeGenerator: public AstVisitor { bool reversed, OverwriteMode mode); + void VirtualFrameSmiOperation(Token::Value op, + Handle value, + bool reversed, + OverwriteMode mode); + void CallWithArguments(ZoneList* arguments, CallFunctionFlags flags, int position); diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc index ac1c14fd9..80687a3fe 100644 --- a/src/arm/macro-assembler-arm.cc +++ b/src/arm/macro-assembler-arm.cc @@ -180,6 +180,19 @@ void MacroAssembler::Drop(int count, Condition cond) { } +void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) { + if (scratch.is(no_reg)) { + eor(reg1, reg1, Operand(reg2)); + eor(reg2, reg2, Operand(reg1)); + eor(reg1, reg1, Operand(reg2)); + } else { + mov(scratch, reg1); + mov(reg1, reg2); + mov(reg2, scratch); + } +} + + 
void MacroAssembler::Call(Label* target) { bl(target); } @@ -190,6 +203,13 @@ void MacroAssembler::Move(Register dst, Handle value) { } +void MacroAssembler::Move(Register dst, Register src) { + if (!dst.is(src)) { + mov(dst, src); + } +} + + void MacroAssembler::SmiJumpTable(Register index, Vector targets) { // Empty the const pool. CheckConstPool(true, true); diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h index 1097bd9d1..5ff401eb4 100644 --- a/src/arm/macro-assembler-arm.h +++ b/src/arm/macro-assembler-arm.h @@ -70,8 +70,15 @@ class MacroAssembler: public Assembler { // from the stack, clobbering only the sp register. void Drop(int count, Condition cond = al); + + // Swap two registers. If the scratch register is omitted then a slightly + // less efficient form using xor instead of mov is emitted. + void Swap(Register reg1, Register reg2, Register scratch = no_reg); + void Call(Label* target); void Move(Register dst, Handle value); + // May do nothing if the registers are identical. + void Move(Register dst, Register src); // Jumps to the label at the index given by the Smi in "index". void SmiJumpTable(Register index, Vector targets); // Load an object from the root table. diff --git a/src/arm/register-allocator-arm-inl.h b/src/arm/register-allocator-arm-inl.h index 4691f2974..945cdeb3c 100644 --- a/src/arm/register-allocator-arm-inl.h +++ b/src/arm/register-allocator-arm-inl.h @@ -92,9 +92,6 @@ Register RegisterAllocator::ToRegister(int num) { void RegisterAllocator::Initialize() { Reset(); - // The non-reserved r1 and lr registers are live on JS function entry. - Use(r1); // JS function. - Use(lr); // Return address. 
} diff --git a/src/arm/register-allocator-arm.h b/src/arm/register-allocator-arm.h index f953ed9f1..fdbc88f5d 100644 --- a/src/arm/register-allocator-arm.h +++ b/src/arm/register-allocator-arm.h @@ -33,7 +33,8 @@ namespace internal { class RegisterAllocatorConstants : public AllStatic { public: - static const int kNumRegisters = 12; + // No registers are currently managed by the register allocator on ARM. + static const int kNumRegisters = 0; static const int kInvalidRegister = -1; }; diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc index 1e3a86565..dc3db6dd4 100644 --- a/src/arm/virtual-frame-arm.cc +++ b/src/arm/virtual-frame-arm.cc @@ -37,34 +37,126 @@ namespace internal { #define __ ACCESS_MASM(masm()) -void VirtualFrame::SyncElementBelowStackPointer(int index) { - UNREACHABLE(); +void VirtualFrame::PopToR1R0() { + VirtualFrame where_to_go = *this; + // Shuffle things around so the top of stack is in r0 and r1. + where_to_go.top_of_stack_state_ = R0_R1_TOS; + MergeTo(&where_to_go); + // Pop the two registers off the stack so they are detached from the frame. + element_count_ -= 2; + top_of_stack_state_ = NO_TOS_REGISTERS; } -void VirtualFrame::SyncElementByPushing(int index) { - UNREACHABLE(); +void VirtualFrame::PopToR1() { + VirtualFrame where_to_go = *this; + // Shuffle things around so the top of stack is only in r1. + where_to_go.top_of_stack_state_ = R1_TOS; + MergeTo(&where_to_go); + // Pop the register off the stack so it is detached from the frame. + element_count_ -= 1; + top_of_stack_state_ = NO_TOS_REGISTERS; } -void VirtualFrame::MergeTo(VirtualFrame* expected) { - // ARM frames are currently always in memory. 
- ASSERT(Equals(expected)); -} - - -void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) { - UNREACHABLE(); -} - - -void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) { - UNREACHABLE(); +void VirtualFrame::PopToR0() { + VirtualFrame where_to_go = *this; + // Shuffle things around so the top of stack is only in r0. + where_to_go.top_of_stack_state_ = R0_TOS; + MergeTo(&where_to_go); + // Pop the register off the stack so it is detached from the frame. + element_count_ -= 1; + top_of_stack_state_ = NO_TOS_REGISTERS; } -void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) { - UNREACHABLE(); +void VirtualFrame::MergeTo(VirtualFrame* expected) { + if (Equals(expected)) return; +#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b)) + switch (CASE_NUMBER(top_of_stack_state_, expected->top_of_stack_state_)) { + case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS): + break; + case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS): + __ pop(r0); + break; + case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS): + __ pop(r1); + break; + case CASE_NUMBER(NO_TOS_REGISTERS, R0_R1_TOS): + __ pop(r0); + __ pop(r1); + break; + case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS): + __ pop(r1); + __ pop(r0); + break; + case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS): + __ push(r0); + break; + case CASE_NUMBER(R0_TOS, R0_TOS): + break; + case CASE_NUMBER(R0_TOS, R1_TOS): + __ mov(r1, r0); + break; + case CASE_NUMBER(R0_TOS, R0_R1_TOS): + __ pop(r1); + break; + case CASE_NUMBER(R0_TOS, R1_R0_TOS): + __ mov(r1, r0); + __ pop(r0); + break; + case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS): + __ push(r1); + break; + case CASE_NUMBER(R1_TOS, R0_TOS): + __ mov(r0, r1); + break; + case CASE_NUMBER(R1_TOS, R1_TOS): + break; + case CASE_NUMBER(R1_TOS, R0_R1_TOS): + __ mov(r0, r1); + __ pop(r1); + break; + case CASE_NUMBER(R1_TOS, R1_R0_TOS): + __ pop(r0); + break; + case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS): + __ push(r1); + __ push(r0); + break; + case CASE_NUMBER(R0_R1_TOS, 
R0_TOS): + __ push(r1); + break; + case CASE_NUMBER(R0_R1_TOS, R1_TOS): + __ push(r1); + __ mov(r1, r0); + break; + case CASE_NUMBER(R0_R1_TOS, R0_R1_TOS): + break; + case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS): + __ Swap(r0, r1, ip); + break; + case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS): + __ push(r0); + __ push(r1); + break; + case CASE_NUMBER(R1_R0_TOS, R0_TOS): + __ push(r0); + __ mov(r0, r1); + break; + case CASE_NUMBER(R1_R0_TOS, R1_TOS): + __ push(r0); + break; + case CASE_NUMBER(R1_R0_TOS, R0_R1_TOS): + __ Swap(r0, r1, ip); + break; + case CASE_NUMBER(R1_R0_TOS, R1_R0_TOS): + break; + default: + UNREACHABLE(); +#undef CASE_NUMBER + } + ASSERT(register_allocation_map_ == expected->register_allocation_map_); } @@ -92,8 +184,6 @@ void VirtualFrame::Enter() { __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); // Adjust FP to point to saved FP. __ add(fp, sp, Operand(2 * kPointerSize)); - cgen()->allocator()->Unuse(r1); - cgen()->allocator()->Unuse(lr); } @@ -152,37 +242,11 @@ void VirtualFrame::AllocateStackSlots() { -void VirtualFrame::SaveContextRegister() { - UNIMPLEMENTED(); -} - - -void VirtualFrame::RestoreContextRegister() { - UNIMPLEMENTED(); -} - - void VirtualFrame::PushReceiverSlotAddress() { UNIMPLEMENTED(); } -int VirtualFrame::InvalidateFrameSlotAt(int index) { - UNIMPLEMENTED(); - return kIllegalIndex; -} - - -void VirtualFrame::TakeFrameSlotAt(int index) { - UNIMPLEMENTED(); -} - - -void VirtualFrame::StoreToFrameSlotAt(int index) { - UNIMPLEMENTED(); -} - - void VirtualFrame::PushTryHandler(HandlerType type) { // Grow the expression stack by handler size less one (the return // address in lr is already counted by a call instruction). @@ -247,52 +311,192 @@ void VirtualFrame::CallCodeObject(Handle code, } +// NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS. 
+const bool VirtualFrame::kR0InUse[TOS_STATES] = + { false, true, false, true, true }; +const bool VirtualFrame::kR1InUse[TOS_STATES] = + { false, false, true, true, true }; +const int VirtualFrame::kVirtualElements[TOS_STATES] = + { 0, 1, 1, 2, 2 }; +const Register VirtualFrame::kTopRegister[TOS_STATES] = + { r0, r0, r1, r1, r0 }; +const Register VirtualFrame::kBottomRegister[TOS_STATES] = + { r0, r0, r1, r0, r1 }; +const Register VirtualFrame::kAllocatedRegisters[ + VirtualFrame::kNumberOfAllocatedRegisters] = { r2, r3, r4, r5, r6 }; +// Popping is done by the transition implied by kStateAfterPop. Of course if +// there were no stack slots allocated to registers then the physical SP must +// be adjusted. +const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] = + { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, R0_TOS, R1_TOS }; +// Pushing is done by the transition implied by kStateAfterPush. Of course if +// the maximum number of registers was already allocated to the top of stack +// slots then one register must be physically pushed onto the stack. +const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] = + { R0_TOS, R1_R0_TOS, R0_R1_TOS, R0_R1_TOS, R1_R0_TOS }; + + +bool VirtualFrame::SpilledScope::is_spilled_ = false; + + void VirtualFrame::Drop(int count) { ASSERT(count >= 0); ASSERT(height() >= count); - int num_virtual_elements = (element_count() - 1) - stack_pointer_; - - // Emit code to lower the stack pointer if necessary. - if (num_virtual_elements < count) { - int num_dropped = count - num_virtual_elements; - stack_pointer_ -= num_dropped; - __ add(sp, sp, Operand(num_dropped * kPointerSize)); - } - // Discard elements from the virtual frame and free any registers. 
+ int num_virtual_elements = kVirtualElements[top_of_stack_state_]; + while (num_virtual_elements > 0) { + Pop(); + num_virtual_elements--; + count--; + if (count == 0) return; + } + if (count == 0) return; + __ add(sp, sp, Operand(count * kPointerSize)); element_count_ -= count; } -Result VirtualFrame::Pop() { - UNIMPLEMENTED(); - return Result(); +void VirtualFrame::Pop() { + if (top_of_stack_state_ == NO_TOS_REGISTERS) { + __ add(sp, sp, Operand(kPointerSize)); + } else { + top_of_stack_state_ = kStateAfterPop[top_of_stack_state_]; + } + element_count_--; } void VirtualFrame::EmitPop(Register reg) { - ASSERT(stack_pointer_ == element_count() - 1); - stack_pointer_--; + ASSERT(!is_used(reg)); + if (top_of_stack_state_ == NO_TOS_REGISTERS) { + __ pop(reg); + } else { + __ mov(reg, kTopRegister[top_of_stack_state_]); + top_of_stack_state_ = kStateAfterPop[top_of_stack_state_]; + } element_count_--; - __ pop(reg); +} + + +Register VirtualFrame::Peek() { + AssertIsNotSpilled(); + if (top_of_stack_state_ == NO_TOS_REGISTERS) { + top_of_stack_state_ = kStateAfterPush[top_of_stack_state_]; + Register answer = kTopRegister[top_of_stack_state_]; + __ pop(answer); + return answer; + } else { + return kTopRegister[top_of_stack_state_]; + } +} + + +Register VirtualFrame::PopToRegister(Register but_not_to_this_one) { + ASSERT(but_not_to_this_one.is(r0) || + but_not_to_this_one.is(r1) || + but_not_to_this_one.is(no_reg)); + AssertIsNotSpilled(); + element_count_--; + if (top_of_stack_state_ == NO_TOS_REGISTERS) { + if (but_not_to_this_one.is(r0)) { + __ pop(r1); + return r1; + } else { + __ pop(r0); + return r0; + } + } else { + Register answer = kTopRegister[top_of_stack_state_]; + ASSERT(!answer.is(but_not_to_this_one)); + top_of_stack_state_ = kStateAfterPop[top_of_stack_state_]; + return answer; + } +} + + +void VirtualFrame::EnsureOneFreeTOSRegister() { + if (kVirtualElements[top_of_stack_state_] == kMaxTOSRegisters) { + __ push(kBottomRegister[top_of_stack_state_]); + 
top_of_stack_state_ = kStateAfterPush[top_of_stack_state_]; + top_of_stack_state_ = kStateAfterPop[top_of_stack_state_]; + } + ASSERT(kVirtualElements[top_of_stack_state_] != kMaxTOSRegisters); } void VirtualFrame::EmitPush(Register reg) { - ASSERT(stack_pointer_ == element_count() - 1); element_count_++; - stack_pointer_++; - __ push(reg); + if (SpilledScope::is_spilled()) { + __ push(reg); + return; + } + if (top_of_stack_state_ == NO_TOS_REGISTERS) { + if (reg.is(r0)) { + top_of_stack_state_ = R0_TOS; + return; + } + if (reg.is(r1)) { + top_of_stack_state_ = R1_TOS; + return; + } + } + EnsureOneFreeTOSRegister(); + top_of_stack_state_ = kStateAfterPush[top_of_stack_state_]; + Register dest = kTopRegister[top_of_stack_state_]; + __ Move(dest, reg); +} + + +Register VirtualFrame::GetTOSRegister() { + if (SpilledScope::is_spilled()) return r0; + + EnsureOneFreeTOSRegister(); + return kTopRegister[kStateAfterPush[top_of_stack_state_]]; +} + + +void VirtualFrame::EmitPush(MemOperand operand) { + element_count_++; + if (SpilledScope::is_spilled()) { + __ ldr(r0, operand); + __ push(r0); + return; + } + EnsureOneFreeTOSRegister(); + top_of_stack_state_ = kStateAfterPush[top_of_stack_state_]; + __ ldr(kTopRegister[top_of_stack_state_], operand); } void VirtualFrame::EmitPushMultiple(int count, int src_regs) { - ASSERT(stack_pointer_ == element_count() - 1); + ASSERT(SpilledScope::is_spilled()); Adjust(count); __ stm(db_w, sp, src_regs); } +void VirtualFrame::SpillAll() { + switch (top_of_stack_state_) { + case R1_R0_TOS: + masm()->push(r0); + // Fall through. + case R1_TOS: + masm()->push(r1); + top_of_stack_state_ = NO_TOS_REGISTERS; + break; + case R0_R1_TOS: + masm()->push(r1); + // Fall through. + case R0_TOS: + masm()->push(r0); + top_of_stack_state_ = NO_TOS_REGISTERS; + // Fall through. + case NO_TOS_REGISTERS: + break; + } + ASSERT(register_allocation_map_ == 0); // Not yet implemented. 
+} + #undef __ } } // namespace v8::internal diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h index 6ba1eecc0..a7b34e552 100644 --- a/src/arm/virtual-frame-arm.h +++ b/src/arm/virtual-frame-arm.h @@ -45,14 +45,69 @@ namespace internal { class VirtualFrame : public ZoneObject { public: + class RegisterAllocationScope; // A utility class to introduce a scope where the virtual frame is // expected to remain spilled. The constructor spills the code - // generator's current frame, but no attempt is made to require it - // to stay spilled. It is intended as documentation while the code - // generator is being transformed. + // generator's current frame, and keeps it spilled. class SpilledScope BASE_EMBEDDED { public: - SpilledScope() {} + explicit SpilledScope(VirtualFrame* frame) + : old_is_spilled_(is_spilled_) { + if (frame != NULL) { + if (!is_spilled_) { + frame->SpillAll(); + } else { + frame->AssertIsSpilled(); + } + } + is_spilled_ = true; + } + ~SpilledScope() { + is_spilled_ = old_is_spilled_; + } + static bool is_spilled() { return is_spilled_; } + + private: + static bool is_spilled_; + int old_is_spilled_; + + SpilledScope() { } + + friend class RegisterAllocationScope; + }; + + class RegisterAllocationScope BASE_EMBEDDED { + public: + // A utility class to introduce a scope where the virtual frame + // is not spilled, ie. where register allocation occurs. Eventually + // when RegisterAllocationScope is ubiquitous it can be removed + // along with the (by then unused) SpilledScope class. 
+ explicit RegisterAllocationScope(CodeGenerator* cgen) + : cgen_(cgen), + old_is_spilled_(SpilledScope::is_spilled_) { + SpilledScope::is_spilled_ = false; + if (old_is_spilled_) { + VirtualFrame* frame = cgen->frame(); + if (frame != NULL) { + frame->AssertIsSpilled(); + } + } + } + ~RegisterAllocationScope() { + SpilledScope::is_spilled_ = old_is_spilled_; + if (old_is_spilled_) { + VirtualFrame* frame = cgen_->frame(); + if (frame != NULL) { + frame->SpillAll(); + } + } + } + + private: + CodeGenerator* cgen_; + bool old_is_spilled_; + + RegisterAllocationScope() { } }; // An illegal index into the virtual frame. @@ -75,27 +130,38 @@ class VirtualFrame : public ZoneObject { return element_count() - expression_base_index(); } - int register_location(int num) { - ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters); - return register_locations_[num]; - } - - int register_location(Register reg) { - return register_locations_[RegisterAllocator::ToNumber(reg)]; - } - - void set_register_location(Register reg, int index) { - register_locations_[RegisterAllocator::ToNumber(reg)] = index; - } - bool is_used(int num) { - ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters); - return register_locations_[num] != kIllegalIndex; + switch (num) { + case 0: { // r0. + return kR0InUse[top_of_stack_state_]; + } + case 1: { // r1. + return kR1InUse[top_of_stack_state_]; + } + case 2: + case 3: + case 4: + case 5: + case 6: { // r2 to r6. 
+ ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters); + ASSERT(num >= kFirstAllocatedRegister); + if ((register_allocation_map_ & + (1 << (num - kFirstAllocatedRegister))) == 0) { + return false; + } else { + return true; + } + } + default: { + ASSERT(num < kFirstAllocatedRegister || + num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters); + return false; + } + } } bool is_used(Register reg) { - return register_locations_[RegisterAllocator::ToNumber(reg)] - != kIllegalIndex; + return is_used(RegisterAllocator::ToNumber(reg)); } // Add extra in-memory elements to the top of the frame to match an actual @@ -104,39 +170,35 @@ class VirtualFrame : public ZoneObject { void Adjust(int count); // Forget elements from the top of the frame to match an actual frame (eg, - // the frame after a runtime call). No code is emitted. + // the frame after a runtime call). No code is emitted except to bring the + // frame to a spilled state. void Forget(int count) { - ASSERT(count >= 0); - ASSERT(stack_pointer_ == element_count() - 1); - stack_pointer_ -= count; - // On ARM, all elements are in memory, so there is no extra bookkeeping - // (registers, copies, etc.) beyond dropping the elements. + SpillAll(); element_count_ -= count; } - // Forget count elements from the top of the frame and adjust the stack - // pointer downward. This is used, for example, before merging frames at - // break, continue, and return targets. - void ForgetElements(int count); - // Spill all values from the frame to memory. - inline void SpillAll(); + void SpillAll(); + + void AssertIsSpilled() { + ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS); + ASSERT(register_allocation_map_ == 0); + } + + void AssertIsNotSpilled() { + ASSERT(!SpilledScope::is_spilled()); + } // Spill all occurrences of a specific register from the frame. 
void Spill(Register reg) { - if (is_used(reg)) SpillElementAt(register_location(reg)); + UNIMPLEMENTED(); } // Spill all occurrences of an arbitrary register if possible. Return the // register spilled or no_reg if it was not possible to free any register - // (ie, they all have frame-external references). + // (ie, they all have frame-external references). Unimplemented. Register SpillAnyRegister(); - // Prepare this virtual frame for merging to an expected frame by - // performing some state changes that do not require generating - // code. It is guaranteed that no code will be generated. - void PrepareMergeTo(VirtualFrame* expected); - // Make this virtual frame have a state identical to an expected virtual // frame. As a side effect, code may be emitted to make this frame match // the expected one. @@ -147,10 +209,7 @@ class VirtualFrame : public ZoneObject { // registers. Used when the code generator's frame is switched from this // one to NULL by an unconditional jump. void DetachFromCodeGenerator() { - RegisterAllocator* cgen_allocator = cgen()->allocator(); - for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { - if (is_used(i)) cgen_allocator->Unuse(i); - } + AssertIsSpilled(); } // (Re)attach a frame to its code generator. This informs the register @@ -158,10 +217,7 @@ class VirtualFrame : public ZoneObject { // Used when a code generator's frame is switched from NULL to this one by // binding a label. void AttachToCodeGenerator() { - RegisterAllocator* cgen_allocator = cgen()->allocator(); - for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { - if (is_used(i)) cgen_allocator->Unuse(i); - } + AssertIsSpilled(); } // Emit code for the physical JS entry and exit frame sequences. After @@ -184,23 +240,17 @@ class VirtualFrame : public ZoneObject { void AllocateStackSlots(); // The current top of the expression stack as an assembly operand. 
- MemOperand Top() { return MemOperand(sp, 0); } + MemOperand Top() { + AssertIsSpilled(); + return MemOperand(sp, 0); + } // An element of the expression stack as an assembly operand. MemOperand ElementAt(int index) { + AssertIsSpilled(); return MemOperand(sp, index * kPointerSize); } - // Random-access store to a frame-top relative frame element. The result - // becomes owned by the frame and is invalidated. - void SetElementAt(int index, Result* value); - - // Set a frame element to a constant. The index is frame-top relative. - void SetElementAt(int index, Handle value) { - Result temp(value); - SetElementAt(index, &temp); - } - // A frame-allocated local as an assembly operand. MemOperand LocalAt(int index) { ASSERT(0 <= index); @@ -208,13 +258,6 @@ class VirtualFrame : public ZoneObject { return MemOperand(fp, kLocal0Offset - index * kPointerSize); } - // Push the value of a local frame slot on top of the frame and invalidate - // the local slot. The slot should be written to before trying to read - // from it again. - void TakeLocalAt(int index) { - TakeFrameSlotAt(local0_index() + index); - } - // Push the address of the receiver slot on the frame. void PushReceiverSlotAddress(); @@ -224,13 +267,6 @@ class VirtualFrame : public ZoneObject { // The context frame slot. MemOperand Context() { return MemOperand(fp, kContextOffset); } - // Save the value of the esi register to the context frame slot. - void SaveContextRegister(); - - // Restore the esi register from the value of the context frame - // slot. - void RestoreContextRegister(); - // A parameter as an assembly operand. MemOperand ParameterAt(int index) { // Index -1 corresponds to the receiver. @@ -239,19 +275,6 @@ class VirtualFrame : public ZoneObject { return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize); } - // Push the value of a paramter frame slot on top of the frame and - // invalidate the parameter slot. The slot should be written to before - // trying to read from it again. 
- void TakeParameterAt(int index) { - TakeFrameSlotAt(param0_index() + index); - } - - // Store the top value on the virtual frame into a parameter frame slot. - // The value is left in place on top of the frame. - void StoreToParameterAt(int index) { - StoreToFrameSlotAt(param0_index() + index); - } - // The receiver frame slot. MemOperand Receiver() { return ParameterAt(-1); } @@ -261,7 +284,7 @@ class VirtualFrame : public ZoneObject { // Call stub given the number of arguments it expects on (and // removes from) the stack. void CallStub(CodeStub* stub, int arg_count) { - Forget(arg_count); + if (arg_count != 0) Forget(arg_count); ASSERT(cgen()->HasValidEntryRegisters()); masm()->CallStub(stub); } @@ -296,34 +319,49 @@ class VirtualFrame : public ZoneObject { // Drop one element. void Drop() { Drop(1); } - // Pop an element from the top of the expression stack. Returns a - // Result, which may be a constant or a register. - Result Pop(); + // Pop an element from the top of the expression stack. Discards + // the result. + void Pop(); + + // Pop an element from the top of the expression stack. The register + // will be one normally used for the top of stack register allocation + // so you can't hold on to it if you push on the stack. + Register PopToRegister(Register but_not_to_this_one = no_reg); + + // Look at the top of the stack. The register returned is aliased and + // must be copied to a scratch register before modification. + Register Peek(); // Pop and save an element from the top of the expression stack and // emit a corresponding pop instruction. void EmitPop(Register reg); + // Takes the top two elements and puts them in r0 (top element) and r1 + // (second element). + void PopToR1R0(); + + // Takes the top element and puts it in r1. + void PopToR1(); + + // Takes the top element and puts it in r0. + void PopToR0(); + // Push an element on top of the expression stack and emit a // corresponding push instruction. 
void EmitPush(Register reg); + void EmitPush(MemOperand operand); + + // Get a register which is free and which must be immediately used to + // push on the top of the stack. + Register GetTOSRegister(); // Push multiple registers on the stack and the virtual frame // Register are selected by setting bit in src_regs and // are pushed in decreasing order: r15 .. r0. void EmitPushMultiple(int count, int src_regs); - // Push an element on the virtual frame. - inline void Push(Handle value); - inline void Push(Smi* value); - - // Nip removes zero or more elements from immediately below the top - // of the frame, leaving the previous top-of-frame value on top of - // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x). - inline void Nip(int num_dropped); - - inline void SetTypeForLocalAt(int index, TypeInfo info); - inline void SetTypeForParamAt(int index, TypeInfo info); + static Register scratch0() { return r7; } + static Register scratch1() { return r9; } private: static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset; @@ -333,16 +371,40 @@ class VirtualFrame : public ZoneObject { static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize; static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots. + // 5 states for the top of stack, which can be in memory or in r0 and r1. + enum TopOfStack { NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS, + TOS_STATES}; + static const int kMaxTOSRegisters = 2; + + static const bool kR0InUse[TOS_STATES]; + static const bool kR1InUse[TOS_STATES]; + static const int kVirtualElements[TOS_STATES]; + static const TopOfStack kStateAfterPop[TOS_STATES]; + static const TopOfStack kStateAfterPush[TOS_STATES]; + static const Register kTopRegister[TOS_STATES]; + static const Register kBottomRegister[TOS_STATES]; + + // We allocate up to 5 locals in registers. + static const int kNumberOfAllocatedRegisters = 5; + // r2 to r6 are allocated to locals. 
+ static const int kFirstAllocatedRegister = 2; + + static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters]; + + static Register AllocatedRegister(int r) { + ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters); + return kAllocatedRegisters[r]; + } + // The number of elements on the stack frame. int element_count_; + TopOfStack top_of_stack_state_:3; + int register_allocation_map_:kNumberOfAllocatedRegisters; // The index of the element that is at the processor's stack pointer - // (the sp register). - int stack_pointer_; - - // The index of the register frame element using each register, or - // kIllegalIndex if a register is not on the frame. - int register_locations_[RegisterAllocator::kNumRegisters]; + // (the sp register). For now since everything is in memory it is given + // by the number of elements on the not-very-virtual stack frame. + int stack_pointer() { return element_count_ - 1; } // The number of frame-allocated locals and parameters respectively. int parameter_count() { return cgen()->scope()->num_parameters(); } @@ -380,80 +442,15 @@ class VirtualFrame : public ZoneObject { return (frame_pointer() - index) * kPointerSize; } - // Record an occurrence of a register in the virtual frame. This has the - // effect of incrementing the register's external reference count and - // of updating the index of the register's location in the frame. - void Use(Register reg, int index) { - ASSERT(!is_used(reg)); - set_register_location(reg, index); - cgen()->allocator()->Use(reg); - } - - // Record that a register reference has been dropped from the frame. This - // decrements the register's external reference count and invalidates the - // index of the register's location in the frame. 
- void Unuse(Register reg) { - ASSERT(is_used(reg)); - set_register_location(reg, kIllegalIndex); - cgen()->allocator()->Unuse(reg); - } - - // Spill the element at a particular index---write it to memory if - // necessary, free any associated register, and forget its value if - // constant. - void SpillElementAt(int index); - - // Sync the element at a particular index. If it is a register or - // constant that disagrees with the value on the stack, write it to memory. - // Keep the element type as register or constant, and clear the dirty bit. - void SyncElementAt(int index); - - // Sync a single unsynced element that lies beneath or at the stack pointer. - void SyncElementBelowStackPointer(int index); - - // Sync a single unsynced element that lies just above the stack pointer. - void SyncElementByPushing(int index); - - // Push a the value of a frame slot (typically a local or parameter) on - // top of the frame and invalidate the slot. - void TakeFrameSlotAt(int index); - - // Store the value on top of the frame to a frame slot (typically a local - // or parameter). - void StoreToFrameSlotAt(int index); - // Spill all elements in registers. Spill the top spilled_args elements // on the frame. Sync all other frame elements. // Then drop dropped_args elements from the virtual frame, to match // the effect of an upcoming call that will drop them from the stack. void PrepareForCall(int spilled_args, int dropped_args); - // Move frame elements currently in registers or constants, that - // should be in memory in the expected frame, to memory. - void MergeMoveRegistersToMemory(VirtualFrame* expected); - - // Make the register-to-register moves necessary to - // merge this frame with the expected frame. - // Register to memory moves must already have been made, - // and memory to register moves must follow this call. - // This is because some new memory-to-register moves are - // created in order to break cycles of register moves. 
- // Used in the implementation of MergeTo(). - void MergeMoveRegistersToRegisters(VirtualFrame* expected); - - // Make the memory-to-register and constant-to-register moves - // needed to make this frame equal the expected frame. - // Called after all register-to-memory and register-to-register - // moves have been made. After this function returns, the frames - // should be equal. - void MergeMoveMemoryToRegisters(VirtualFrame* expected); - - // Invalidates a frame slot (puts an invalid frame element in it). - // Copies on the frame are correctly handled, and if this slot was - // the backing store of copies, the index of the new backing store - // is returned. Otherwise, returns kIllegalIndex. - // Register counts are correctly updated. - int InvalidateFrameSlotAt(int index); + // If all top-of-stack registers are in use then the lowest one is pushed + // onto the physical stack and made free. + void EnsureOneFreeTOSRegister(); inline bool Equals(VirtualFrame* other); diff --git a/src/codegen.h b/src/codegen.h index c3812fb68..8967a704f 100644 --- a/src/codegen.h +++ b/src/codegen.h @@ -229,7 +229,12 @@ class DeferredCode: public ZoneObject { Label entry_label_; Label exit_label_; - int registers_[RegisterAllocator::kNumRegisters]; + // C++ doesn't allow zero length arrays, so we make the array length 1 even + // if we don't need it. + static const int kRegistersArrayLength = + (RegisterAllocator::kNumRegisters == 0) ? 
+ 1 : RegisterAllocator::kNumRegisters; + int registers_[kRegistersArrayLength]; #ifdef DEBUG const char* comment_; diff --git a/src/jump-target-light.cc b/src/jump-target-light.cc index 098d97dee..befb43073 100644 --- a/src/jump-target-light.cc +++ b/src/jump-target-light.cc @@ -77,23 +77,10 @@ DeferredCode::DeferredCode() ASSERT(position_ != RelocInfo::kNoPosition); CodeGeneratorScope::Current()->AddDeferred(this); + #ifdef DEBUG - comment_ = ""; + CodeGeneratorScope::Current()->frame()->AssertIsSpilled(); #endif - - // Copy the register locations from the code generator's frame. - // These are the registers that will be spilled on entry to the - // deferred code and restored on exit. - VirtualFrame* frame = CodeGeneratorScope::Current()->frame(); - for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { - int loc = frame->register_location(i); - if (loc == VirtualFrame::kIllegalIndex) { - registers_[i] = kIgnore; - } else { - // Needs to be restored on exit but not saved on entry. - registers_[i] = frame->fp_relative(loc) | kSyncedFlag; - } - } } } } // namespace v8::internal diff --git a/src/register-allocator.h b/src/register-allocator.h index 456453391..a03a9d2fb 100644 --- a/src/register-allocator.h +++ b/src/register-allocator.h @@ -213,7 +213,11 @@ class RegisterFile BASE_EMBEDDED { } private: - static const int kNumRegisters = RegisterAllocatorConstants::kNumRegisters; + // C++ doesn't like zero length arrays, so we make the array length 1 even if + // we don't need it. + static const int kNumRegisters = + (RegisterAllocatorConstants::kNumRegisters == 0) ? 
+ 1 : RegisterAllocatorConstants::kNumRegisters; int ref_counts_[kNumRegisters]; diff --git a/src/virtual-frame-heavy-inl.h b/src/virtual-frame-heavy-inl.h index a4a0a9ba1..6381d0126 100644 --- a/src/virtual-frame-heavy-inl.h +++ b/src/virtual-frame-heavy-inl.h @@ -131,6 +131,22 @@ void VirtualFrame::SetTypeForParamAt(int index, TypeInfo info) { elements_[param0_index() + index].set_type_info(info); } + +void VirtualFrame::Nip(int num_dropped) { + ASSERT(num_dropped >= 0); + if (num_dropped == 0) return; + Result tos = Pop(); + if (num_dropped > 1) { + Drop(num_dropped - 1); + } + SetElementAt(0, &tos); +} + + +void VirtualFrame::Push(Smi* value) { + Push(Handle (value)); +} + } } // namespace v8::internal #endif // V8_VIRTUAL_FRAME_HEAVY_INL_H_ diff --git a/src/virtual-frame-heavy.cc b/src/virtual-frame-heavy.cc index 854ed75cb..727028005 100644 --- a/src/virtual-frame-heavy.cc +++ b/src/virtual-frame-heavy.cc @@ -295,4 +295,18 @@ void VirtualFrame::PrepareForCall(int spilled_args, int dropped_args) { } +// If there are any registers referenced only by the frame, spill one. +Register VirtualFrame::SpillAnyRegister() { + // Find the leftmost (ordered by register number) register whose only + // reference is in the frame. 
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { + if (is_used(i) && cgen()->allocator()->count(i) == 1) { + SpillElementAt(register_location(i)); + ASSERT(!cgen()->allocator()->is_used(i)); + return RegisterAllocator::ToRegister(i); + } + } + return no_reg; +} + } } // namespace v8::internal diff --git a/src/virtual-frame-inl.h b/src/virtual-frame-inl.h index e4c6e6e5e..c9f4aac18 100644 --- a/src/virtual-frame-inl.h +++ b/src/virtual-frame-inl.h @@ -36,25 +36,4 @@ #include "virtual-frame-light-inl.h" #endif - -namespace v8 { -namespace internal { - -void VirtualFrame::Push(Smi* value) { - Push(Handle (value)); -} - - -void VirtualFrame::Nip(int num_dropped) { - ASSERT(num_dropped >= 0); - if (num_dropped == 0) return; - Result tos = Pop(); - if (num_dropped > 1) { - Drop(num_dropped - 1); - } - SetElementAt(0, &tos); -} - -} } // namespace v8::internal - #endif // V8_VIRTUAL_FRAME_INL_H_ diff --git a/src/virtual-frame-light-inl.h b/src/virtual-frame-light-inl.h index 5c823ae5d..c50e6c8cf 100644 --- a/src/virtual-frame-light-inl.h +++ b/src/virtual-frame-light-inl.h @@ -39,54 +39,28 @@ namespace internal { // the parameters, and a return address. All frame elements are in memory. VirtualFrame::VirtualFrame() : element_count_(parameter_count() + 2), - stack_pointer_(parameter_count() + 1) { - for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { - register_locations_[i] = kIllegalIndex; - } -} + top_of_stack_state_(NO_TOS_REGISTERS), + register_allocation_map_(0) { } // When cloned, a frame is a deep copy of the original. 
VirtualFrame::VirtualFrame(VirtualFrame* original) : element_count_(original->element_count()), - stack_pointer_(original->stack_pointer_) { - memcpy(®ister_locations_, - original->register_locations_, - sizeof(register_locations_)); -} - - -void VirtualFrame::Push(Handle value) { - UNIMPLEMENTED(); -} + top_of_stack_state_(original->top_of_stack_state_), + register_allocation_map_(original->register_allocation_map_) { } bool VirtualFrame::Equals(VirtualFrame* other) { -#ifdef DEBUG - for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { - if (register_location(i) != other->register_location(i)) { - return false; - } - } - if (element_count() != other->element_count()) return false; -#endif - if (stack_pointer_ != other->stack_pointer_) return false; + ASSERT(element_count() == other->element_count()); + if (top_of_stack_state_ != other->top_of_stack_state_) return false; + if (register_allocation_map_ != other->register_allocation_map_) return false; return true; } -void VirtualFrame::SetTypeForLocalAt(int index, TypeInfo info) { - UNIMPLEMENTED(); -} - - -// Everything is always spilled anyway. -void VirtualFrame::SpillAll() { -} - - void VirtualFrame::PrepareForReturn() { + SpillAll(); } diff --git a/src/virtual-frame-light.cc b/src/virtual-frame-light.cc index 4662cf016..27c48a537 100644 --- a/src/virtual-frame-light.cc +++ b/src/virtual-frame-light.cc @@ -36,17 +36,14 @@ namespace internal { void VirtualFrame::Adjust(int count) { ASSERT(count >= 0); - ASSERT(stack_pointer_ == element_count() - 1); - element_count_ += count; - stack_pointer_ += count; } -// Make the type of the element at a given index be MEMORY. -void VirtualFrame::SpillElementAt(int index) { +// If there are any registers referenced only by the frame, spill one. 
+Register VirtualFrame::SpillAnyRegister() { UNIMPLEMENTED(); + return no_reg; } - } } // namespace v8::internal diff --git a/src/virtual-frame.cc b/src/virtual-frame.cc index d618bc268..310ff5949 100644 --- a/src/virtual-frame.cc +++ b/src/virtual-frame.cc @@ -37,21 +37,6 @@ namespace internal { // ------------------------------------------------------------------------- // VirtualFrame implementation. -// If there are any registers referenced only by the frame, spill one. -Register VirtualFrame::SpillAnyRegister() { - // Find the leftmost (ordered by register number) register whose only - // reference is in the frame. - for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { - if (is_used(i) && cgen()->allocator()->count(i) == 1) { - SpillElementAt(register_location(i)); - ASSERT(!cgen()->allocator()->is_used(i)); - return RegisterAllocator::ToRegister(i); - } - } - return no_reg; -} - - // Specialization of List::ResizeAdd to non-inlined version for FrameElements. // The function ResizeAdd becomes a real function, whose implementation is the // inlined ResizeAddInternal.