From 310fd67aea3c781c5d5ade1560ec31ee02d08a5c Mon Sep 17 00:00:00 2001 From: "whesse@chromium.org" Date: Wed, 17 Jun 2009 12:16:59 +0000 Subject: [PATCH] X64 Implementation: Make codegen load literals and assign to local variables. Review URL: http://codereview.chromium.org/126198 git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2204 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 --- src/x64/assembler-x64.cc | 2 +- src/x64/codegen-x64.cc | 834 +++++++++++++++++++++++++++++- src/x64/macro-assembler-x64.cc | 2 +- src/x64/register-allocator-x64.cc | 16 +- src/x64/virtual-frame-x64.cc | 355 ++++++++++++- 5 files changed, 1166 insertions(+), 43 deletions(-) diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc index eaeea194c..e89a829f4 100644 --- a/src/x64/assembler-x64.cc +++ b/src/x64/assembler-x64.cc @@ -1887,7 +1887,7 @@ void CallIC::GenerateNormal(MacroAssembler* a, int b) { } void JumpTarget::DoBind() { - UNIMPLEMENTED(); + // UNIMPLEMENTED(); } void JumpTarget::DoBranch(Condition a, Hint b) { diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc index 1629c6e8c..9f2335f5c 100644 --- a/src/x64/codegen-x64.cc +++ b/src/x64/codegen-x64.cc @@ -29,7 +29,9 @@ #include "v8.h" #include "macro-assembler.h" #include "register-allocator-inl.h" -#include "codegen.h" +#include "codegen-inl.h" +#include "codegen-x64-inl.h" + // TEST #include "compiler.h" @@ -101,7 +103,7 @@ void CodeGenerator::DeclareGlobals(Handle a) { void CodeGenerator::TestCodeGenerator() { // Compile a function from a string, and run it. Handle test_function = Compiler::Compile( - Factory::NewStringFromAscii(CStrVector("42")), + Factory::NewStringFromAscii(CStrVector("39; 42;")), Factory::NewStringFromAscii(CStrVector("CodeGeneratorTestScript")), 0, 0, @@ -128,6 +130,7 @@ void CodeGenerator::TestCodeGenerator() { 0, NULL, &pending_exceptions); + // Function compiles and runs, but returns a JSFunction object. 
CHECK(result->IsSmi()); CHECK_EQ(42, Smi::cast(*result)->value()); } @@ -136,7 +139,7 @@ void CodeGenerator::TestCodeGenerator() { void CodeGenerator::GenCode(FunctionLiteral* function) { // Record the position for debugging purposes. CodeForFunctionPosition(function); - // ZoneList* body = fun->body(); + ZoneList* body = function->body(); // Initialize state. ASSERT(scope_ == NULL); @@ -176,12 +179,37 @@ void CodeGenerator::GenCode(FunctionLiteral* function) { allocator_->Initialize(); frame_->Enter(); - Result return_register = allocator_->Allocate(rax); + // Allocate space for locals and initialize them. + frame_->AllocateStackSlots(); + // Initialize the function return target after the locals are set + // up, because it needs the expected frame height from the frame. + function_return_.set_direction(JumpTarget::BIDIRECTIONAL); + function_return_is_shadowed_ = false; + + VisitStatements(body); + } + // Adjust for function-level loop nesting. + loop_nesting_ -= function->loop_nesting(); - __ movq(return_register.reg(), Immediate(0x54)); // Smi 42 + // Code generation state must be reset. + ASSERT(state_ == NULL); + ASSERT(loop_nesting() == 0); + ASSERT(!function_return_is_shadowed_); + function_return_.Unuse(); + DeleteFrame(); - GenerateReturnSequence(&return_register); + // Process any deferred code using the register allocator. + if (!HasStackOverflow()) { + HistogramTimerScope deferred_timer(&Counters::deferred_code_generation); + JumpTarget::set_compiling_deferred_code(true); + ProcessDeferred(); + JumpTarget::set_compiling_deferred_code(false); } + + // There is no need to delete the register allocator, it is a + // stack-allocated local. 
+ allocator_ = NULL; + scope_ = NULL; } void CodeGenerator::GenerateReturnSequence(Result* return_value) { @@ -221,9 +249,31 @@ void CodeGenerator::GenerateFastCaseSwitchJumpTable(SwitchStatement* a, UNIMPLEMENTED(); } -void CodeGenerator::VisitStatements(ZoneList* a) { - UNIMPLEMENTED(); +#ifdef DEBUG +bool CodeGenerator::HasValidEntryRegisters() { + return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0)) + && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0)) + && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0)) + && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0)) + && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0)) + && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0)) + && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0)) + && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0)) + && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0)) + && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0)) + && (allocator()->count(r13) == (frame()->is_used(r13) ? 1 : 0)) + && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0)); } +#endif + + +void CodeGenerator::VisitStatements(ZoneList* statements) { + ASSERT(!in_spilled_code()); + for (int i = 0; has_valid_frame() && i < statements->length(); i++) { + Visit(statements->at(i)); + } +} + void CodeGenerator::VisitBlock(Block* a) { UNIMPLEMENTED(); @@ -233,10 +283,19 @@ void CodeGenerator::VisitDeclaration(Declaration* a) { UNIMPLEMENTED(); } -void CodeGenerator::VisitExpressionStatement(ExpressionStatement* a) { - UNIMPLEMENTED(); + +void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) { + ASSERT(!in_spilled_code()); + Comment cmnt(masm_, "[ ExpressionStatement"); + CodeForStatementPosition(node); + Expression* expression = node->expression(); + expression->MarkAsStatement(); + Load(expression); + // Remove the lingering expression result from the top of stack. 
+ frame_->Drop(); } + void CodeGenerator::VisitEmptyStatement(EmptyStatement* a) { UNIMPLEMENTED(); } @@ -253,10 +312,32 @@ void CodeGenerator::VisitBreakStatement(BreakStatement* a) { UNIMPLEMENTED(); } -void CodeGenerator::VisitReturnStatement(ReturnStatement* a) { - UNIMPLEMENTED(); + +void CodeGenerator::VisitReturnStatement(ReturnStatement* node) { + ASSERT(!in_spilled_code()); + Comment cmnt(masm_, "[ ReturnStatement"); + + CodeForStatementPosition(node); + Load(node->expression()); + Result return_value = frame_->Pop(); + /* if (function_return_is_shadowed_) { + function_return_.Jump(&return_value); + } else { + frame_->PrepareForReturn(); + if (function_return_.is_bound()) { + // If the function return label is already bound we reuse the + // code by jumping to the return site. + function_return_.Jump(&return_value); + } else { + function_return_.Bind(&return_value); + GenerateReturnSequence(&return_value); + } + } + */ + GenerateReturnSequence(&return_value); } + void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* a) { UNIMPLEMENTED(); } @@ -302,18 +383,32 @@ void CodeGenerator::VisitConditional(Conditional* a) { UNIMPLEMENTED(); } -void CodeGenerator::VisitSlot(Slot* a) { - UNIMPLEMENTED(); +void CodeGenerator::VisitSlot(Slot* node) { + Comment cmnt(masm_, "[ Slot"); + LoadFromSlot(node, typeof_state()); } -void CodeGenerator::VisitVariableProxy(VariableProxy* a) { - UNIMPLEMENTED(); + +void CodeGenerator::VisitVariableProxy(VariableProxy* node) { + Comment cmnt(masm_, "[ VariableProxy"); + Variable* var = node->var(); + Expression* expr = var->rewrite(); + if (expr != NULL) { + Visit(expr); + } else { + ASSERT(var->is_global()); + Reference ref(this, node); + // ref.GetValue(typeof_state()); + } } -void CodeGenerator::VisitLiteral(Literal* a) { - UNIMPLEMENTED(); + +void CodeGenerator::VisitLiteral(Literal* node) { + Comment cmnt(masm_, "[ Literal"); + frame_->Push(node->handle()); } + void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* a) 
{ UNIMPLEMENTED(); } @@ -330,12 +425,96 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* a) { UNIMPLEMENTED(); } -void CodeGenerator::VisitAssignment(Assignment* a) { - UNIMPLEMENTED(); + +void CodeGenerator::VisitAssignment(Assignment* node) { + Comment cmnt(masm_, "[ Assignment"); + CodeForStatementPosition(node); + + { Reference target(this, node->target()); + if (target.is_illegal()) { + // Fool the virtual frame into thinking that we left the assignment's + // value on the frame. + frame_->Push(Smi::FromInt(0)); + return; + } + Variable* var = node->target()->AsVariableProxy()->AsVariable(); + + if (node->starts_initialization_block()) { + ASSERT(target.type() == Reference::NAMED || + target.type() == Reference::KEYED); + // Change to slow case in the beginning of an initialization + // block to avoid the quadratic behavior of repeatedly adding + // fast properties. + + // The receiver is the argument to the runtime call. It is the + // first value pushed when the reference was loaded to the + // frame. + frame_->PushElementAt(target.size() - 1); + // Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1); + } + if (node->op() == Token::ASSIGN || + node->op() == Token::INIT_VAR || + node->op() == Token::INIT_CONST) { + Load(node->value()); + + } else { + // TODO(X64): Make compound assignments work. + /* + Literal* literal = node->value()->AsLiteral(); + bool overwrite_value = + (node->value()->AsBinaryOperation() != NULL && + node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); + Variable* right_var = node->value()->AsVariableProxy()->AsVariable(); + // There are two cases where the target is not read in the right hand + // side, that are easy to test for: the right hand side is a literal, + // or the right hand side is a different variable. TakeValue invalidates + // the target, with an implicit promise that it will be written to again + // before it is read. 
+ if (literal != NULL || (right_var != NULL && right_var != var)) { + target.TakeValue(NOT_INSIDE_TYPEOF); + } else { + target.GetValue(NOT_INSIDE_TYPEOF); + } + */ + Load(node->value()); + /* + GenericBinaryOperation(node->binary_op(), + node->type(), + overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); + */ + } + + if (var != NULL && + var->mode() == Variable::CONST && + node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) { + // Assignment ignored - leave the value on the stack. + } else { + CodeForSourcePosition(node->position()); + if (node->op() == Token::INIT_CONST) { + // Dynamic constant initializations must use the function context + // and initialize the actual constant declared. Dynamic variable + // initializations are simply assignments and use SetValue. + target.SetValue(CONST_INIT); + } else { + target.SetValue(NOT_CONST_INIT); + } + if (node->ends_initialization_block()) { + ASSERT(target.type() == Reference::NAMED || + target.type() == Reference::KEYED); + // End of initialization block. Revert to fast case. The + // argument to the runtime call is the receiver, which is the + // first value pushed as part of the reference, which is below + // the lhs value. 
+ frame_->PushElementAt(target.size()); + // Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1); + } + } + } } + void CodeGenerator::VisitThrow(Throw* a) { - UNIMPLEMENTED(); + // UNIMPLEMENTED(); } void CodeGenerator::VisitProperty(Property* a) { @@ -425,7 +604,620 @@ void CodeGenerator::GenerateValueOf(ZoneList* args) { UNIMPLEMENTED(); } +// ----------------------------------------------------------------------------- +// CodeGenerator implementation of Expressions + +void CodeGenerator::Load(Expression* x, TypeofState typeof_state) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + ASSERT(!in_spilled_code()); + JumpTarget true_target; + JumpTarget false_target; + ControlDestination dest(&true_target, &false_target, true); + LoadCondition(x, typeof_state, &dest, false); + + if (dest.false_was_fall_through()) { + // The false target was just bound. + JumpTarget loaded; + frame_->Push(Factory::false_value()); + // There may be dangling jumps to the true target. + if (true_target.is_linked()) { + loaded.Jump(); + true_target.Bind(); + frame_->Push(Factory::true_value()); + loaded.Bind(); + } + + } else if (dest.is_used()) { + // There is true, and possibly false, control flow (with true as + // the fall through). + JumpTarget loaded; + frame_->Push(Factory::true_value()); + if (false_target.is_linked()) { + loaded.Jump(); + false_target.Bind(); + frame_->Push(Factory::false_value()); + loaded.Bind(); + } + + } else { + // We have a valid value on top of the frame, but we still may + // have dangling jumps to the true and false targets from nested + // subexpressions (eg, the left subexpressions of the + // short-circuited boolean operators). + ASSERT(has_valid_frame()); + if (true_target.is_linked() || false_target.is_linked()) { + JumpTarget loaded; + loaded.Jump(); // Don't lose the current TOS. 
+ if (true_target.is_linked()) { + true_target.Bind(); + frame_->Push(Factory::true_value()); + if (false_target.is_linked()) { + loaded.Jump(); + } + } + if (false_target.is_linked()) { + false_target.Bind(); + frame_->Push(Factory::false_value()); + } + loaded.Bind(); + } + } + + ASSERT(has_valid_frame()); + ASSERT(frame_->height() == original_height + 1); +} + + +// Emit code to load the value of an expression to the top of the +// frame. If the expression is boolean-valued it may be compiled (or +// partially compiled) into control flow to the control destination. +// If force_control is true, control flow is forced. +void CodeGenerator::LoadCondition(Expression* x, + TypeofState typeof_state, + ControlDestination* dest, + bool force_control) { + ASSERT(!in_spilled_code()); + int original_height = frame_->height(); + + { CodeGenState new_state(this, typeof_state, dest); + Visit(x); + + // If we hit a stack overflow, we may not have actually visited + // the expression. In that case, we ensure that we have a + // valid-looking frame state because we will continue to generate + // code as we unwind the C++ stack. + // + // It's possible to have both a stack overflow and a valid frame + // state (eg, a subexpression overflowed, visiting it returned + // with a dummied frame state, and visiting this expression + // returned with a normal-looking state). + if (HasStackOverflow() && + !dest->is_used() && + frame_->height() == original_height) { + dest->Goto(true); + } + } + + if (force_control && !dest->is_used()) { + // Convert the TOS value into flow to the control destination. + // TODO(X64): Make control flow to control destinations work. + // ToBoolean(dest); + } + + ASSERT(!(force_control && !dest->is_used())); + ASSERT(dest->is_used() || frame_->height() == original_height + 1); +} + + +void CodeGenerator::LoadUnsafeSmi(Register target, Handle value) { + UNIMPLEMENTED(); + // TODO(X64): Implement security policy for loads of smis. 
+} + + +bool CodeGenerator::IsUnsafeSmi(Handle value) { + return false; +} + +//------------------------------------------------------------------------------ +// CodeGenerator implementation of variables, lookups, and stores. + +Reference::Reference(CodeGenerator* cgen, Expression* expression) + : cgen_(cgen), expression_(expression), type_(ILLEGAL) { + cgen->LoadReference(this); +} + + +Reference::~Reference() { + cgen_->UnloadReference(this); +} + + +void CodeGenerator::LoadReference(Reference* ref) { + // References are loaded from both spilled and unspilled code. Set the + // state to unspilled to allow that (and explicitly spill after + // construction at the construction sites). + bool was_in_spilled_code = in_spilled_code_; + in_spilled_code_ = false; + + Comment cmnt(masm_, "[ LoadReference"); + Expression* e = ref->expression(); + Property* property = e->AsProperty(); + Variable* var = e->AsVariableProxy()->AsVariable(); + + if (property != NULL) { + // The expression is either a property or a variable proxy that rewrites + // to a property. + Load(property->obj()); + // We use a named reference if the key is a literal symbol, unless it is + // a string that can be legally parsed as an integer. This is because + // otherwise we will not get into the slow case code that handles [] on + // String objects. + Literal* literal = property->key()->AsLiteral(); + uint32_t dummy; + if (literal != NULL && + literal->handle()->IsSymbol() && + !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) { + ref->set_type(Reference::NAMED); + } else { + Load(property->key()); + ref->set_type(Reference::KEYED); + } + } else if (var != NULL) { + // The expression is a variable proxy that does not rewrite to a + // property. Global variables are treated as named property references. 
+ if (var->is_global()) { + LoadGlobal(); + ref->set_type(Reference::NAMED); + } else { + ASSERT(var->slot() != NULL); + ref->set_type(Reference::SLOT); + } + } else { + // Anything else is a runtime error. + Load(e); + // frame_->CallRuntime(Runtime::kThrowReferenceError, 1); + } + + in_spilled_code_ = was_in_spilled_code; +} + + +void CodeGenerator::UnloadReference(Reference* ref) { + // Pop a reference from the stack while preserving TOS. + Comment cmnt(masm_, "[ UnloadReference"); + frame_->Nip(ref->size()); +} + + +void Reference::SetValue(InitState init_state) { + ASSERT(cgen_->HasValidEntryRegisters()); + ASSERT(!is_illegal()); + MacroAssembler* masm = cgen_->masm(); + switch (type_) { + case SLOT: { + Comment cmnt(masm, "[ Store to Slot"); + Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); + ASSERT(slot != NULL); + cgen_->StoreToSlot(slot, init_state); + break; + } + // TODO(X64): Make cases other than SLOT work. + /* + case NAMED: { + Comment cmnt(masm, "[ Store to named Property"); + cgen_->frame()->Push(GetName()); + Result answer = cgen_->frame()->CallStoreIC(); + cgen_->frame()->Push(&answer); + break; + } + + case KEYED: { + Comment cmnt(masm, "[ Store to keyed Property"); + + // Generate inlined version of the keyed store if the code is in + // a loop and the key is likely to be a smi. + Property* property = expression()->AsProperty(); + ASSERT(property != NULL); + SmiAnalysis* key_smi_analysis = property->key()->type(); + + if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) { + Comment cmnt(masm, "[ Inlined store to keyed Property"); + + // Get the receiver, key and value into registers. + Result value = cgen_->frame()->Pop(); + Result key = cgen_->frame()->Pop(); + Result receiver = cgen_->frame()->Pop(); + + Result tmp = cgen_->allocator_->Allocate(); + ASSERT(tmp.is_valid()); + + // Determine whether the value is a constant before putting it + // in a register. 
+ bool value_is_constant = value.is_constant(); + + // Make sure that value, key and receiver are in registers. + value.ToRegister(); + key.ToRegister(); + receiver.ToRegister(); + + DeferredReferenceSetKeyedValue* deferred = + new DeferredReferenceSetKeyedValue(value.reg(), + key.reg(), + receiver.reg()); + + // Check that the value is a smi if it is not a constant. We + // can skip the write barrier for smis and constants. + if (!value_is_constant) { + __ test(value.reg(), Immediate(kSmiTagMask)); + deferred->Branch(not_zero); + } + + // Check that the key is a non-negative smi. + __ test(key.reg(), Immediate(kSmiTagMask | 0x80000000)); + deferred->Branch(not_zero); + + // Check that the receiver is not a smi. + __ test(receiver.reg(), Immediate(kSmiTagMask)); + deferred->Branch(zero); + + // Check that the receiver is a JSArray. + __ mov(tmp.reg(), + FieldOperand(receiver.reg(), HeapObject::kMapOffset)); + __ movzx_b(tmp.reg(), + FieldOperand(tmp.reg(), Map::kInstanceTypeOffset)); + __ cmp(tmp.reg(), JS_ARRAY_TYPE); + deferred->Branch(not_equal); + + // Check that the key is within bounds. Both the key and the + // length of the JSArray are smis. + __ cmp(key.reg(), + FieldOperand(receiver.reg(), JSArray::kLengthOffset)); + deferred->Branch(greater_equal); + + // Get the elements array from the receiver and check that it + // is not a dictionary. + __ mov(tmp.reg(), + FieldOperand(receiver.reg(), JSObject::kElementsOffset)); + // Bind the deferred code patch site to be able to locate the + // fixed array map comparison. When debugging, we patch this + // comparison to always fail so that we will hit the IC call + // in the deferred code which will allow the debugger to + // break for fast case stores. + __ bind(deferred->patch_site()); + __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset), + Immediate(Factory::fixed_array_map())); + deferred->Branch(not_equal); + + // Store the value. 
+ __ mov(Operand(tmp.reg(), + key.reg(), + times_2, + Array::kHeaderSize - kHeapObjectTag), + value.reg()); + __ IncrementCounter(&Counters::keyed_store_inline, 1); + + deferred->BindExit(); + + cgen_->frame()->Push(&receiver); + cgen_->frame()->Push(&key); + cgen_->frame()->Push(&value); + } else { + Result answer = cgen_->frame()->CallKeyedStoreIC(); + // Make sure that we do not have a test instruction after the + // call. A test instruction after the call is used to + // indicate that we have generated an inline version of the + // keyed store. + __ nop(); + cgen_->frame()->Push(&answer); + } + break; + } + */ + default: + UNREACHABLE(); + } +} + + +Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) { + // Currently, this assertion will fail if we try to assign to + // a constant variable that is constant because it is read-only + // (such as the variable referring to a named function expression). + // We need to implement assignments to read-only variables. + // Ideally, we should do this during AST generation (by converting + // such assignments into expression statements); however, in general + // we may not be able to make the decision until past AST generation, + // that is when the entire program is known. + ASSERT(slot != NULL); + int index = slot->index(); + switch (slot->type()) { + case Slot::PARAMETER: + return frame_->ParameterAt(index); + + case Slot::LOCAL: + return frame_->LocalAt(index); + + case Slot::CONTEXT: { + // Follow the context chain if necessary. + ASSERT(!tmp.is(rsi)); // do not overwrite context register + Register context = rsi; + int chain_length = scope()->ContextChainLength(slot->var()->scope()); + for (int i = 0; i < chain_length; i++) { + // Load the closure. + // (All contexts, even 'with' contexts, have a closure, + // and it is the same for all contexts inside a function. + // There is no need to go to the function context first.) 
+ __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX)); + // Load the function context (which is the incoming, outer context). + __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset)); + context = tmp; + } + // We may have a 'with' context now. Get the function context. + // (In fact this mov may never be the needed, since the scope analysis + // may not permit a direct context access in this case and thus we are + // always at a function context. However it is safe to dereference be- + // cause the function context of a function context is itself. Before + // deleting this mov we should try to create a counter-example first, + // though...) + __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX)); + return ContextOperand(tmp, index); + } + + default: + UNREACHABLE(); + return Operand(rsp, 0); + } +} + + +Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot, + Result tmp, + JumpTarget* slow) { + UNIMPLEMENTED(); + return Operand(rsp, 0); +} + + +void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { + if (slot->type() == Slot::LOOKUP) { + ASSERT(slot->var()->is_dynamic()); + + JumpTarget slow; + JumpTarget done; + Result value; + + // Generate fast-case code for variables that might be shadowed by + // eval-introduced variables. Eval is used a lot without + // introducing variables. In those cases, we do not want to + // perform a runtime call for all variables in the scope + // containing the eval. + if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) { + value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow); + // If there was no control flow to slow, we can exit early. + if (!slow.is_linked()) { + frame_->Push(&value); + return; + } + + done.Jump(&value); + + } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) { + Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot(); + // Only generate the fast case for locals that rewrite to slots. + // This rules out argument loads. 
+ if (potential_slot != NULL) { + // Allocate a fresh register to use as a temp in + // ContextSlotOperandCheckExtensions and to hold the result + // value. + value = allocator_->Allocate(); + ASSERT(value.is_valid()); + __ movq(value.reg(), + ContextSlotOperandCheckExtensions(potential_slot, + value, + &slow)); + if (potential_slot->var()->mode() == Variable::CONST) { + __ movq(kScratchRegister, Factory::the_hole_value(), + RelocInfo::EMBEDDED_OBJECT); + __ cmpq(value.reg(), kScratchRegister); + done.Branch(not_equal, &value); + __ movq(value.reg(), Factory::undefined_value(), + RelocInfo::EMBEDDED_OBJECT); + } + // There is always control flow to slow from + // ContextSlotOperandCheckExtensions so we have to jump around + // it. + done.Jump(&value); + } + } + + slow.Bind(); + // A runtime call is inevitable. We eagerly sync frame elements + // to memory so that we can push the arguments directly into place + // on top of the frame. + frame_->SyncRange(0, frame_->element_count() - 1); + frame_->EmitPush(rsi); + __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT); + frame_->EmitPush(kScratchRegister); + if (typeof_state == INSIDE_TYPEOF) { + // value = + // frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); + } else { + // value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2); + } + + done.Bind(&value); + frame_->Push(&value); + + } else if (slot->var()->mode() == Variable::CONST) { + // Const slots may contain 'the hole' value (the constant hasn't been + // initialized yet) which needs to be converted into the 'undefined' + // value. + // + // We currently spill the virtual frame because constants use the + // potentially unsafe direct-frame access of SlotOperand. 
+ VirtualFrame::SpilledScope spilled_scope; + Comment cmnt(masm_, "[ Load const"); + JumpTarget exit; + __ movq(rcx, SlotOperand(slot, rcx)); + __ movq(kScratchRegister, Factory::the_hole_value(), + RelocInfo::EMBEDDED_OBJECT); + __ cmpq(rcx, kScratchRegister); + exit.Branch(not_equal); + __ movq(rcx, Factory::undefined_value(), RelocInfo::EMBEDDED_OBJECT); + exit.Bind(); + frame_->EmitPush(rcx); + + } else if (slot->type() == Slot::PARAMETER) { + frame_->PushParameterAt(slot->index()); + + } else if (slot->type() == Slot::LOCAL) { + frame_->PushLocalAt(slot->index()); + + } else { + // The other remaining slot types (LOOKUP and GLOBAL) cannot reach + // here. + // + // The use of SlotOperand below is safe for an unspilled frame + // because it will always be a context slot. + ASSERT(slot->type() == Slot::CONTEXT); + Result temp = allocator_->Allocate(); + ASSERT(temp.is_valid()); + __ movq(temp.reg(), SlotOperand(slot, temp.reg())); + frame_->Push(&temp); + } +} + + +void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { + // TODO(X64): Enable more types of slot. + + if (slot->type() == Slot::LOOKUP) { + UNIMPLEMENTED(); + /* + ASSERT(slot->var()->is_dynamic()); + + // For now, just do a runtime call. Since the call is inevitable, + // we eagerly sync the virtual frame so we can directly push the + // arguments into place. + frame_->SyncRange(0, frame_->element_count() - 1); + + frame_->EmitPush(esi); + frame_->EmitPush(Immediate(slot->var()->name())); + + Result value; + if (init_state == CONST_INIT) { + // Same as the case for a normal store, but ignores attribute + // (e.g. READ_ONLY) of context slot so that we can initialize const + // properties (introduced via eval("const foo = (some expr);")). Also, + // uses the current function context instead of the top context. 
+ // + // Note that we must declare the foo upon entry of eval(), via a + // context slot declaration, but we cannot initialize it at the same + // time, because the const declaration may be at the end of the eval + // code (sigh...) and the const variable may have been used before + // (where its value is 'undefined'). Thus, we can only do the + // initialization when we actually encounter the expression and when + // the expression operands are defined and valid, and thus we need the + // split into 2 operations: declaration of the context slot followed + // by initialization. + value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3); + } else { + value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3); + } + // Storing a variable must keep the (new) value on the expression + // stack. This is necessary for compiling chained assignment + // expressions. + frame_->Push(&value); + */ + } else { + ASSERT(!slot->var()->is_dynamic()); + + JumpTarget exit; + if (init_state == CONST_INIT) { + ASSERT(slot->var()->mode() == Variable::CONST); + // Only the first const initialization must be executed (the slot + // still contains 'the hole' value). When the assignment is executed, + // the code is identical to a normal store (see below). + // + // We spill the frame in the code below because the direct-frame + // access of SlotOperand is potentially unsafe with an unspilled + // frame. + VirtualFrame::SpilledScope spilled_scope; + Comment cmnt(masm_, "[ Init const"); + __ movq(rcx, SlotOperand(slot, rcx)); + __ movq(kScratchRegister, Factory::the_hole_value(), + RelocInfo::EMBEDDED_OBJECT); + __ cmpq(rcx, kScratchRegister); + exit.Branch(not_equal); + } + + // We must execute the store. Storing a variable must keep the (new) + // value on the stack. This is necessary for compiling assignment + // expressions. 
+ // + // Note: We will reach here even with slot->var()->mode() == + // Variable::CONST because of const declarations which will initialize + // consts to 'the hole' value and by doing so, end up calling this code. + if (slot->type() == Slot::PARAMETER) { + frame_->StoreToParameterAt(slot->index()); + } else if (slot->type() == Slot::LOCAL) { + frame_->StoreToLocalAt(slot->index()); + } else { + // The other slot types (LOOKUP and GLOBAL) cannot reach here. + // + // The use of SlotOperand below is safe for an unspilled frame + // because the slot is a context slot. + /* + ASSERT(slot->type() == Slot::CONTEXT); + frame_->Dup(); + Result value = frame_->Pop(); + value.ToRegister(); + Result start = allocator_->Allocate(); + ASSERT(start.is_valid()); + __ mov(SlotOperand(slot, start.reg()), value.reg()); + // RecordWrite may destroy the value registers. + // + // TODO(204): Avoid actually spilling when the value is not + // needed (probably the common case). + frame_->Spill(value.reg()); + int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; + Result temp = allocator_->Allocate(); + ASSERT(temp.is_valid()); + __ RecordWrite(start.reg(), offset, value.reg(), temp.reg()); + // The results start, value, and temp are unused by going out of + // scope. + */ + } + + exit.Bind(); + } +} + + +Result CodeGenerator::LoadFromGlobalSlotCheckExtensions( + Slot* slot, + TypeofState typeof_state, + JumpTarget* slow) { + UNIMPLEMENTED(); + return Result(rax); +} + + +void CodeGenerator::LoadGlobal() { + if (in_spilled_code()) { + frame_->EmitPush(GlobalObject()); + } else { + Result temp = allocator_->Allocate(); + __ movq(temp.reg(), GlobalObject()); + frame_->Push(&temp); + } +} + #undef __ + // End of CodeGenerator implementation. 
// ----------------------------------------------------------------------------- diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc index 27a1f01b9..a87fd4fc1 100644 --- a/src/x64/macro-assembler-x64.cc +++ b/src/x64/macro-assembler-x64.cc @@ -545,7 +545,7 @@ void MacroAssembler::InvokeFunction(Register function, movq(rsi, FieldOperand(function, JSFunction::kContextOffset)); movl(rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset)); movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset)); - // Advances rdx to the end of the Code object headers, to the start of + // Advances rdx to the end of the Code object header, to the start of // the executable code. lea(rdx, FieldOperand(rdx, Code::kHeaderSize)); diff --git a/src/x64/register-allocator-x64.cc b/src/x64/register-allocator-x64.cc index 3aba60fb2..7f6826443 100644 --- a/src/x64/register-allocator-x64.cc +++ b/src/x64/register-allocator-x64.cc @@ -66,15 +66,17 @@ void Result::ToRegister(Register target) { CodeGeneratorScope::Current()->masm()->movq(fresh.reg(), reg()); } else { ASSERT(is_constant()); - /* - TODO(X64): Handle constant results. 
- if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) { - CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle()); + if (handle()->IsSmi()) { + if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) { + CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle()); + } else { + CodeGeneratorScope::Current()->masm()-> + movq(fresh.reg(), handle(), RelocInfo::NONE); + } } else { - CodeGeneratorScope::Current()->masm()->Set(fresh.reg(), - Immediate(handle())); + CodeGeneratorScope::Current()->masm()-> + movq(fresh.reg(), handle(), RelocInfo::EMBEDDED_OBJECT); } - */ } *this = fresh; } else if (is_register() && reg().is(target)) { diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc index e6975fa43..52a762b6f 100644 --- a/src/x64/virtual-frame-x64.cc +++ b/src/x64/virtual-frame-x64.cc @@ -85,7 +85,7 @@ void VirtualFrame::Enter() { // Store the function in the frame. The frame owns the register // reference now (ie, it can keep it in rdi or spill it later). Push(rdi); - // SyncElementAt(element_count() - 1); + SyncElementAt(element_count() - 1); cgen()->allocator()->Unuse(rdi); } @@ -99,7 +99,7 @@ void VirtualFrame::Exit() { // Avoid using the leave instruction here, because it is too // short. We need the return sequence to be a least the size of a // call instruction to support patching the exit code in the - // debugger. See VisitReturnStatement for the full return sequence. + // debugger. See GenerateReturnSequence for the full return sequence. // TODO(X64): A patched call will be very long now. Make sure we // have enough room. __ movq(rsp, rbp); @@ -115,6 +115,28 @@ void VirtualFrame::Exit() { } +void VirtualFrame::AllocateStackSlots() { + int count = local_count(); + if (count > 0) { + Comment cmnt(masm(), "[ Allocate space for locals"); + // The locals are initialized to a constant (the undefined value), but + // we sync them with the actual frame to allocate space for spilling + // them later. 
First sync everything above the stack pointer so we can + // use pushes to allocate and initialize the locals. + SyncRange(stack_pointer_ + 1, element_count() - 1); + Handle undefined = Factory::undefined_value(); + FrameElement initial_value = + FrameElement::ConstantElement(undefined, FrameElement::SYNCED); + __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT); + for (int i = 0; i < count; i++) { + elements_.Add(initial_value); + stack_pointer_++; + __ push(kScratchRegister); + } + } +} + + void VirtualFrame::EmitPop(Register reg) { ASSERT(stack_pointer_ == element_count() - 1); stack_pointer_--; @@ -155,24 +177,262 @@ void VirtualFrame::EmitPush(Immediate immediate) { } -void VirtualFrame::Drop(int a) { - UNIMPLEMENTED(); +void VirtualFrame::Drop(int count) { + ASSERT(height() >= count); + int num_virtual_elements = (element_count() - 1) - stack_pointer_; + + // Emit code to lower the stack pointer if necessary. + if (num_virtual_elements < count) { + int num_dropped = count - num_virtual_elements; + stack_pointer_ -= num_dropped; + __ addq(rsp, Immediate(num_dropped * kPointerSize)); + } + + // Discard elements from the virtual frame and free any registers. + for (int i = 0; i < count; i++) { + FrameElement dropped = elements_.RemoveLast(); + if (dropped.is_register()) { + Unuse(dropped.reg()); + } + } } -int VirtualFrame::InvalidateFrameSlotAt(int a) { - UNIMPLEMENTED(); - return -1; + +int VirtualFrame::InvalidateFrameSlotAt(int index) { + FrameElement original = elements_[index]; + + // Is this element the backing store of any copies? + int new_backing_index = kIllegalIndex; + if (original.is_copied()) { + // Verify it is copied, and find first copy. + for (int i = index + 1; i < element_count(); i++) { + if (elements_[i].is_copy() && elements_[i].index() == index) { + new_backing_index = i; + break; + } + } + } + + if (new_backing_index == kIllegalIndex) { + // No copies found, return kIllegalIndex. 
+ if (original.is_register()) { + Unuse(original.reg()); + } + elements_[index] = FrameElement::InvalidElement(); + return kIllegalIndex; + } + + // This is the backing store of copies. + Register backing_reg; + if (original.is_memory()) { + Result fresh = cgen()->allocator()->Allocate(); + ASSERT(fresh.is_valid()); + Use(fresh.reg(), new_backing_index); + backing_reg = fresh.reg(); + __ movq(backing_reg, Operand(rbp, fp_relative(index))); + } else { + // The original was in a register. + backing_reg = original.reg(); + set_register_location(backing_reg, new_backing_index); + } + // Invalidate the element at index. + elements_[index] = FrameElement::InvalidElement(); + // Set the new backing element. + if (elements_[new_backing_index].is_synced()) { + elements_[new_backing_index] = + FrameElement::RegisterElement(backing_reg, FrameElement::SYNCED); + } else { + elements_[new_backing_index] = + FrameElement::RegisterElement(backing_reg, FrameElement::NOT_SYNCED); + } + // Update the other copies. + for (int i = new_backing_index + 1; i < element_count(); i++) { + if (elements_[i].is_copy() && elements_[i].index() == index) { + elements_[i].set_index(new_backing_index); + elements_[new_backing_index].set_copied(); + } + } + return new_backing_index; +} + + +void VirtualFrame::StoreToFrameSlotAt(int index) { + // Store the value on top of the frame to the virtual frame slot at + // a given index. The value on top of the frame is left in place. + // This is a duplicating operation, so it can create copies. 
+  ASSERT(index >= 0);
+  ASSERT(index < element_count());
+
+  int top_index = element_count() - 1;
+  FrameElement top = elements_[top_index];
+  FrameElement original = elements_[index];
+  if (top.is_copy() && top.index() == index) return;
+  ASSERT(top.is_valid());
+
+  InvalidateFrameSlotAt(index);
+
+  // InvalidateFrameSlotAt can potentially change any frame element, due
+  // to spilling registers to allocate temporaries in order to preserve
+  // the copy-on-write semantics of aliased elements. Reload top from
+  // the frame.
+  top = elements_[top_index];
+
+  if (top.is_copy()) {
+    // There are two cases based on the relative positions of the
+    // stored-to slot and the backing slot of the top element.
+    int backing_index = top.index();
+    ASSERT(backing_index != index);
+    if (backing_index < index) {
+      // 1. The top element is a copy of a slot below the stored-to
+      // slot. The stored-to slot becomes an unsynced copy of that
+      // same backing slot.
+      elements_[index] = CopyElementAt(backing_index);
+    } else {
+      // 2. The top element is a copy of a slot above the stored-to
+      // slot. The stored-to slot becomes the new (unsynced) backing
+      // slot and both the top element and the element at the former
+      // backing slot become copies of it. The sync state of the top
+      // and former backing elements is preserved.
+      FrameElement backing_element = elements_[backing_index];
+      ASSERT(backing_element.is_memory() || backing_element.is_register());
+      if (backing_element.is_memory()) {
+        // Because sets of copies are canonicalized to be backed by
+        // their lowest frame element, and because memory frame
+        // elements are backed by the corresponding stack address, we
+        // have to move the actual value down in the stack.
+        //
+        // TODO(209): consider allocating the stored-to slot to the
+        // temp register. Alternatively, allow copies to appear in
+        // any order in the frame and lazily move the value down to
+        // the slot.
+ __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index))); + __ movq(Operand(rbp, fp_relative(index)), kScratchRegister); + } else { + set_register_location(backing_element.reg(), index); + if (backing_element.is_synced()) { + // If the element is a register, we will not actually move + // anything on the stack but only update the virtual frame + // element. + backing_element.clear_sync(); + } + } + elements_[index] = backing_element; + + // The old backing element becomes a copy of the new backing + // element. + FrameElement new_element = CopyElementAt(index); + elements_[backing_index] = new_element; + if (backing_element.is_synced()) { + elements_[backing_index].set_sync(); + } + + // All the copies of the old backing element (including the top + // element) become copies of the new backing element. + for (int i = backing_index + 1; i < element_count(); i++) { + if (elements_[i].is_copy() && elements_[i].index() == backing_index) { + elements_[i].set_index(index); + } + } + } + return; + } + + // Move the top element to the stored-to slot and replace it (the + // top element) with a copy. + elements_[index] = top; + if (top.is_memory()) { + // TODO(209): consider allocating the stored-to slot to the temp + // register. Alternatively, allow copies to appear in any order + // in the frame and lazily move the value down to the slot. + FrameElement new_top = CopyElementAt(index); + new_top.set_sync(); + elements_[top_index] = new_top; + + // The sync state of the former top element is correct (synced). + // Emit code to move the value down in the frame. + __ movq(kScratchRegister, Operand(rsp, 0)); + __ movq(Operand(rbp, fp_relative(index)), kScratchRegister); + } else if (top.is_register()) { + set_register_location(top.reg(), index); + // The stored-to slot has the (unsynced) register reference and + // the top element becomes a copy. The sync state of the top is + // preserved. 
+ FrameElement new_top = CopyElementAt(index); + if (top.is_synced()) { + new_top.set_sync(); + elements_[index].clear_sync(); + } + elements_[top_index] = new_top; + } else { + // The stored-to slot holds the same value as the top but + // unsynced. (We do not have copies of constants yet.) + ASSERT(top.is_constant()); + elements_[index].clear_sync(); + } } + void VirtualFrame::MergeTo(VirtualFrame* a) { UNIMPLEMENTED(); } + Result VirtualFrame::Pop() { - UNIMPLEMENTED(); - return Result(NULL); + FrameElement element = elements_.RemoveLast(); + int index = element_count(); + ASSERT(element.is_valid()); + + bool pop_needed = (stack_pointer_ == index); + if (pop_needed) { + stack_pointer_--; + if (element.is_memory()) { + Result temp = cgen()->allocator()->Allocate(); + ASSERT(temp.is_valid()); + temp.set_static_type(element.static_type()); + __ pop(temp.reg()); + return temp; + } + + __ addq(rsp, Immediate(kPointerSize)); + } + ASSERT(!element.is_memory()); + + // The top element is a register, constant, or a copy. Unuse + // registers and follow copies to their backing store. + if (element.is_register()) { + Unuse(element.reg()); + } else if (element.is_copy()) { + ASSERT(element.index() < index); + index = element.index(); + element = elements_[index]; + } + ASSERT(!element.is_copy()); + + // The element is memory, a register, or a constant. + if (element.is_memory()) { + // Memory elements could only be the backing store of a copy. + // Allocate the original to a register. + ASSERT(index <= stack_pointer_); + Result temp = cgen()->allocator()->Allocate(); + ASSERT(temp.is_valid()); + Use(temp.reg(), index); + FrameElement new_element = + FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED); + // Preserve the copy flag on the element. 
+ if (element.is_copied()) new_element.set_copied(); + new_element.set_static_type(element.static_type()); + elements_[index] = new_element; + __ movq(temp.reg(), Operand(rbp, fp_relative(index))); + return Result(temp.reg(), element.static_type()); + } else if (element.is_register()) { + return Result(element.reg(), element.static_type()); + } else { + ASSERT(element.is_constant()); + return Result(element.handle()); + } } + Result VirtualFrame::RawCallStub(CodeStub* a) { UNIMPLEMENTED(); return Result(NULL); @@ -182,12 +442,81 @@ void VirtualFrame::SyncElementBelowStackPointer(int a) { UNIMPLEMENTED(); } -void VirtualFrame::SyncElementByPushing(int a) { - UNIMPLEMENTED(); + +void VirtualFrame::SyncElementByPushing(int index) { + // Sync an element of the frame that is just above the stack pointer + // by pushing it. + ASSERT(index == stack_pointer_ + 1); + stack_pointer_++; + FrameElement element = elements_[index]; + + switch (element.type()) { + case FrameElement::INVALID: + __ push(Immediate(Smi::FromInt(0))); + break; + + case FrameElement::MEMORY: + // No memory elements exist above the stack pointer. 
+ UNREACHABLE(); + break; + + case FrameElement::REGISTER: + __ push(element.reg()); + break; + + case FrameElement::CONSTANT: + if (element.handle()->IsSmi()) { + if (CodeGeneratorScope::Current()->IsUnsafeSmi(element.handle())) { + CodeGeneratorScope::Current()->LoadUnsafeSmi(kScratchRegister, + element.handle()); + } else { + CodeGeneratorScope::Current()->masm()-> + movq(kScratchRegister, element.handle(), RelocInfo::NONE); + } + } else { + CodeGeneratorScope::Current()->masm()-> + movq(kScratchRegister, + element.handle(), + RelocInfo::EMBEDDED_OBJECT); + } + __ push(kScratchRegister); + break; + + case FrameElement::COPY: { + int backing_index = element.index(); + FrameElement backing = elements_[backing_index]; + ASSERT(backing.is_memory() || backing.is_register()); + if (backing.is_memory()) { + __ push(Operand(rbp, fp_relative(backing_index))); + } else { + __ push(backing.reg()); + } + break; + } + } + elements_[index].set_sync(); } -void VirtualFrame::SyncRange(int a, int b) { - UNIMPLEMENTED(); + +// Clear the dirty bits for the range of elements in +// [min(stack_pointer_ + 1,begin), end]. +void VirtualFrame::SyncRange(int begin, int end) { + ASSERT(begin >= 0); + ASSERT(end < element_count()); + // Sync elements below the range if they have not been materialized + // on the stack. + int start = Min(begin, stack_pointer_ + 1); + + // If positive we have to adjust the stack pointer. + int delta = end - stack_pointer_; + if (delta > 0) { + stack_pointer_ = end; + __ subq(rsp, Immediate(delta * kPointerSize)); + } + + for (int i = start; i <= end; i++) { + if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i); + } } -- 2.34.1