From: lrn@chromium.org
Date: Fri, 26 Jun 2009 08:29:01 +0000 (+0000)
Subject: X64: Added support for "with" and "switch".
X-Git-Tag: upstream/4.7.83~23808
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=5b5f528a4fb5693209774c042c2ba79925c9c1aa;p=platform%2Fupstream%2Fv8.git

X64: Added support for "with" and "switch".

Review URL: http://codereview.chromium.org/147197

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2278 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---

diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index d954c1a..ebadcd7 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -687,16 +687,163 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
 }
 
 
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* a) {
-  UNIMPLEMENTED();
+void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ WithEnterStatement");
+  CodeForStatementPosition(node);
+  Load(node->expression());
+  Result context;
+  if (node->is_catch_block()) {
+    context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
+  } else {
+    context = frame_->CallRuntime(Runtime::kPushContext, 1);
+  }
+
+  // Update context local.
+  frame_->SaveContextRegister();
+
+  // Verify that the runtime call result and rsi agree.
+  if (FLAG_debug_code) {
+    __ cmpq(context.reg(), rsi);
+    __ Assert(equal, "Runtime::NewContext should end up in rsi");
+  }
 }
 
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* a) {
-  UNIMPLEMENTED();
+
+void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ WithExitStatement");
+  CodeForStatementPosition(node);
+  // Pop context.
+  __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
+  // Update context local.
+  frame_->SaveContextRegister();
 }
 
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* a) {
-  UNIMPLEMENTED();
+
+void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+  // TODO(X64): This code is completely generic and should be moved somewhere
+  // where it can be shared between architectures.
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ SwitchStatement");
+  CodeForStatementPosition(node);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+  // Compile the switch value.
+  Load(node->tag());
+
+  ZoneList<CaseClause*>* cases = node->cases();
+  int length = cases->length();
+  CaseClause* default_clause = NULL;
+
+  JumpTarget next_test;
+  // Compile the case label expressions and comparisons. Exit early
+  // if a comparison is unconditionally true. The target next_test is
+  // bound before the loop in order to indicate control flow to the
+  // first comparison.
+  next_test.Bind();
+  for (int i = 0; i < length && !next_test.is_unused(); i++) {
+    CaseClause* clause = cases->at(i);
+    // The default is not a test, but remember it for later.
+    if (clause->is_default()) {
+      default_clause = clause;
+      continue;
+    }
+
+    Comment cmnt(masm_, "[ Case comparison");
+    // We recycle the same target next_test for each test. Bind it if
+    // the previous test has not done so and then unuse it for the
+    // loop.
+    if (next_test.is_linked()) {
+      next_test.Bind();
+    }
+    next_test.Unuse();
+
+    // Duplicate the switch value.
+    frame_->Dup();
+
+    // Compile the label expression.
+    Load(clause->label());
+
+    // Compare and branch to the body if true or the next test if
+    // false. Prefer the next test as a fall through.
+    ControlDestination dest(clause->body_target(), &next_test, false);
+    Comparison(equal, true, &dest);
+
+    // If the comparison fell through to the true target, jump to the
+    // actual body.
+    if (dest.true_was_fall_through()) {
+      clause->body_target()->Unuse();
+      clause->body_target()->Jump();
+    }
+  }
+
+  // If there was control flow to a next test from the last one
+  // compiled, compile a jump to the default or break target.
+  if (!next_test.is_unused()) {
+    if (next_test.is_linked()) {
+      next_test.Bind();
+    }
+    // Drop the switch value.
+    frame_->Drop();
+    if (default_clause != NULL) {
+      default_clause->body_target()->Jump();
+    } else {
+      node->break_target()->Jump();
+    }
+  }
+
+  // The last instruction emitted was a jump, either to the default
+  // clause or the break target, or else to a case body from the loop
+  // that compiles the tests.
+  ASSERT(!has_valid_frame());
+  // Compile case bodies as needed.
+  for (int i = 0; i < length; i++) {
+    CaseClause* clause = cases->at(i);
+
+    // There are two ways to reach the body: from the corresponding
+    // test or as the fall through of the previous body.
+    if (clause->body_target()->is_linked() || has_valid_frame()) {
+      if (clause->body_target()->is_linked()) {
+        if (has_valid_frame()) {
+          // If we have both a jump to the test and a fall through, put
+          // a jump on the fall through path to avoid the dropping of
+          // the switch value on the test path. The exception is the
+          // default which has already had the switch value dropped.
+          if (clause->is_default()) {
+            clause->body_target()->Bind();
+          } else {
+            JumpTarget body;
+            body.Jump();
+            clause->body_target()->Bind();
+            frame_->Drop();
+            body.Bind();
+          }
+        } else {
+          // No fall through to worry about.
+          clause->body_target()->Bind();
+          if (!clause->is_default()) {
+            frame_->Drop();
+          }
+        }
+      } else {
+        // Otherwise, we have only fall through.
+        ASSERT(has_valid_frame());
+      }
+
+      // We are now prepared to compile the body.
+      Comment cmnt(masm_, "[ Case body");
+      VisitStatements(clause->statements());
+    }
+    clause->body_target()->Unuse();
+  }
+
+  // We may not have a valid frame here so bind the break target only
+  // if needed.
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  node->break_target()->Unuse();
 }
 
 
diff --git a/src/x64/frames-x64.cc b/src/x64/frames-x64.cc
index f206be8..fe224ad 100644
--- a/src/x64/frames-x64.cc
+++ b/src/x64/frames-x64.cc
@@ -65,18 +65,14 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
 }
 
 int JavaScriptFrame::GetProvidedParametersCount() const {
-  UNIMPLEMENTED();
-  return 0;
-}
-
-byte* ArgumentsAdaptorFrame::GetCallerStackPointer() const {
-  UNIMPLEMENTED();
-  return NULL;
+  return ComputeParametersCount();
 }
 
 
 void ExitFrame::Iterate(ObjectVisitor* a) const {
-  UNIMPLEMENTED();
+  // Exit frames on X64 do not contain any pointers. The arguments
+  // are traversed as part of the expression stack of the calling
+  // frame.
 }
 
 byte* InternalFrame::GetCallerStackPointer() const {
@@ -86,8 +82,31 @@ byte* InternalFrame::GetCallerStackPointer() const {
 }
 
 byte* JavaScriptFrame::GetCallerStackPointer() const {
-  UNIMPLEMENTED();
-  return NULL;
+  int arguments;
+  if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
+    // The arguments for cooked frames are traversed as if they were
+    // expression stack elements of the calling frame. The reason for
+    // this rather strange decision is that we cannot access the
+    // function during mark-compact GCs when the stack is cooked.
+    // In fact accessing heap objects (like function->shared() below)
+    // at all during GC is problematic.
+    arguments = 0;
+  } else {
+    // Compute the number of arguments by getting the number of formal
+    // parameters of the function. We must remember to take the
+    // receiver into account (+1).
+    JSFunction* function = JSFunction::cast(this->function());
+    arguments = function->shared()->formal_parameter_count() + 1;
+  }
+  const int offset = StandardFrameConstants::kCallerSPOffset;
+  return fp() + offset + (arguments * kPointerSize);
+}
+
+
+byte* ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+  const int arguments = Smi::cast(GetExpression(0))->value();
+  const int offset = StandardFrameConstants::kCallerSPOffset;
+  return fp() + offset + (arguments + 1) * kPointerSize;
 }
 
 
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index 31d8a2d..d4ab2c6 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -59,12 +59,12 @@ class StackHandlerConstants : public AllStatic {
 
 class EntryFrameConstants : public AllStatic {
  public:
-  static const int kCallerFPOffset = 0 * kPointerSize;
+  static const int kCallerFPOffset = -6 * kPointerSize;
 
-  static const int kFunctionArgOffset = 1 * kPointerSize;
-  static const int kReceiverArgOffset = 2 * kPointerSize;
-  static const int kArgcOffset = 3 * kPointerSize;
-  static const int kArgvOffset = 4 * kPointerSize;
+  static const int kFunctionArgOffset = +3 * kPointerSize;
+  static const int kReceiverArgOffset = +4 * kPointerSize;
+  static const int kArgcOffset = +5 * kPointerSize;
+  static const int kArgvOffset = +6 * kPointerSize;
 };
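The two GetCallerStackPointer implementations added in frames-x64.cc share one formula: the caller's stack pointer is the frame pointer plus StandardFrameConstants::kCallerSPOffset plus one pointer-sized slot per argument, counting the receiver. The following is a minimal standalone C++ sketch of that arithmetic only; kPointerSize and kCallerSPOffset here are assumed stand-in values, not the constants from V8's headers.

// Sketch only: the constants below are assumptions standing in for V8's
// real kPointerSize and StandardFrameConstants::kCallerSPOffset.
#include <cstdint>
#include <cstdio>

const int kPointerSize = 8;                    // 64-bit pointers
const int kCallerSPOffset = 2 * kPointerSize;  // skip saved fp and return address (assumed)

// Mirrors the computation in JavaScriptFrame::GetCallerStackPointer:
// step over the saved frame data and the pushed arguments
// (formal parameters plus the receiver).
uint8_t* CallerStackPointer(uint8_t* fp, int formal_parameter_count) {
  int arguments = formal_parameter_count + 1;  // +1 for the receiver
  return fp + kCallerSPOffset + arguments * kPointerSize;
}

int main() {
  uint8_t stack[256] = {0};
  uint8_t* fp = stack + 64;
  // A call with two formal parameters: receiver + 2 arguments above the frame.
  std::printf("caller sp is %td bytes above fp\n", CallerStackPointer(fp, 2) - fp);
  return 0;
}

ArgumentsAdaptorFrame::GetCallerStackPointer in the patch performs the same computation, except that the argument count is read from the adaptor frame's first expression slot (a Smi) rather than from the function's formal parameter count.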